| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0–6.46M | large_string, lengths 3–331 | large_string, 2 classes | large_string, lengths 5–125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4–6.46M | large_string, 75 classes | string, lengths 0–6.46M |
#' @export
getSleepdata <- function(token){
u <- sprintf("https://jawbone.com/nudge/api/v.1.1/users/@me/sleeps")
res <- httr::GET(url = u,
httr::config(token = token))
dat <- jsonlite::fromJSON(httr::content(res, as = "text"))
result <- dat$data$items
return(result)
}
#' @export
getSleepTickdata <- function(date, token){
data_all <- getSleepdata(token)
data_all$date <- as.Date(as.character(data_all$date), format="%Y%m%d")
# subset(data_all, date==date) would compare the column with itself and always be TRUE,
# so match the requested date explicitly
trg_id <- data_all[data_all$date == as.Date(date), ]
u <- sprintf("https://jawbone.com/nudge/api/v.1.1/sleeps/%s/ticks", trg_id$xid)
res <- httr::GET(url = u,
httr::config(token = token))
dat <- jsonlite::fromJSON(httr::content(res, as = "text"))
result <- dat$data$items
result$time <- as.POSIXct(result$time, origin="1970-01-01")
return(result)
}
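# Example usage (sketch, not run): `tok` stands for an httr OAuth 2.0 token for
# the Jawbone API, obtained separately; both helpers above take it as `token`.
# tok <- httr::oauth2.0_token(...)  # endpoint/app details omitted
# sleeps <- getSleepdata(tok)
# ticks <- getSleepTickdata("2015-11-21", tok)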
| path: /R/get.R | license: no_license | repo: dichika/myJawbone | language: R | is_vendor: false | is_generated: false | length_bytes: 828 | extension: r |
context('match_rules')
test_that('it should create a regex match_rule', {
rule <- match_regex('\\d', as.integer)
expect_is(rule, 'match_rule')
expect_is(rule, 'regex_rule')
})
test_that('it should apply a regex_rule', {
rule <- match_regex('^\\d+$', as.integer)
result <- apply_rule(rule, '11')
expect_is(result, 'rule_result')
expect_true(result$applied)
expect_equal(result$value, 11)
result <- apply_rule(rule, 11)
expect_false(result$applied) # regex_rule only handles character data
result <- apply_rule(rule, 'a')
expect_false(result$applied)
result <- apply_rule(rule, '1.1')
expect_false(result$applied)
})
test_that('it should apply a regex_rule with a group', {
rule <- match_regex('delta (\\d+)', function(data, match) {
as.integer(match[,2])
})
res <- apply_rule(rule, c('delta 50', 'delta 25'))
expect_equal(res$value, c(50, 25))
res <- apply_rule(rule, c('delta 50', NA, 'delta 25'))
expect_equal(res$value, c(50, NA, 25))
})
test_that('it should apply a regex_rule if at least one element has been matched', {
rule <- match_regex('\\d+', function(data, match) {
as.integer(match[,1])
}, apply_to='any')
res <- apply_rule(rule, c('50', '25', 'W'))
expect_equal(res$value, c(50, 25, NA))
})
test_that('it should apply a regex_rule if all elements match', {
rule <- match_regex('\\d+', function(data, match) {
as.integer(match[,1])
}, apply_to='all')
res <- apply_rule(rule, c('50', '25', 'W'))
expect_false(res$applied)
})
test_that('it should apply a regex_rule if all elements match', {
rule <- match_regex('\\d+', function(data, match) {
as.integer(match[,1])
}, apply_to='all')
res <- apply_rule(rule, c('50', '25', 'W'))
expect_false(res$applied)
})
test_that('it should iterate through a list of rules', {
rules <- list(
match_regex('NA', identity),
match_regex('1', identity, priority=1),
match_regex('2', identity, priority=2),
match_regex('NA2', identity)
)
rules <- iter_rules(rules)
expect_equal(take(rules, 'regex'), c('1', '2', 'NA', 'NA2'))
})
test_that('it should create a check class_rule', {
rule <- match_class('Date', as.character)
expect_is(rule, 'match_rule')
expect_is(rule, 'class_rule')
})
test_that('it should apply a class_rule', {
rule <- match_class('Date', as.character)
result <- apply_rule(rule, '11')
expect_is(result, 'rule_result')
expect_false(result$applied)
result <- apply_rule(rule, as.Date('2015-11-21'))
expect_true(result$applied)
expect_equal(result$value, '2015-11-21')
})
test_that('it should apply a pred_rule', {
rule <- match_predicate(is.na, function(x, idx, ...) {
x[idx] <- 0
x
})
result <- apply_rule(rule, '11')
expect_is(result, 'rule_result')
expect_false(result$applied)
result <- apply_rule(rule, NA)
expect_true(result$applied)
expect_equal(result$value, 0)
result <- apply_rule(rule, c(NA, 1))
expect_equal(result$value, c(0, 1))
})
| path: /tests/testthat/test-match_rules.R | license: no_license | repo: wilsonfreitas/transmute | language: R | is_vendor: false | is_generated: false | length_bytes: 2,977 | extension: r |
library(MASS)
library(survival)
library(mc2d)
library(tmvtnorm) #for multivariate normal
library(mvtnorm)
library(MCMCpack) #for inverse Wishart
library(mvnfast) #for fast mvrnorm
source("update_pos.R") #store all MCMC updates functions
source("mcmc.R") #store the function to run MCMC
source("Marginal_Surv.R") #store the function to compute the marginal survivals
source("Estimand.R") #store the function to compute the causal estimand h(u)
##full_data: The first column is observed progression time, the second column is the observed
##survival time, the third column is delta (the censoring indicator for progress),
## and the last column is the xi (the censoring indicator for survival)
##delta=xi=1, we observe both progression time and death time
##delta=xi=0, we only have the censoring time C
##delta=1, xi=0, we observe progression time, but censored before observing death
##delta=0, xi=1, we observe death only, but not the progression time
load("data.Rdata")
########################################################################
Niter = 5000 #the number of MCMC iterations
burn.in = 2000 #the number of burn.in
lag=10 #save samples every lag iterations
nsave = (Niter-burn.in)/lag #the number of saved samples
#Not run. The MCMC result is saved in the file "saved_mcmc.RData"
#mcmc_result <- main_mcmc(Niter, burn.in, lag, full_data, Z)
#Reproduce Figure 1 in Supplementary Material
##############Marginal
load("saved_mcmc.RData")
#NOT RUN. I save the results below in "Figure1.RData".
# figure1_result<-Marginal_survival(full_data, Z, mcmc0, mcmc1)
# fmean0_ave=figure1_result$fmean0_ave
# fmean1_ave=figure1_result$fmean1_ave
# fquantile0=figure1_result$fquantile0
# fquantile1=figure1_result$fquantile1
load("Figure1.RData")
library(survival)
pdf("brain_survival.pdf")
tim = seq(0,10,0.3)
mini.surv <- survfit(Surv(full_data[,2], full_data[,4])~ Z[,1], conf.type="none")
plot(mini.surv, col=c(1, 2), xlab="log(Time)", ylab="Survival Probability")
lines(tim, fmean0_ave, col=1,lty=2)
lines(tim, fquantile0[1,], col=1, lty=3)
lines(tim, fquantile0[2,], col=1, lty=3)
lines(tim, fmean1_ave, col=2,lty=2)
lines(tim, fquantile1[1,], col=2, lty=3)
lines(tim, fquantile1[2,], col=2, lty=3)
legend("topright", c("Treatment", "Control"), col = c(2,1),lty=1)
dev.off()
##########################################################
#Reproduce Figure 2 in Supplementary Material
##############h(u)
#NOT RUN. I save hu_result in the file "hu_rho1.RData" when rho=0.2; and hu_result1 in the file "hu_rho2.RData" when rho=0.8.
#rho = 0.2; hu_result <- Estimate_hu(rho, full_data, Z, mcmc0, mcmc1)
#rho = 0.8;hu_result1 <- Estimate_hu(rho, full_data, Z, mcmc0, mcmc1)
load("hu_rho1.RData")
load("hu_rho2.RData")
#####Figure 7 h(u)
u_range = seq(0,6,0.5)
ratio = matrix(0, length(u_range), 2)
ratio_interval=array(0, c(2, length(u_range), 2))
tmp = ifelse(hu_result[,,1]<=0.05 & hu_result[,,2]<=0.05, 1, hu_result[,,2]/hu_result[,,1])
tmp1 = ifelse(hu_result1[,,1]<=0.05 & hu_result1[,,2]<=0.05, 1, hu_result1[,,2]/hu_result1[,,1])
for (u_index in 1:length(u_range))
{
ratio[u_index,1] = mean(tmp[u_index,])
ratio_interval[, u_index, 1] = quantile(tmp[u_index,], c(0.025, 0.975))
ratio[u_index,2] = mean(tmp1[u_index,])
ratio_interval[, u_index, 2] = quantile(tmp1[u_index,], c(0.025, 0.975))
}
pdf("hu.pdf")
par(mar=c(5.1,5.1,4.1,2.1))
plot(u_range, predict(loess(ratio[,1]~u_range)),"l",xlab="log(Time) (u)", ylab=expression(paste("Estimate of", " ", tau, "(u)")),ylim=c(0, 2),cex.axis=2, cex.lab=2)
lines(u_range, predict(loess(ratio_interval[1,,1]~u_range)),col=1,lty=2)
lines(u_range, predict(loess(ratio_interval[2,,1]~u_range)),col=1,lty=2)
lines(u_range, predict(loess(ratio[,2]~u_range)),col=2)
lines(u_range, predict(loess(ratio_interval[1,,2]~u_range)),col=2,lty=2)
lines(u_range, predict(loess(ratio_interval[2,,2]~u_range)),col=2,lty=2)
legend("topright", c(expression(paste(rho, "=0.2")),expression(paste(rho, "=0.8"))), col = c(1, 2),lty=c(1,1,1))
dev.off()
| path: /main.R | license: no_license | repo: sommukh/BaySemiCompeting | language: R | is_vendor: false | is_generated: false | length_bytes: 4,014 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RFunctions_1_0_2.r
\name{omit.history}
\alias{omit.history}
\title{Function to remove irrelevant covariate history from a tidy dataframe used to construct balance tables and plots. Takes input from lengthen(), balance() or diagnose().}
\usage{
omit.history(input, omission, covariate.name, distance = NULL,
times = NULL)
}
\arguments{
\item{input}{restructured tidy dataframe from lengthen() or a dataframe from balance() or diagnose()}
\item{omission}{type of omission e.g. "fixed" or "relative" or "same.time"}
\item{covariate.name}{root name of the covariate e.g. "m"}
\item{distance}{the distance between exposure and covariate measurements e.g. 2}
\item{times}{a vector of measurement times for the covariate e.g. c(1,2,3)}
}
\value{
A "tidy" dataframe where covariate measurements have
been removed based on their fixed measurement time or relative
distance from exposure measurements (at time t). The removed
covariate measurements are typically those that do not
support exchangeability assumptions at time t.
}
\description{
Function to remove irrelevant covariate history from a tidy dataframe used to construct balance tables and plots. Takes input from lengthen(), balance() or diagnose().
}
\details{
Intended for use with Diagnostics 1 and 3. omit.history() will take the dataframe produced by lengthen() and remove covariate measurements based on their fixed measurement time or relative distance from exposure measurements (at time t) i.e. ones that do not support exchangeability assumptions at time t. The covariate.name argument is used to name the covariate whose history you wish to modify. To apply the same manipulation to a set of covariates, simply supply a vector of covariate names to covariate.name. The omission argument determines whether the covariate history is (i) set to missing for certain covariate measurement times (omission ="fixed" with times=a vector of integers) or (ii) set to missing only for covariate measurement times at or before a certain distance k from exposure measurement times (omission ="relative" with distance=some integer) or (iii) set to missing only for covariate measurements that share the same timing as exposure measurements (omission ="same.time"). The removed values are set to missing. For example, using the "fixed" omission option for covariate "l" at time 2 will set all data on "l" at time 2 to missing, regardless of the exposure measurement time. In contrast, using the "relative" omission option for covariate "l" with distance 2 will only set to missing data on "l" that is measured two units or more before the exposure measurement time (i.e. t-2, t-3, t-4 and so on). Last, using the "same.time" omission option for covariate "l" will set to missing all data on "l" that is measured at the same time as the exposure. Missing data will be ignored when this dataframe is supplied to the balance() function. They will not contribute to the resulting covariate balance table, nor to plots produced by makeplot(), nor will they contribute to any summary metrics that are estimated by averaging over person-time. Note that omit.history also accepts input from balance() and diagnose() when their scope argument has been set to "all" (i.e., not averaging over time or distance or selecting times based on recency of measurements).
}
\examples{
# Simulate the output of lengthen()
id <- as.numeric(rep(c(1,1,1,2,2,2), 7))
time.exposure <- as.numeric(rep(c(0,1,2), 14))
a <- as.character(rep(c(0,1,1,1,1,0), 7))
h <- as.character(rep(c("H","H0","H01","H","H1","H11"), 7))
name.cov <- as.character(c(rep("n",6), rep("l",18), rep("m",18)))
time.covariate <- as.numeric(c(rep(0,6), rep(c(rep(0,6),
rep(1,6),rep(2,6)), 2)))
value.cov <- as.numeric(c(rep(1,9), rep(0,3), rep(1,6),
rep(0,3), rep(1,3), rep(0,12),
rep(1,3), rep(0,3)))
mydata.long <- data.frame(id, time.exposure, a, h,
name.cov, time.covariate, value.cov)
# Run the omit.history() function
mydata.long.omit <- omit.history(input=mydata.long,
omission="relative",
covariate.name=c("l","m"),
distance=1)
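# Further usage sketches for the other omission types described in Details
# (same simulated data as above; output not shown)
mydata.long.fixed <- omit.history(input=mydata.long,
                                  omission="fixed",
                                  covariate.name="l",
                                  times=c(1,2))
mydata.long.same <- omit.history(input=mydata.long,
                                 omission="same.time",
                                 covariate.name=c("l","m"))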
}
| path: /man/omit.history.Rd | license: no_license | repo: jwjackson/confoundr | language: R | is_vendor: false | is_generated: true | length_bytes: 4,350 | extension: rd |
#' Matrix Inversion
#'
#' Performs a Moore-Penrose generalized inverse (also called the Pseudoinverse).
#'
#' @inheritParams cor_to_pcor
#' @examples
#' m <- cor(iris[1:4])
#' matrix_inverse(m)
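#' # Additional sketch: a correlation matrix with an exactly collinear pair of
#' # columns is singular, so this exercises the pseudo-inverse branch
#' d <- data.frame(a = 1:10, b = 2 * (1:10), c = rnorm(10))
#' matrix_inverse(cor(d))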
#' @param m Matrix for which the inverse is required.
#'
#' @return The inverted matrix.
#' @seealso pinv from the pracma package
#' @export
matrix_inverse <- function(m, tol = .Machine$double.eps^(2 / 3)) {
# valid matrix checks
if (!isSquare(m)) {
stop("The matrix should be a square matrix.", call. = FALSE)
}
stopifnot(is.numeric(m), length(dim(m)) == 2, is.matrix(m))
s <- svd(m)
p <- (s$d > max(tol * s$d[1], 0))
if (all(p)) {
mp <- s$v %*% (1 / s$d * t(s$u))
} else if (any(p)) {
mp <- s$v[, p, drop = FALSE] %*% (1 / s$d[p] * t(s$u[, p, drop = FALSE]))
} else {
mp <- matrix(0, nrow = ncol(m), ncol = nrow(m))
}
colnames(mp) <- colnames(m)
row.names(mp) <- row.names(m)
mp
}
#' @keywords internal
.invert_matrix <- function(m, tol = .Machine$double.eps^(2 / 3)) {
if (det(m) < tol) {
# The inverse of the variance-covariance matrix is calculated using the
# Moore-Penrose generalized inverse because its determinant is (near) zero.
out <- matrix_inverse(m, tol)
colnames(out) <- colnames(m)
row.names(out) <- row.names(m)
} else {
out <- solve(m)
}
out
}
| path: /R/matrix_inverse.R | license: no_license | repo: cran/correlation | language: R | is_vendor: false | is_generated: false | length_bytes: 1,404 | extension: r |
# Using Probability Simulation in R
# February 15 2018
# Adrian Wiegman
# stochastic vs deterministic models
# install/load necessary packages
library(ggplot2)
#generate random uniform data
testData <- runif(1000)
qplot(x=testData)
#creating a function in R to make custom graphs
#functions must go at the top of programs so that
#they can be compiled into the memory
#///FUNCTIONS-----------------------
#_Function Histo
# better histogram plot
# input xData = numeric vector
# input fColor = fill color
# output = corrected ggplot histogram
# output = summary statistics
# output = 95% interval
Histo <- function(xData=runif(1000),fColor='salmon') {
z <- qplot(x=xData,color=I('black'),fill=I(fColor),xlab='X',boundary=0)
print(z)
print(summary(xData))
print(quantile(x=xData,probs=c(0.025,0.975)))
}
#function(){} is an R function for building functions
#qplot() is a ggplot function
#I() means "as is": it keeps qplot from interpreting the colour strings as data, so they are used literally
#_Function iHisto
#works better than histo for integer values!
#input xData = vector of integers
#input fColor = fill color
#output = summary of x data
#output = 95% confidence interval
iHisto <- function(xData=runif(1000),fColor='salmon') {
z <-qplot(x=factor(xData),color=I('black'),fill=I(fColor),xlab='X',boundary=0)
print(z)
print(summary(xData))
print(quantile(x=xData,probs=c(0.025,0.975)))
}
#///MAIN PROGRAM-------------------------
Histo()
temp <- rnorm(1000)
Histo(xData=temp,fColor='yellow1')
iHisto()
#DISCRETE PROBABILITY DISTRIBUTION
#Poisson distribution
temp2 <- rpois(n=1000,lambda=0.5) #poisson distribution, lambda represents the average rate of events per sampling interval
#poisson gets more coarse as lambda approaches zero
iHisto(temp2)
iHisto(xData=temp2, fColor='springgreen')
mean(temp2==0) # mean of a vector of TRUE/FALSE values coerced to integer, i.e. the proportion of zeros
# Binomial distribution
# integer from 0 to number of trials
# input parameters...
# n= number of random draws (replicates)
# size= number of trials per draw
# p= probability of success on each trial
zz <- rbinom(n=1000,size=40,p=0.75)
iHisto(xData=zz,fColor='slateblue')
#poisson constant rate process
z <- rpois(n=1000,lambda=1)
iHisto(z)
mean(z==0)
#the negative binomial distribution fits environmental data nicely
#range from 0 to infinity
#n = number of replicates
#size is number of trials per replicate
# prob = probability of success with 1 trial
z <- rnbinom(n=1000, size=2, prob=0.5)
iHisto(z)
#number of failures until we get to a certain number of successes
# imagine a string of coin toss results
# success = 2 H
# HH = 0 failure
# THH = 1 failure
# HTHH = 2 failures
# THTHH = 3 failures
#alternatively we can call mu
#size = index of overdispersion
#small size leads to high dispersion
z <- rnbinom(n=1000, mu=1.1, size=0.7)
iHisto(z)
#special case where the number of trials = 1 and prob is low
z <- rnbinom(n=1000, size=1, prob=0.05)
iHisto(z)
#probability is high
z <- rnbinom(n=1000, size=1, prob=0.95)
iHisto(z)
#binomial distribution is a TRUE or FALSE distribution
#----------------------------
#multinomial distribution (greater than two posibilities)
#"imagine balls in urns"
# size = number of balls
# prob = a vector whose length equals the number of urns, containing the probability of a ball landing in each urn
z <-rmultinom(n=1000, size=20,prob=c(0.2,0.7,0.1))
#don't print this out if larger than 10
rowSums(z)
rowMeans(z)
#creating a multinomial with sample
z <- sample(x=LETTERS[1:3], size=20, prob=c(0.2,0.7,0.1), replace=TRUE)
z
table(z) #
#CONTINUOUS PROBABILITY DISTRIBUTIONS
#uniform distribution
z <- runif(n=1000, min=3, max=10.2)
Histo(z)
#normal distribution
z <- rnorm(n=1000, mean=2.2, sd=6)
Histo(z)
#problematic for simulation because it gives negative values which don't normally occur in real life (e.g. biomass)
#gamma distribution
#distribution of waiting times for failure to occur
#can only generate positive values
#shape and scale parameters
# mean = shape*scale
# variance = shape*scale^2
z <- rgamma(n=1000, shape=1,scale=10) #exponential decay
Histo(z)
z <- rgamma(n=1000, shape=10,scale=10) #moves towards bell with increase in shape
Histo(z)
z <- rgamma(n=1000, shape=0.1,scale=10) # power decay
Histo(z)
#beta distribution
#bounded between 0 and 1
# change boundary by adding or multiplying final vector
# conjugate prior for a binomial distribution
# binomial begins with underlying probability
# generates a number of successes and failures
# p is ~ success/(success + failure)
# problem at small sample size
# parameters
# shape1 = number of successes + 1
# shape2 = number of failures + 1
#thought experiment:
# start with no data
successes = 0
failures = 0
z <- rbeta(n=1000,shape1=(successes+1),shape2=(failures+1))
Histo(z)
# two coin tosses
successes = 1
failures = 1
z <- rbeta(n=1000,shape1=(successes+1),shape2=(failures+1))
Histo(z)
# 10 coin tosses
successes = 10
failures = 10
z <- rbeta(n=1000,shape1=(successes+1),shape2=(failures+1))
Histo(z)
# 100 coin tosses with a biased coin
successes = 90
failures = 10
z <- rbeta(n=1000,shape1=(successes+1),shape2=(failures+1))
Histo(z)
# small shape values with a biased coin
z <- rbeta(n=1000,shape1=0.1,shape2=10)
Histo(z)
z <- rbeta(n=1000,shape1=0.1,shape2=0.1)
Histo(z)
#--------------------------------------------
# MAXIMUM LIKELIHOOD ESTIMATION IN R
x<-rnorm(1000,mean=92.5,sd=2.5)
Histo(x)
library(MASS)
#fit distribution function:
#fit to normal
zFit <- fitdistr(x,'normal')
str(zFit)
zFit$estimate # the dollar sign references a vector in a list
#now fit a gamma
zFit <- fitdistr(x,'gamma')
zFit$estimate
zNew <- rgamma(n=1000, shape=1449, rate=15.7)
Histo(zNew)
#gamma distribution replicates normal quite nicely
summary(x)
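# Sketch: compare the two fits formally; fitdistr objects carry a log-likelihood,
# so AIC() can be applied to them directly (lower is better)
fitNorm <- fitdistr(x,'normal')
fitGamma <- fitdistr(x,'gamma')
AIC(fitNorm)
AIC(fitGamma)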
z <- runif(n=1000,min=85,max=100)
Histo(z)
| path: /R/ProbabilityDistributions.R | license: no_license | repo: arhwiegman/Scripts | language: R | is_vendor: false | is_generated: false | length_bytes: 5,737 | extension: r |
## HCDB sampling locations and sample type
##
library(dataone) #needed to run
library(dplyr)
## Initialize a client to interact with DataONE
cli <- D1Client("PROD", "urn:node:GOA")
hcdb=read.csv("Total_Aromatic_Alkanes_PWS.csv",header=T)
hcdb=hcdb %>%
mutate(matr2=tolower(matrix)) %>%
filter(!matr2=='fblank') %>%
filter(!matr2=='blank') %>%
filter(!matr2=='us') %>%
filter(!matr2=='qcsed')
### mapping
library(rworldmap)
library(rworldxtra)
library(rgdal)
library(ggplot2)
world=getMap('low',projection=NA)
worldB=world[!is.na(world$continent),]
world2=worldB[worldB$continent=='North America' & worldB$LON<0,]
fWorld=fortify(world2)
colMap=c('dimgrey','black')
extDf=data.frame(xmin=-157,xmax=-143,ymin=56,ymax=62)
ggplot(data=fWorld) +
geom_map(map=fWorld,aes(x=long,y=lat,map_id=id))+
coord_map(xlim = c(-180, -123),ylim = c(34, 63))+
geom_point(data=hcdb,mapping=aes(x=as.numeric(LONG), y=as.numeric(LAT),colour=matr2),size=1,alpha=0.7, shape=20) +
#scale_color_manual(values=tsColors,name='category')+ #,breaks=rev(cnLevels),labels=rev(cnLevels)
#geom_rect(data=extDf,aes(xmin=xmin,xmax=xmax,ymin=ymin,ymax=ymax),color='gray53',fill=NULL,lwd=0.5,alpha=0.75)+
ggtitle('Locations of HCDB samples in the Gulf of Alaska')+
xlab('lon')+
theme(axis.line=element_line(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
legend.position='none',
axis.text=element_text(size=14),
title=element_text(size=16,face="bold"))+
guides(colour = guide_legend(override.aes = list(size=6)))
ggsave("./results/hcdbSamplesGOA.png", width=12, height=8)
### ZOOOOOM in:
tempFilename <- "./results/akMapData.zip"
akMapObject=getD1Object(cli,'df35d.431.1') ## shp file from dataONE
akMapData <- getData(akMapObject)
write(akMapData,file = "./results/akMapData")
file.rename('./results/akMapData',tempFilename)
unzip(tempFilename, list=FALSE) ### ERRORS, not sure why, line 21: Error in name == "GADM" : comparison (1) is possible only for atomic and list types
state <- readOGR('GIS','statep010')
stateDf=fortify(state)
## Colors:
library('RColorBrewer')
ggplot(data=stateDf, aes(y=lat, x=long)) +
geom_map(map=stateDf,aes(x=long,y=lat,map_id=id))+
coord_map(xlim = c(-157, -143),ylim = c(56, 62))+
#scale_fill_manual(values=colMap)+
geom_point(data=hcdb, aes(x=as.numeric(LONG), y=as.numeric(LAT),colour=matrix), size=2, shape=20,alpha=0.75) +
scale_colour_brewer(palette='Set1',name='Sample type')+#,breaks=cnLevels,labels=cnLevels
ggtitle('Locations of HCDB samples in Northern GOA')+
theme(axis.line=element_line('black'),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
legend.position='right',
axis.text=element_text(size=14),
title=element_text(size=16,face="bold"))+
guides(colour = guide_legend(override.aes = list(size=6)))
ggsave("./results/hcdbSampleLocs.png", width=12, height=9)
############################################
###########################################
| path: /transparency/test/new_hcdbSites.R | license: no_license | repo: shek21/DataONE_2018_Summer_Intern_Project1 | language: R | is_vendor: false | is_generated: false | length_bytes: 3,224 | extension: r |
#############################################################
# Section 6.10 Analysis of the Stanford Heart Transplant Data
#############################################################
library(LearnBayes)
data(stanfordheart)
start=c(0,3,-1)
laplacefit=laplace(transplantpost,start,stanfordheart)
laplacefit
proposal=list(var=laplacefit$var,scale=2)
s=rwmetrop(transplantpost,proposal,start,10000,stanfordheart)
s$accept
par(mfrow=c(2,2))
tau=exp(s$par[,1])
plot(density(tau),main="TAU")
lambda=exp(s$par[,2])
plot(density(lambda),main="LAMBDA")
p=exp(s$par[,3])
plot(density(p),main="P")
apply(exp(s$par),2,quantile,c(.05,.5,.95))
par(mfrow=c(1,1))
t=seq(1,240)
p5=0*t; p50=0*t; p95=0*t
for (j in 1:240)
{ S=(lambda/(lambda+t[j]))^p
q=quantile(S,c(.05,.5,.95))
p5[j]=q[1]; p50[j]=q[2]; p95[j]=q[3]}
plot(t,p50,type="l",ylim=c(0,1),ylab="Prob(Survival)",
xlab="time")
lines(t,p5,lty=2)
lines(t,p95,lty=2)
| path: /04_Bayesian Statistics/New Folder With Items/bayesian_computation/Chapter.6.10.R | license: no_license | repo: Yousuf28/Statistics_with_R_Coursera | language: R | is_vendor: false | is_generated: false | length_bytes: 944 | extension: r |
shinyplotsingle<- function(fit, xl, xu, ql, qu, ex){
plotlimits <- paste(xl, xu , sep = ",")
# Determine set of suitable distributions
if(fit$limits[ex, 1]>=0 & fit$limits[ex, 2] < Inf){
distributionchoices <- list("Histogram" = 1, "Normal" = 2, "Student t" = 3, "Gamma" = 4, "log normal" = 5, "Log Student t" = 6, "Beta" = 7, "Best fitting" =8)
}
if(fit$limits[ex, 1]>=0 & fit$limits[ex, 2] == Inf){
distributionchoices <- list("Histogram" = 1, "Normal" = 2, "Student t" = 3, "Gamma" = 4, "log normal" = 5, "Log Student t" = 6, "Best fitting" =8)
}
if(fit$limits[ex, 1]==-Inf & fit$limits[ex, 2] == Inf){
distributionchoices <- list("Histogram" = 1, "Normal" = 2, "Student t" = 3, "Best fitting" =8)
}
if(fit$limits[ex, 1]>-Inf & fit$limits[ex, 1] < 0 & fit$limits[ex, 2] < Inf){
distributionchoices <- list("Histogram" = 1, "Normal" = 2, "Student t" = 3, "Beta" = 7, "Best fitting" =8)
}
if(is.na(ql) == TRUE){ql <- 0.05}
if(is.na(qu) == TRUE){qu <- 0.95}
###
runApp(list(
ui = shinyUI(fluidPage(
# Application title
titlePanel("Feedback"),
sidebarLayout(
sidebarPanel(
textInput("xlimits", label = h5("x-axis limits"), value = plotlimits),
radioButtons("radio", label = h5("Distribution"), choices = distributionchoices, selected = 1 ),
numericInput("fq1", label = h5("lower feedback quantile"), value = ql, min=0, max=1),
numericInput("fq2", label = h5("upper feedback quantile"), value = qu ,min=0, max=1)
),
mainPanel(
plotOutput("distPlot"),
tableOutput("values")
)
)
)),
server = function(input, output) {
output$distPlot <- renderPlot({
xlimits<-eval(parse(text=paste("c(",input$xlimits,")")))
dist<-c("hist","normal", "t", "gamma", "lognormal", "logt","beta", "best")
drawdensity(fit, d=dist[as.numeric(input$radio)], ql=input$fq1, qu=input$fq2, xl=xlimits[1], xu=xlimits[2], ex=ex)
})
ssq <- fit$ssq[1, is.na(fit$ssq[1,])==F]
best.index <- which(ssq == min(ssq))[1]
quantileValues <- reactive({
xlimits<-eval(parse(text=paste("c(",input$xlimits,")")))
pl<-xlimits[1]
pu<-xlimits[2]
if(as.numeric(input$radio)==8){index<-best.index}else{index<-as.numeric(input$radio) - 1}
if(as.numeric(input$radio)==1){
if(pl == -Inf & fit$limits[ex,1] > -Inf){pl <- fit$limits[ex,1]}
if(pu == Inf & fit$limits[ex,2] < Inf){pu <- fit$limits[ex,2] }
if(pl == -Inf & fit$limits[ex,1] == -Inf){pl <- qnorm(0.001, fit$Normal[ex,1], fit$Normal[ex,2])}
if(pu == Inf & fit$limits[ex,2] == Inf){pu <- qnorm(0.999, fit$Normal[ex,1], fit$Normal[ex,2])}
p <- c(0, fit$probs[ex,], 1)
x <- c(pl, fit$vals[ex,], pu)
values <- qhist(c(input$fq1,input$fq2), x, p)
}
if(as.numeric(input$radio)>1){
temp<-feedback(fit, quantiles=c(input$fq1,input$fq2), ex=ex)
values=temp$fitted.quantiles[,index]
}
data.frame(quantiles=c(input$fq1,input$fq2), values=values)
})
output$values <- renderTable({
quantileValues()
})
}
))
}
| path: /SHELF/R/shinyplotsingle.R | license: no_license | repo: ingted/R-Examples | language: R | is_vendor: false | is_generated: false | length_bytes: 3,232 | extension: r |
library(e1071)
library(LiblineaR)
set.seed(1)
x <- matrix(rnorm(20*2), ncol = 2)
y <- c(rep(-1,10), rep(1,10))
x[ y== 1, ] <- x[y == 1,] + 1
plot(x, col = (3-y))
data <- data.frame(x = x, y = as.factor(y)) # must set as a factor!!!
svm_fit_10 <- svm(y~., data = data, kernel = "linear", cost = 10, scale = FALSE)
# scale = FALSE --> do not scale each feature to have mean zero or SD of 1
plot(svm_fit_10, data)
str(svm_fit_10)
svm_fit_10$index
summary(svm_fit_10)
# Run with a smaller cost parameters
svm_fit_0.1 <- svm(y~., data = data, kernel = "linear", cost = 0.1, scale = FALSE)
# scale = FALSE --> do not scale each feature to have mean zero or SD of 1
plot(svm_fit_0.1, data)
str(svm_fit_0.1)
svm_fit_0.1$index
summary(svm_fit_0.1)
# Let's compare SVMs with a linear kernel comparing a range of values of the cost parameter
tune_out <- tune(svm, y~., data = data, kernel = "linear", ranges = list(cost = c(0.001, 0.01, 0.1, 1, 5, 10, 100)))
summary(tune_out)
best_svm <- tune_out$best.model
summary(best_svm)
# Making predictions
xtest <- matrix(rnorm(20*2), ncol = 2)
ytest <- sample(c(-1,1), 20, rep = TRUE)
xtest[ytest == 1, ] <- xtest[ytest == 1,] + 1
test_data <- data.frame(x = xtest, y = as.factor(ytest))
y_prediction_0.1 <- predict(best_svm, test_data)
table(predict = y_prediction_0.1, truth = test_data$y)
# If we had used a cost of 0.01 instead
svm_fit_0.01 <- svm(y~., data = data, kernel = "linear", cost = 0.01, scale = FALSE)
y_prediction_0.01 <- predict(svm_fit_0.01, test_data)
table(predict = y_prediction_0.01, truth = test_data$y)
# separating hyperplane
# let's further separate the results
x[ y== 1, ] <- x[y == 1,] + 0.5
plot(x[,2],x[,1],col=(y+5)/2,pch=19)
data <- data.frame(x = x, y = as.factor(y)) # must set as a factor!!!
svm_fit_large_cost <- svm(y~., data = data, kernel = "linear", cost = 1e5, scale = FALSE)
plot(svm_fit_large_cost, data)
summary(svm_fit_large_cost)
svm_fit_1 <- svm(y~., data=data, kernel ="linear", cost =1, scale = FALSE)
summary(svm_fit_1 )
plot(svm_fit_1, data)
## Support Vector Machine
set.seed(1)
x <- matrix(rnorm (200*2) , ncol =2)
x[1:100, ] <- x[1:100,] + 2
x[101:150, ] <- x[101:150, ] - 2
y <- c(rep(1,150), rep(2 ,50))
data <- data.frame(x = x ,y = as.factor(y))
plot(x, col=y, pch=19)
train <- sample(200 ,100)
# Radial kernel
svm_fit_radial_1 <- svm(y~., data=data[train ,], kernel ="radial", gamma =1, cost =1)
plot(svm_fit_radial_1 , data[train ,])
summary(svm_fit_radial_1)
# increase the cost
svm_fit_radial_1e5 <- svm(y~., data=data[train ,], kernel ="radial", gamma =1, cost =1e5)
plot(svm_fit_radial_1e5 , data[train ,])
summary(svm_fit_radial_1e5)
# Perform cross-validation to select the best choice of gamma and cost for a radial kernel
set.seed (1)
tune_out_radial <- tune(svm,y~.,data=data[train ,],kernel ="radial", ranges =list(cost=c(0.1 ,1 ,10 ,100 ,1000), gamma=c(0.5,1,2,3,4) ))
summary(tune_out_radial)
# Error rate = 10%
table(true=data[-train ,"y"], pred=predict(tune_out_radial$best.model,newdata =data[-train,]))
| path: /svm.R | license: permissive | repo: marschmi/STATS415_DataMining | language: R | is_vendor: false | is_generated: false | length_bytes: 3,057 | extension: r |
library(e1071)
library(LiblineaR)
set.seed(1)
x <- matrix(rnorm(20*2), ncol = 2)
y <- c(rep(-1,10), rep(1,10))
x[ y== 1, ] <- x[y == 1,] + 1
plot(x, col = (3-y))
data <- data.frame(x = x, y = as.factor(y)) # must set as a factor!!!
svm_fit_10 <- svm(y~., data = data, kernel = "linear", cost = 10, scale = FALSE)
# scale = FALSE --> tedo not scale each feature to have mean zero or SD of 1
plot(svm_fit_10, data)
str(svm_fit_10)
svm_fit_10$index
summary(svm_fit_10)
# Run with a smaller cost parameters
svm_fit_0.1 <- svm(y~., data = data, kernel = "linear", cost = 0.1, scale = FALSE)
# scale = FALSE --> tedo not scale each feature to have mean zero or SD of 1
plot(svm_fit_0.1, data)
str(svm_fit_0.1)
svm_fit_0.1$index
summary(svm_fit_0.1)
# Let's compare SVMs with a linear kernel comparing a range of values of the cost parameter
tune_out <- tune(svm, y~., data = data, kernel = "linear", ranges = list(cost = c(0.001, 0.01, 0.1, 1, 5, 10, 100)))
summary(tune_out)
best_svm <- tune_out$best.model
summary(best_svm)
# Making predictions
xtest <- matrix(rnorm(20*2), ncol = 2)
ytest <- sample(c(-1,1), 20, rep = TRUE)
xtest[ytest == 1, ] <- xtest[ytest == 1,] + 1
test_data <- data.frame(x = xtest, y = as.factor(ytest))
y_prediction_0.1 <- predict(best_svm, test_data)
table(predict = y_prediction_0.1, truth = test_data$y)
# If we had used a cost of 0.01 instead
svm_fit_0.01 <- svm(y~., data = data, kernel = "linear", cost = 0.01, scale = FALSE)
y_prediction_0.01 <- predict(svm_fit_0.01, test_data)
table(predict = y_prediction_0.01, truth = test_data$y)
# separating hyperplane
# let's further separate the results
x[ y== 1, ] <- x[y == 1,] + 0.5
plot(x[,2],x[,1],col=(y+5)/2,pch=19)
data <- data.frame(x = x, y = as.factor(y)) # must set as a factor!!!
svm_fit_large_cost <- svm(y~., data = data, kernel = "linear", cost = 1e5, scale = FALSE)
plot(svm_fit_large_cost, data)
summary(svm_fit_large_cost)
svm_fit_1 <- svm(y~., data=data, kernel ="linear", cost =1, scale = FALSE)
summary(svm_fit_1 )
plot(svm_fit_1, data)
## Support Vector Machine
set.seed(1)
x <- matrix(rnorm (200*2) , ncol =2)
x[1:100, ] <- x[1:100,] + 2
x[101:150, ] <- x[101:150, ] - 2
y <- c(rep(1,150), rep(2 ,50))
data <- data.frame(x = x ,y = as.factor(y))
plot(x, col=y, pch=19)
train <- sample(200 ,100)
# Radial kernel
svm_fit_radial_1 <- svm(y~., data=data[train ,], kernel ="radial", gamma =1, cost =1)
plot(svm_fit_radial_1 , data[train ,])
summary(svm_fit_radial_1)
# increase the cost
svm_fit_radial_1e5 <- svm(y~., data=data[train ,], kernel ="radial", gamma =1, cost =1e5)
plot(svm_fit_radial_1e5 , data[train ,])
summary(svm_fit_radial_1e5)
# Perform cross-validation to select the best choice of gamma and cost for a radial kernel
set.seed (1)
tune_out_radial <- tune(svm,y~.,data=data[train ,],kernel ="radial", ranges =list(cost=c(0.1 ,1 ,10 ,100 ,1000), gamma=c(0.5,1,2,3,4) ))
summary(tune_out_radial)
# Error rate = 10%
table(true=data[-train ,"y"], pred=predict(tune_out_radial$best.model,newdata =data[-train,]))
```{r plots}
|
#!/usr/bin/env Rscript
group1 = c(0.7, -1.6, -0.2, -1.2, -0.1, 3.4, 3.7, 0.8, 0.0, 2.0)
group2 = c(1.9, 0.8, 1.1, 0.1, -0.1, 4.4, 5.5, 1.6, 4.6, 3.4)
p_value_studentt = t.test(group1, group2, var.equal=T)$p.value
print(p_value_studentt) # => 0.07939414
|
/statistical_test/t_student.R
|
permissive
|
nishimoto/py_r_stats
|
R
| false | false | 244 |
r
|
#!/usr/bin/env Rscript
group1 = c(0.7, -1.6, -0.2, -1.2, -0.1, 3.4, 3.7, 0.8, 0.0, 2.0)
group2 = c(1.9, 0.8, 1.1, 0.1, -0.1, 4.4, 5.5, 1.6, 4.6, 3.4)
p_value_studentt = t.test(group1, group2, var.equal=T)$p.value
print(p_value_studentt) # => 0.07939414
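# Illustrative follow-up (not part of the original exercise): dropping
# var.equal=T gives Welch's t-test, which does not assume equal variances.
p_value_welch = t.test(group1, group2)$p.value
print(p_value_welch)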
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/upper_aerodigestive_tract.csv",header=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.25,family="gaussian",standardize=TRUE)
sink('./upper_aerodigestive_tract_037.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/AvgRank/upper_aerodigestive_tract/upper_aerodigestive_tract_037.R
|
no_license
|
esbgkannan/QSMART
|
R
| false | false | 384 |
r
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/upper_aerodigestive_tract.csv",header=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.25,family="gaussian",standardize=TRUE)
sink('./upper_aerodigestive_tract_037.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
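# Illustrative follow-up (printed to the console, not to the sink file): inspect
# the lambda selected by cross-validation and its coefficients; both accessors
# are standard for cv.glmnet objects.
print(glm$lambda.min)
print(coef(glm, s = "lambda.min"))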
|
#' Print method for crosstab
#'
#' The cross tabulation is rendered as HTML, which can be viewed in RStudio's
#' viewer pane.
#'
#' @param x A crosstab object.
#' @keywords internal
#' @method print crosstab
#' @export
print.crosstab <- function(x, ...) {
html_page <- combine_parts(x)
# output the page to temporary html_file
html_page %>%
browsable() %>%
html_print()
invisible(x)
}
#' Knit print method for crosstab
#'
#' @keywords internal
#' @export
knit_print.crosstab <- function(x, ...) {
html_page <- combine_parts(x)
deps <- htmltools::findDependencies(html_page)
knitr::asis_output(htmltools::htmlPreserve(as.character(html_page)),
meta = deps)
}
# Helper functions -----------
combine_parts <- function(x) {
# compute table
tab_out <- build_tab(x)
# prepare test statistics
html_tests <- prepare_stats(x)
# create html table
html_table <- prepare_table(tab_out)
# add headings
html_table <- add_headings(html_table, tab_out, x)
# create html page
html_page <- create_page(html_table, html_tests)
html_page
}
#' HTML parts of page
#'
#' This function currently serves to prepare the page layout.
#'
#' Later on, this function should become more flexible, so that the content of
#' the page depends on the content of the table.
#'
#' @param table A bare HTML table, created with \code{htmlTable}.
#' @param stats Character output from a statistical test.
#' @return A \code{tagList} with registered dependencies.
#' @keywords internal
create_page <- function(table, stats) {
# create link to stylesheet
style_link <- htmltools::htmlDependency(
name = "crosstabr",
version = as.character(utils::packageVersion("crosstabr")),
src = system.file(package = "crosstabr"),
stylesheet = "css/crosstabr.css"
)
# Create page without statistics
if (is.null(stats)) {
html <- tagList(
tags$body(
div(id = "tables",
div(id = "two-way",
HTML(table)
)
)
)
)
} else if (!is.null(stats)) { # Create page with statistics
style_link$stylesheet <- c(style_link$stylesheet, "css/with_stats.css")
html <- tagList(
tags$body(
div(id = "tables",
div(id = "two-way",
HTML(table)
)
),
div(id = "stats",
HTML(stats)
)
)
)
}
html <- attachDependencies(html, style_link)
html
}
#' Creates a HTML table
#'
#' @param x A matrix, created by \code{build_tab}.
#' @return A table in HTML format, without inline styling.
#' @keywords internal
prepare_table <- function(x) {
# create html_table
x <- utils::capture.output(
print(htmlTable::htmlTable(x), useViewer = F)
)
# pattern to remove inline css-styles
style_pattern <- "(style).*(?=;).{2}"
# remove inline css
x %>%
stringr::str_replace_all(style_pattern, "") %>%
stringr::str_c(collapse = "")
}
#' @keywords internal
add_headings <- function(html_table, tab_out, x) {
# insert heading into original table
html_table <- stringr::str_replace(html_table, "<th >",
paste0("<th>", x$dependent))
# find number of cols and rows (excluding total col)
dimensions <- dim(tab_out) - 1
cols <- dimensions[2]
rows <- dimensions[1]
top <- paste0("<table id='outer_table'><tbody><tr id='headings'><td></td>",
"<td id='independent' colspan='",
cols,
"'>",
x$independent,
"</td><td></td></tr><tr><td colspan='",
cols + 2,
"'>")
bottom <- "</td></tr></tbody></table>"
result <- paste(top, html_table, bottom)
result
}
|
/R/print.R
|
no_license
|
tklebel/crosstabr
|
R
| false | false | 3,751 |
r
|
#' Print method for crosstab
#'
#' The cross tabulation is rendered as HTML, which can be viewed in RStudio's
#' viewer pane.
#'
#' @param x A crosstab object.
#' @keywords internal
#' @method print crosstab
#' @export
print.crosstab <- function(x, ...) {
html_page <- combine_parts(x)
# output the page to temporary html_file
html_page %>%
browsable() %>%
html_print()
invisible(x)
}
#' Knit print method for crosstab
#'
#' @keywords internal
#' @export
knit_print.crosstab <- function(x, ...) {
html_page <- combine_parts(x)
deps <- htmltools::findDependencies(html_page)
knitr::asis_output(htmltools::htmlPreserve(as.character(html_page)),
meta = deps)
}
# Helper functions -----------
combine_parts <- function(x) {
# compute table
tab_out <- build_tab(x)
# prepare test statistics
html_tests <- prepare_stats(x)
# create html table
html_table <- prepare_table(tab_out)
# add headings
html_table <- add_headings(html_table, tab_out, x)
# create html page
html_page <- create_page(html_table, html_tests)
html_page
}
#' HTML parts of page
#'
#' This function currently serves to prepare the page layout.
#'
#' Later on, this function should become more flexible, so that the content of
#' the page depends on the content of the table.
#'
#' @param table A bare HTML table, created with \code{htmlTable}.
#' @param stats Character output from a statistical test.
#' @return A \code{tagList} with registered dependencies.
#' @keywords internal
create_page <- function(table, stats) {
# create link to stylesheet
style_link <- htmltools::htmlDependency(
name = "crosstabr",
version = as.character(utils::packageVersion("crosstabr")),
src = system.file(package = "crosstabr"),
stylesheet = "css/crosstabr.css"
)
# Create page without statistics
if (is.null(stats)) {
html <- tagList(
tags$body(
div(id = "tables",
div(id = "two-way",
HTML(table)
)
)
)
)
} else if (!is.null(stats)) { # Create page with statistics
style_link$stylesheet <- c(style_link$stylesheet, "css/with_stats.css")
html <- tagList(
tags$body(
div(id = "tables",
div(id = "two-way",
HTML(table)
)
),
div(id = "stats",
HTML(stats)
)
)
)
}
html <- attachDependencies(html, style_link)
html
}
#' Creates a HTML table
#'
#' @param x A matrix, created by \code{build_tab}.
#' @return A table in HTML format, without inline styling.
#' @keywords internal
prepare_table <- function(x) {
# create html_table
x <- utils::capture.output(
print(htmlTable::htmlTable(x), useViewer = F)
)
# pattern to remove inline css-styles
style_pattern <- "(style).*(?=;).{2}"
# remove inline css
x %>%
stringr::str_replace_all(style_pattern, "") %>%
stringr::str_c(collapse = "")
}
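# Rough illustration of the pattern above on a hand-written cell (kept as a
# comment so nothing runs at package load; the inline CSS actually emitted by
# htmlTable::htmlTable() may differ):
#   stringr::str_replace_all("<td style='text-align: center;'>42</td>",
#                            "(style).*(?=;).{2}", "")
#   # expected to drop the style attribute, leaving "<td >42</td>"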
#' @keywords internal
add_headings <- function(html_table, tab_out, x) {
# insert heading into original table
html_table <- stringr::str_replace(html_table, "<th >",
paste0("<th>", x$dependent))
# find number of cols and rows (excluding total col)
dimensions <- dim(tab_out) - 1
cols <- dimensions[2]
rows <- dimensions[1]
top <- paste0("<table id='outer_table'><tbody><tr id='headings'><td></td>",
"<td id='independent' colspan='",
cols,
"'>",
x$independent,
"</td><td></td></tr><tr><td colspan='",
cols + 2,
"'>")
bottom <- "</td></tr></tbody></table>"
result <- paste(top, html_table, bottom)
result
}
|
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1837701012L, NA, 1632068659L ), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L ), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609875408-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 720 |
r
|
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1837701012L, NA, 1632068659L ), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L ), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
#' @importFrom ggplot2 aes ggtitle guides theme guide_legend
#' @importFrom ggplot2 scale_size scale_color_manual
#' @importFrom ggplot2 GeomSegment GeomText GeomPoint
#' @importFrom ggrepel geom_label_repel
#' @importFrom tidytree groupOTU
#' @importFrom ggtree ggtree geom_point2 geom_tiplab
#' @rdname plotSingleSite
#' @title Color the tree by a single site
#' @description Plot and color the tree according to amino acid/nucleotide of
#' the selected site. The color scheme depends on the \code{seqType} set in
#' \code{\link{addMSA}} function.
#' @param x The object to plot.
#' @param site For \code{lineagePath}, it can be any site within sequence
#' length. For \code{fixationSites} and \code{parallelSites}, it is restrained
#' to a predicted fixation site. The numbering is consistent with the
#' reference defined by \code{\link{setSiteNumbering}}.
#' @param ... Other arguments. Since 1.5.4, the function uses
#' \code{\link{ggtree}} as the base function to make plots so the arguments in
#' \code{plot.phylo} will no longer work.
#' @return Since 1.5.4, the function returns a ggplot object so it no longer
#' behaves like the generic \code{\link{plot}} function.
#' @seealso \code{\link{plot.sitePath}}
#' @export
#' @examples
#' data(zikv_tree)
#' data(zikv_align)
#' tree <- addMSA(zikv_tree, alignment = zikv_align)
#' paths <- lineagePath(tree)
#' plotSingleSite(paths, 139)
plotSingleSite <- function(x, site, ...) {
UseMethod("plotSingleSite")
}
#' @rdname plotSingleSite
#' @description For \code{\link{lineagePath}}, the tree will be colored
#' according to the amino acid of the site. The color scheme tries to assign
#' distinguishable color for each amino acid.
#' @param showPath Whether to plot the lineage result from \code{\link{lineagePath}}.
#' The default is \code{TRUE}.
#' @param showTips Whether to plot the tip labels. The default is \code{FALSE}.
#' @export
plotSingleSite.lineagePath <- function(x,
site,
showPath = TRUE,
showTips = FALSE,
...) {
seqType <- attr(x, "seqType")
group <- extractTips.lineagePath(x, site)
# Use different color scheme depending on the sequence type
names(group) <- toupper(names(group))
groupColors <- .siteColorScheme(seqType)
tree <- attr(x, "tree")
group <- groupOTU(as_tibble(tree), group)
group <- group[["group"]]
size <- NULL
sizeRange <- c(GeomSegment[["default_aes"]][["size"]], 1.5)
# Set lineage nodes and non-lineage nodes as separate group
if (showPath) {
pathNodes <- unique(unlist(x))
pathLabel <- ".lineage"
# Color the path node black
levels(group) <- c(levels(group), pathLabel)
group[pathNodes] <- pathLabel
lineageColor <- "black"
names(lineageColor) <- pathLabel
groupColors <- c(groupColors, lineageColor)
# Set the size of the lineage nodes
size <- rep(1, times = length(group))
size[pathNodes] <- 2
}
if (seqType == "AA") {
legendTitle <- "Amino acid"
} else {
legendTitle <- "Nucleotide"
}
p <- ggtree(tree, aes(color = group, size = size)) +
scale_size(range = sizeRange, guide = "none") +
scale_color_manual(values = groupColors,
limits = unique(group)) +
guides(color = guide_legend(title = legendTitle,
override.aes = list(size = 3))) +
theme(legend.position = "left") +
ggtitle(site)
if (showTips) {
p <- p + geom_tiplab()
}
return(p)
}
.siteColorScheme <- function(seqType) {
if (seqType == "AA") {
groupColors <- vapply(
X = AA_FULL_NAMES,
FUN = function(i) {
AA_COLORS[[i]]
},
FUN.VALUE = character(1)
)
} else {
groupColors <- NT_COLORS
}
names(groupColors) <- toupper(names(groupColors))
groupColors[["hide"]] <- NA
return(groupColors)
}
#' @rdname plotSingleSite
#' @export
plotSingleSite.sitesMinEntropy <- function(x,
site,
...) {
tree <- as.phylo.sitesMinEntropy(x)
allPaths <- attr(x, "paths")
# Specify the color of mutations by pre-defined color set.
sitePaths <- lapply(x, "[[", as.character(site))
seqType <- attr(allPaths, "seqType")
groupColors <- .siteColorScheme(seqType)
if (seqType == "AA") {
legendTitle <- "Amino acid"
} else {
legendTitle <- "Nucleotide"
}
# Collect the fixation mutation for each evolutionary pathway
group <- list()
for (seg in sitePaths) {
for (tips in seg) {
fixedAA <- attr(tips, "AA")
if (fixedAA %in% names(group)) {
group[[fixedAA]] <- c(group[[fixedAA]], tips)
} else {
group[[fixedAA]] <- tips
}
}
}
tree <- groupOTU(tree, group)
# Just in case the fixation mutation name is too long
# Annotate the mutation on the tree
p <- ggtree(tree, aes(color = group)) +
scale_color_manual(values = groupColors, limits = names(group)) +
guides(linetype = "none",
color = guide_legend(title = legendTitle,
override.aes = list(size = 3))) +
theme(legend.position = "left") +
ggtitle(site)
return(p)
}
#' @rdname plotSingleSite
#' @description For \code{\link{parallelSites}}, the tree will be colored
#' according to the amino acid of the site if the mutation is not fixed.
#' @export
plotSingleSite.parallelSites <- function(x,
site,
showPath = TRUE,
...) {
paths <- attr(x, "paths")
tree <- attr(paths, "tree")
tipNames <- tree[["tip.label"]]
nNodes <- length(tipNames) + tree[["Nnode"]]
parallelMut <- extractTips.parallelSites(x, site)
fixationMut <- character()
sporadicTip <- rep(FALSE, nNodes)
for (node in names(parallelMut)) {
tips <- parallelMut[[node]]
if (attr(tips, "fixed")) {
fixationMut[node] <- attr(tips, "mutName")[4]
} else {
sporadicTip[which(tipNames == node)] <- TRUE
}
}
if (length(fixationMut) != 0) {
attr(paths, "tree") <- .annotateSNPonTree(tree, fixationMut)
p <- plotSingleSite.lineagePath(
x = paths,
site = site,
showPath = showPath,
showTips = FALSE
) +
geom_label_repel(
aes(x = branch, label = SNPs),
fill = 'lightgreen',
color = "black",
min.segment.length = 0,
na.rm = TRUE,
size = GeomText[["default_aes"]][["size"]]
)
} else {
p <- plotSingleSite.lineagePath(
x = paths,
site = site,
showPath = showPath,
showTips = FALSE
)
}
if (any(sporadicTip)) {
p <- p + geom_point2(aes(subset = sporadicTip,
size = GeomPoint[["default_aes"]][["size"]]))
}
return(p)
}
#' @rdname plotSingleSite
#' @description For \code{\link{fixationSites}}, it will color the ancestral
#' tips in red, descendant tips in blue and excluded tips in grey.
#' @param select Select which fixation path to plot. The default is NULL
#' which will plot all the fixations.
#' @export
#' @examples
#' fixations <- fixationSites(paths)
#' plotSingleSite(fixations, 139)
plotSingleSite.fixationSites <- function(x,
site,
select = NULL,
...) {
plot.sitePath(x = .actualExtractSite(x, site),
y = TRUE,
select = select,
...)
}
|
/R/plotSingleSite.R
|
permissive
|
wuaipinglab/sitePath
|
R
| false | false | 8,127 |
r
|
#' @importFrom ggplot2 aes ggtitle guides theme guide_legend
#' @importFrom ggplot2 scale_size scale_color_manual
#' @importFrom ggplot2 GeomSegment GeomText GeomPoint
#' @importFrom ggrepel geom_label_repel
#' @importFrom tidytree groupOTU
#' @importFrom ggtree ggtree geom_point2 geom_tiplab
#' @rdname plotSingleSite
#' @title Color the tree by a single site
#' @description Plot and color the tree according to amino acid/nucleotide of
#' the selected site. The color scheme depends on the \code{seqType} set in
#' \code{\link{addMSA}} function.
#' @param x The object to plot.
#' @param site For \code{lineagePath}, it can be any site within sequence
#' length. For \code{fixationSites} and \code{parallelSites}, it is restrained
#' to a predicted fixation site. The numbering is consistent with the
#' reference defined by \code{\link{setSiteNumbering}}.
#' @param ... Other arguments. Since 1.5.4, the function uses
#' \code{\link{ggtree}} as the base function to make plots so the arguments in
#' \code{plot.phylo} will no longer work.
#' @return Since 1.5.4, the function returns a ggplot object so it no longer
#' behaves like the generic \code{\link{plot}} function.
#' @seealso \code{\link{plot.sitePath}}
#' @export
#' @examples
#' data(zikv_tree)
#' data(zikv_align)
#' tree <- addMSA(zikv_tree, alignment = zikv_align)
#' paths <- lineagePath(tree)
#' plotSingleSite(paths, 139)
plotSingleSite <- function(x, site, ...) {
UseMethod("plotSingleSite")
}
#' @rdname plotSingleSite
#' @description For \code{\link{lineagePath}}, the tree will be colored
#' according to the amino acid of the site. The color scheme tries to assign
#' distinguishable color for each amino acid.
#' @param showPath Whether to plot the lineage result from \code{\link{lineagePath}}.
#' The default is \code{TRUE}.
#' @param showTips Whether to plot the tip labels. The default is \code{FALSE}.
#' @export
plotSingleSite.lineagePath <- function(x,
site,
showPath = TRUE,
showTips = FALSE,
...) {
seqType <- attr(x, "seqType")
group <- extractTips.lineagePath(x, site)
# Use different color scheme depending on the sequence type
names(group) <- toupper(names(group))
groupColors <- .siteColorScheme(seqType)
tree <- attr(x, "tree")
group <- groupOTU(as_tibble(tree), group)
group <- group[["group"]]
size <- NULL
sizeRange <- c(GeomSegment[["default_aes"]][["size"]], 1.5)
# Set lineage nodes and non-lineage nodes as separate group
if (showPath) {
pathNodes <- unique(unlist(x))
pathLabel <- ".lineage"
# Color the path node black
levels(group) <- c(levels(group), pathLabel)
group[pathNodes] <- pathLabel
lineageColor <- "black"
names(lineageColor) <- pathLabel
groupColors <- c(groupColors, lineageColor)
# Set the size of the lineage nodes
size <- rep(1, times = length(group))
size[pathNodes] <- 2
}
if (seqType == "AA") {
legendTitle <- "Amino acid"
} else {
legendTitle <- "Nucleotide"
}
p <- ggtree(tree, aes(color = group, size = size)) +
scale_size(range = sizeRange, guide = "none") +
scale_color_manual(values = groupColors,
limits = unique(group)) +
guides(color = guide_legend(title = legendTitle,
override.aes = list(size = 3))) +
theme(legend.position = "left") +
ggtitle(site)
if (showTips) {
p <- p + geom_tiplab()
}
return(p)
}
.siteColorScheme <- function(seqType) {
if (seqType == "AA") {
groupColors <- vapply(
X = AA_FULL_NAMES,
FUN = function(i) {
AA_COLORS[[i]]
},
FUN.VALUE = character(1)
)
} else {
groupColors <- NT_COLORS
}
names(groupColors) <- toupper(names(groupColors))
groupColors[["hide"]] <- NA
return(groupColors)
}
#' @rdname plotSingleSite
#' @export
plotSingleSite.sitesMinEntropy <- function(x,
site,
...) {
tree <- as.phylo.sitesMinEntropy(x)
allPaths <- attr(x, "paths")
# Specify the color of mutations by pre-defined color set.
sitePaths <- lapply(x, "[[", as.character(site))
seqType <- attr(allPaths, "seqType")
groupColors <- .siteColorScheme(seqType)
if (seqType == "AA") {
legendTitle <- "Amino acid"
} else {
legendTitle <- "Nucleotide"
}
# Collect the fixation mutation for each evolutionary pathway
group <- list()
for (seg in sitePaths) {
for (tips in seg) {
fixedAA <- attr(tips, "AA")
if (fixedAA %in% names(group)) {
group[[fixedAA]] <- c(group[[fixedAA]], tips)
} else {
group[[fixedAA]] <- tips
}
}
}
tree <- groupOTU(tree, group)
# Just in case the fixation mutation name is too long
# Annotate the mutation on the tree
p <- ggtree(tree, aes(color = group)) +
scale_color_manual(values = groupColors, limits = names(group)) +
guides(linetype = "none",
color = guide_legend(title = legendTitle,
override.aes = list(size = 3))) +
theme(legend.position = "left") +
ggtitle(site)
return(p)
}
#' @rdname plotSingleSite
#' @description For \code{\link{parallelSites}}, the tree will be colored
#' according to the amino acid of the site if the mutation is not fixed.
#' @export
plotSingleSite.parallelSites <- function(x,
site,
showPath = TRUE,
...) {
paths <- attr(x, "paths")
tree <- attr(paths, "tree")
tipNames <- tree[["tip.label"]]
nNodes <- length(tipNames) + tree[["Nnode"]]
parallelMut <- extractTips.parallelSites(x, site)
fixationMut <- character()
sporadicTip <- rep(FALSE, nNodes)
for (node in names(parallelMut)) {
tips <- parallelMut[[node]]
if (attr(tips, "fixed")) {
fixationMut[node] <- attr(tips, "mutName")[4]
} else {
sporadicTip[which(tipNames == node)] <- TRUE
}
}
if (length(fixationMut) != 0) {
attr(paths, "tree") <- .annotateSNPonTree(tree, fixationMut)
p <- plotSingleSite.lineagePath(
x = paths,
site = site,
showPath = showPath,
showTips = FALSE
) +
geom_label_repel(
aes(x = branch, label = SNPs),
fill = 'lightgreen',
color = "black",
min.segment.length = 0,
na.rm = TRUE,
size = GeomText[["default_aes"]][["size"]]
)
} else {
p <- plotSingleSite.lineagePath(
x = paths,
site = site,
showPath = showPath,
showTips = FALSE
)
}
if (any(sporadicTip)) {
p <- p + geom_point2(aes(subset = sporadicTip,
size = GeomPoint[["default_aes"]][["size"]]))
}
return(p)
}
#' @rdname plotSingleSite
#' @description For \code{\link{fixationSites}}, it will color the ancestral
#' tips in red, descendant tips in blue and excluded tips in grey.
#' @param select Select which fixation path to plot. The default is NULL
#' which will plot all the fixations.
#' @export
#' @examples
#' fixations <- fixationSites(paths)
#' plotSingleSite(fixations, 139)
plotSingleSite.fixationSites <- function(x,
site,
select = NULL,
...) {
plot.sitePath(x = .actualExtractSite(x, site),
y = TRUE,
select = select,
...)
}
|
# GBM
# Rscript cmp_before_after_coexprDist.R GSE105194_ENCFF027IEO_astroCerebellum_vs_GSE105957_ENCFF715HDW_astroSpinal TCGAgbm_classical_mesenchymal #
# Rscript cmp_before_after_coexprDist.R GSE105194_ENCFF027IEO_astroCerebellum_vs_GSE105957_ENCFF715HDW_astroSpinal TCGAgbm_classical_neural #
# Rscript cmp_before_after_coexprDist.R GSE105194_ENCFF027IEO_astroCerebellum_vs_GSE105957_ENCFF715HDW_astroSpinal TCGAgbm_classical_proneural #
# COLORECTAL
# Rscript cmp_before_after_coexprDist.R GSE105318_ENCFF439QFU_DLD1 TCGAcoad_msi_mss # no change
# BREAST
# Rscript cmp_before_after_coexprDist.R GSM1631185_MCF7_vs_GSE75070_MCF7_shGFP TCGAbrca_lum_bas #
# KIDNEY
# Rscript cmp_before_after_coexprDist.R GSE105465_ENCFF777DUA_Caki2_vs_GSE105235_ENCFF235TGH_G401 TCGAkich_norm_kich #
# LUNG
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAluad_mutKRAS_mutEGFR #
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAluad_nonsmoker_smoker #
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAluad_wt_mutKRAS #
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAlusc_norm_lusc #
# SKIN
# Rscript cmp_before_after_coexprDist.R GSE106022_ENCFF614EKT_RPMI7951_vs_GSE105491_ENCFF458OWO_SKMEL5 TCGAskcm_lowInf_highInf #
# Rscript cmp_before_after_coexprDist.R GSE106022_ENCFF614EKT_RPMI7951_vs_GSE105491_ENCFF458OWO_SKMEL5 TCGAskcm_wt_mutBRAF #
# Rscript cmp_before_after_coexprDist.R GSE106022_ENCFF614EKT_RPMI7951_vs_GSE105491_ENCFF458OWO_SKMEL5 TCGAskcm_wt_mutCTNNB1 #
# PANCREAS
# Rscript cmp_before_after_coexprDist.R GSE105566_ENCFF358MNA_Panc1 TCGApaad_wt_mutKRAS
dataset="GSE105318_ENCFF439QFU_DLD1"
exprds="TCGAcoad_msi_mss"
cat("> START ", "cmp_before_after_coexprDist.R", "\n")
# Rscript cmp_before_after_coexprDist.R GSE105318_ENCFF439QFU_DLD1 TCGAcoad_msi_mss_hgnc
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 2)
dataset=args[1]
exprds=args[2]
exprds=paste0(exprds, "_hgnc")
cat("... START: ", dataset, " - ", exprds, "\n")
cat("load genefam0 \n")
genefam0 = eval(parse(text=load("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/PREP_GENE_FAMILIES_TAD_DATA/hgnc_entrezID_family_TAD_DT.Rdata")))
cat("load samefam0 \n")
samefam0 = eval(parse(text=load("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/CREATE_SAME_FAMILY_SORTNODUP/hgnc_family_all_family_pairs.Rdata")))
cat("load dist0 \n")
dist0 = eval(parse(text=load("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/CREATE_DIST_SORTNODUP/all_dist_pairs.Rdata")))
cat("load coexpr0 \n")
coexpr0 = eval(parse(text=load(paste0("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/CREATE_COEXPR_SORTNODUP/", gsub("hgnc", "", exprds), "pearson/coexprDT.Rdata"))))
cat("load genefam1 \n")
genefam1 = eval(parse(text=load(paste0("PREP_GENE_FAMILIES_TAD_DATA/", dataset, "/hgnc_entrezID_family_TAD_DT.Rdata"))))
cat("load samefam1 \n")
samefam1 = eval(parse(text=load(paste0("CREATE_SAME_FAMILY_SORTNODUP/", dataset, "/hgnc_family_short_all_family_pairs.Rdata"))))
cat("load dist1 \n")
dist1 = eval(parse(text=load(paste0( "CREATE_DIST_SORTNODUP/", dataset, "/all_dist_pairs.Rdata"))))
cat("load coexpr1 \n")
coexpr1 = eval(parse(text=load(paste0( "CREATE_COEXPR_SORTNODUP/", dataset, "/", gsub("hgnc", "", exprds), "pearson/coexprDT.Rdata"))))
cat("... dim(genefam0) = ", dim(genefam0), "\n")
cat("... dim(genefam1) = ", dim(genefam1), "\n")
cat("... dim(samefam0) = ", dim(samefam0), "\n")
cat("... dim(samefam1) = ", dim(samefam1), "\n")
cat("... dim(dist0) = ", dim(dist0), "\n")
cat("... dim(dist1) = ", dim(dist1), "\n")
cat("... dim(coexpr0) = ", dim(coexpr0), "\n")
cat("... dim(coexpr1) = ", dim(coexpr1), "\n")
### FOR THE RATIO RESULTS
cat("load ratio0 \n")
# AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/GSE105318_ENCFF439QFU_DLD1/TCGAcoad_msi_mss_hgnc/hgnc_family_short/auc_values.Rdata
ratioFile0 <- paste0("AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/", dataset, "/", exprds, "/hgnc_family_short/auc_values.Rdata")
ratio0 = eval(parse(text=load(ratioFile0)))
# AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/GSE105318_ENCFF439QFU_DLD1/TCGAcoad_msi_mss_hgnc_hgnc/hgnc_family_short/auc_values.Rdata
# AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/GSE105318_ENCFF439QFU_DLD1/TCGAcoad_msi_mss_hgnc/hgnc_family_short/auc_values.Rdata")
cat("load ratio1 \n")
ratio1 = eval(parse(text=load(paste0("AUC_COEXPRDIST_WITHFAM_SORTNODUP/", dataset, "/", exprds, "/hgnc_family_short/auc_values.Rdata"))))
all_vars <- c(
"auc_diffTAD_distVect",
"auc_sameTAD_distVect",
"auc_ratio_same_over_diff_distVect",
"auc_diffTAD_obsDist",
"auc_sameTAD_obsDist",
"auc_ratio_same_over_diff_obsDist",
"auc_sameFamDiffTAD_distVect",
"auc_sameFamSameTAD_distVect",
"auc_ratio_sameFam_same_over_diff_distVect",
"auc_sameFamDiffTAD_obsDist",
"auc_sameFamSameTAD_obsDist",
"auc_ratio_sameFam_same_over_diff_obsDist"
)
var="auc_ratio_sameFam_same_over_diff_obsDist"
for(var in all_vars) {
if(ratio0[[paste0(var)]] != ratio1[[paste0(var)]]){
cat(paste0("...... ", var, "\nratio0=", ratio0[[paste0(var)]] , "\nratio1=",ratio1[[paste0(var)]], "\n" ))
} else{
cat(paste0("...... ", var, "\nratio0==ratio1\n" ))
}
}
|
/cmp_before_after_coexprDist.R
|
no_license
|
marzuf/Dixon2018_integrative_data
|
R
| false | false | 5,577 |
r
|
# GBM
# Rscript cmp_before_after_coexprDist.R GSE105194_ENCFF027IEO_astroCerebellum_vs_GSE105957_ENCFF715HDW_astroSpinal TCGAgbm_classical_mesenchymal #
# Rscript cmp_before_after_coexprDist.R GSE105194_ENCFF027IEO_astroCerebellum_vs_GSE105957_ENCFF715HDW_astroSpinal TCGAgbm_classical_neural #
# Rscript cmp_before_after_coexprDist.R GSE105194_ENCFF027IEO_astroCerebellum_vs_GSE105957_ENCFF715HDW_astroSpinal TCGAgbm_classical_proneural #
# COLORECTAL
# Rscript cmp_before_after_coexprDist.R GSE105318_ENCFF439QFU_DLD1 TCGAcoad_msi_mss # no change
# BREAST
# Rscript cmp_before_after_coexprDist.R GSM1631185_MCF7_vs_GSE75070_MCF7_shGFP TCGAbrca_lum_bas #
# KIDNEY
# Rscript cmp_before_after_coexprDist.R GSE105465_ENCFF777DUA_Caki2_vs_GSE105235_ENCFF235TGH_G401 TCGAkich_norm_kich #
# LUNG
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAluad_mutKRAS_mutEGFR #
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAluad_nonsmoker_smoker #
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAluad_wt_mutKRAS #
# Rscript cmp_before_after_coexprDist.R GSE105600_ENCFF852YOE_A549_vs_GSE105725_ENCFF697NNX_NCIH460 TCGAlusc_norm_lusc #
# SKIN
# Rscript cmp_before_after_coexprDist.R GSE106022_ENCFF614EKT_RPMI7951_vs_GSE105491_ENCFF458OWO_SKMEL5 TCGAskcm_lowInf_highInf #
# Rscript cmp_before_after_coexprDist.R GSE106022_ENCFF614EKT_RPMI7951_vs_GSE105491_ENCFF458OWO_SKMEL5 TCGAskcm_wt_mutBRAF #
# Rscript cmp_before_after_coexprDist.R GSE106022_ENCFF614EKT_RPMI7951_vs_GSE105491_ENCFF458OWO_SKMEL5 TCGAskcm_wt_mutCTNNB1 #
# PANCREAS
# Rscript cmp_before_after_coexprDist.R GSE105566_ENCFF358MNA_Panc1 TCGApaad_wt_mutKRAS
dataset="GSE105318_ENCFF439QFU_DLD1"
exprds="TCGAcoad_msi_mss"
cat("> START ", "cmp_before_after_coexprDist.R", "\n")
# Rscript cmp_before_after_coexprDist.R GSE105318_ENCFF439QFU_DLD1 TCGAcoad_msi_mss_hgnc
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 2)
dataset=args[1]
exprds=args[2]
exprds=paste0(exprds, "_hgnc")
cat("... START: ", dataset, " - ", exprds, "\n")
cat("load genefam0 \n")
genefam0 = eval(parse(text=load("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/PREP_GENE_FAMILIES_TAD_DATA/hgnc_entrezID_family_TAD_DT.Rdata")))
cat("load samefam0 \n")
samefam0 = eval(parse(text=load("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/CREATE_SAME_FAMILY_SORTNODUP/hgnc_family_all_family_pairs.Rdata")))
cat("load dist0 \n")
dist0 = eval(parse(text=load("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/CREATE_DIST_SORTNODUP/all_dist_pairs.Rdata")))
cat("load coexpr0 \n")
coexpr0 = eval(parse(text=load(paste0("/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2_TopDom/CREATE_COEXPR_SORTNODUP/", gsub("hgnc", "", exprds), "pearson/coexprDT.Rdata"))))
cat("load genefam1 \n")
genefam1 = eval(parse(text=load(paste0("PREP_GENE_FAMILIES_TAD_DATA/", dataset, "/hgnc_entrezID_family_TAD_DT.Rdata"))))
cat("load samefam1 \n")
samefam1 = eval(parse(text=load(paste0("CREATE_SAME_FAMILY_SORTNODUP/", dataset, "/hgnc_family_short_all_family_pairs.Rdata"))))
cat("load dist1 \n")
dist1 = eval(parse(text=load(paste0( "CREATE_DIST_SORTNODUP/", dataset, "/all_dist_pairs.Rdata"))))
cat("load coexpr1 \n")
coexpr1 = eval(parse(text=load(paste0( "CREATE_COEXPR_SORTNODUP/", dataset, "/", gsub("hgnc", "", exprds), "pearson/coexprDT.Rdata"))))
cat("... dim(genefam0) = ", dim(genefam0), "\n")
cat("... dim(genefam1) = ", dim(genefam1), "\n")
cat("... dim(samefam0) = ", dim(samefam0), "\n")
cat("... dim(samefam1) = ", dim(samefam1), "\n")
cat("... dim(dist0) = ", dim(dist0), "\n")
cat("... dim(dist1) = ", dim(dist1), "\n")
cat("... dim(coexpr0) = ", dim(coexpr0), "\n")
cat("... dim(coexpr1) = ", dim(coexpr1), "\n")
### FOR THE RATIO RESULTS
cat("load ratio0 \n")
# AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/GSE105318_ENCFF439QFU_DLD1/TCGAcoad_msi_mss_hgnc/hgnc_family_short/auc_values.Rdata
ratioFile0 <- paste0("AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/", dataset, "/", exprds, "/hgnc_family_short/auc_values.Rdata")
ratio0 = eval(parse(text=load(ratioFile0)))
# AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/GSE105318_ENCFF439QFU_DLD1/TCGAcoad_msi_mss_hgnc_hgnc/hgnc_family_short/auc_values.Rdata
# AUC_COEXPRDIST_WITHFAM_SORTNODUP_BEFORE08.01.19_sameTAD_sameFamFile/GSE105318_ENCFF439QFU_DLD1/TCGAcoad_msi_mss_hgnc/hgnc_family_short/auc_values.Rdata")
cat("load ratio1 \n")
ratio1 = eval(parse(text=load(paste0("AUC_COEXPRDIST_WITHFAM_SORTNODUP/", dataset, "/", exprds, "/hgnc_family_short/auc_values.Rdata"))))
all_vars <- c(
"auc_diffTAD_distVect",
"auc_sameTAD_distVect",
"auc_ratio_same_over_diff_distVect",
"auc_diffTAD_obsDist",
"auc_sameTAD_obsDist",
"auc_ratio_same_over_diff_obsDist",
"auc_sameFamDiffTAD_distVect",
"auc_sameFamSameTAD_distVect",
"auc_ratio_sameFam_same_over_diff_distVect",
"auc_sameFamDiffTAD_obsDist",
"auc_sameFamSameTAD_obsDist",
"auc_ratio_sameFam_same_over_diff_obsDist"
)
var="auc_ratio_sameFam_same_over_diff_obsDist"
for(var in all_vars) {
if(ratio0[[paste0(var)]] != ratio1[[paste0(var)]]){
cat(paste0("...... ", var, "\nratio0=", ratio0[[paste0(var)]] , "\nratio1=",ratio1[[paste0(var)]], "\n" ))
} else{
cat(paste0("...... ", var, "\nratio0==ratio1\n" ))
}
}
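# Illustrative variant (not in the original comparison): the AUC ratios are
# floating point, so an exact != can flag negligible differences; a
# tolerance-based check with all.equal() is the usual alternative.
for(var in all_vars) {
  if(!isTRUE(all.equal(ratio0[[var]], ratio1[[var]]))) {
    cat(paste0("...... ", var, " differs beyond numerical tolerance\n"))
  }
}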
|
########################################
input_file_name = "data/data_2020_05_28/application_sample.csv"
output_file_name = "data/data_2020_05_28/application_whole_track.csv"
layers <- c(1:10)
input <- read.csv(input_file_name)
application <- input[, c(1, c(11:120)) ]
column_names <- c("hit_id", "x", "y", "z", "rho", "eta", "phi",
"volume_id", "layer_id", "module_id", "value")
colnames(application)[1] <- "sample_id"
count.column = 2
for( l in layers ){
for( c in 1:length(column_names) ){
column.name <- paste0(column_names[c], "_", l)
colnames(application)[count.column] <- column.name
count.column = count.column + 1
}
}
write.csv( application, file=output_file_name, row.names=FALSE )
|
/create_application_whole_track.R
|
no_license
|
AngeloSantos/TrackML
|
R
| false | false | 728 |
r
|
########################################
input_file_name = "data/data_2020_05_28/application_sample.csv"
output_file_name = "data/data_2020_05_28/application_whole_track.csv"
layers <- c(1:10)
input <- read.csv(input_file_name)
application <- input[, c(1, c(11:120)) ]
column_names <- c("hit_id", "x", "y", "z", "rho", "eta", "phi",
"volume_id", "layer_id", "module_id", "value")
colnames(application)[1] <- "sample_id"
count.column = 2
for( l in layers ){
for( c in 1:length(column_names) ){
column.name <- paste0(column_names[c], "_", l)
colnames(application)[count.column] <- column.name
count.column = count.column + 1
}
}
write.csv( application, file=output_file_name, row.names=FALSE )
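# Illustrative alternative (vec_names is introduced only for this sketch): the
# same "<field>_<layer>" names can be built in one step with outer() instead of
# the nested loop above.
vec_names <- c("sample_id", as.vector(outer(column_names, layers, paste, sep = "_")))
identical(vec_names, colnames(application))  # expected TRUE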
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/entrez_info.r
\name{entrez_db_searchable}
\alias{entrez_db_searchable}
\title{List available search fields for a given database}
\usage{
entrez_db_searchable(db, config = NULL)
}
\arguments{
\item{db}{character, name of database to get search field from}
\item{config}{config vector passed to \code{httr::GET}}
}
\value{
An eInfoSearch object (subclassed from list) summarising the search fields available for this database.
Can be coerced to a data-frame with \code{as.data.frame}. Printing the object
shows only the names of each available search field.
}
\description{
Can be used in conjunction with \code{\link{entrez_search}} to find available
search fields to include in the \code{term} argument of that function.
}
\examples{
\donttest{
(pmc_fields <- entrez_db_searchable("pmc"))
pmc_fields[["AFFL"]]
entrez_search(db="pmc", term="Otago[AFFL]", retmax=0)
entrez_search(db="pmc", term="Auckland[AFFL]", retmax=0)
sra_fields <- entrez_db_searchable("sra")
as.data.frame(sra_fields)
}
}
\seealso{
\code{\link{entrez_search}}
Other einfo: \code{\link{entrez_db_links}};
\code{\link{entrez_db_summary}};
\code{\link{entrez_dbs}}; \code{\link{entrez_info}}
}
|
/man/entrez_db_searchable.Rd
|
no_license
|
F-104S/rentrez
|
R
| false | false | 1,226 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/entrez_info.r
\name{entrez_db_searchable}
\alias{entrez_db_searchable}
\title{List available search fields for a given database}
\usage{
entrez_db_searchable(db, config = NULL)
}
\arguments{
\item{db}{character, name of database to get search field from}
\item{config}{config vector passed to \code{httr::GET}}
}
\value{
An eInfoSearch object (subclassed from list) summarising the search fields available for this database.
Can be coerced to a data-frame with \code{as.data.frame}. Printing the object
shows only the names of each available search field.
}
\description{
Can be used in conjunction with \code{\link{entrez_search}} to find available
search fields to include in the \code{term} argument of that function.
}
\examples{
\donttest{
(pmc_fields <- entrez_db_searchable("pmc"))
pmc_fields[["AFFL"]]
entrez_search(db="pmc", term="Otago[AFFL]", retmax=0)
entrez_search(db="pmc", term="Auckland[AFFL]", retmax=0)
sra_fields <- entrez_db_searchable("sra")
as.data.frame(sra_fields)
}
}
\seealso{
\code{\link{entrez_search}}
Other einfo: \code{\link{entrez_db_links}};
\code{\link{entrez_db_summary}};
\code{\link{entrez_dbs}}; \code{\link{entrez_info}}
}
|
flights %>%
group_by(month, day) %>%
summarize(
cancelled = sum(is.na(arr_delay)),
avg_dep_delay = mean(dep_delay, na.rm = TRUE),
avg_arr_delay = mean(arr_delay, na.rm = TRUE)
) %>%
filter(cancelled > 0) %>%
arrange(desc(cancelled))
|
/cap05/arrange09.R
|
permissive
|
vcwild/r4ds
|
R
| false | false | 281 |
r
|
flights %>%
group_by(month, day) %>%
summarize(
cancelled = sum(is.na(arr_delay)),
avg_dep_delay = mean(dep_delay, na.rm = TRUE),
avg_arr_delay = mean(arr_delay, na.rm = TRUE)
) %>%
filter(cancelled > 0) %>%
arrange(desc(cancelled))
|
# Data pre-processing before feeding into the model
dataset <- readRDS("data/dataset.rds")
centering <- readRDS("data/stats.rds")
utils <- new.env()
source("utilities.r", local = utils)
# 1-1. Permuting rows of the dataset before train-test split
set.seed(2021-3-8)
permuted_rows <- sample(nrow(dataset$Z))
z <- dataset$Z[permuted_rows, ]
y <- dataset$Y[permuted_rows, ]
labels <- dataset$group[permuted_rows]
J <- max(dataset$id) # no. of patients
jj <- dataset$id[permuted_rows]
J_train <- round(J * .7)
jj_train <- jj <= J_train
jj_test <- jj > J_train
N_train <- sum(jj_train)
N_test <- sum(jj_test)
N <- N_train + N_test
# 1-2. Exclusion of the nasal quadrant
QUADRANT_NO <- ncol(dataset$Z) / 4
P <- 3 * QUADRANT_NO
Q <- ncol(dataset$Y)
zkeep_inds <- c(
seq(QUADRANT_NO * 3 + 1, QUADRANT_NO * 4),
seq(1, QUADRANT_NO * 2)
)
z <- z[, zkeep_inds]
# 1-3. "Centering" and rescaling to mm (to a more interpretable scale)
z <- sweep(z, 2, centering$cp["q5", zkeep_inds])
y <- sweep(y, 2, centering$m["q5", ])
z <- z / 1000
y <- y / 1000
# 1-4. Resolution downscaling of cpRNFL image
# Average by pairs => each location ~.9 angle apart
z <- .5 * z %*% (diag(1, P / 2) %x% c(1, 1))
P <- P / 2
# 1-6. Train-test set split
z_train <- z[jj_train, ]
y_train <- y[jj_train, ]
z_test <- z[jj_test, ]
y_test <- y[jj_test, ]
# 2. Knot selection over surface of macula image
# (Design set of knots is itself a tuning parameter)
Nknots_y <- 16
full_y <- as.matrix(expand.grid(1:8, 1:8))
knots <- c(11, 14, 18, 20, 21, 23, 27, 30,
35, 38, 42, 44, 45, 47, 51, 54)
distMat <- as.matrix(dist(full_y))
# [OUTDATED]
# 3. distance matrix of the cpRNFL
# to be used later as valid weighting
# 4. Finding out missing values (but not impute them)
# dim(which(is.na(y)), arr.ind = TRUE)
mis_inds <- which(is.na(y_train), arr.ind = T)
|
/main_processing.r
|
no_license
|
ybaek/SDOCT
|
R
| false | false | 1,849 |
r
|
# Data pre-processing before feeding into the model
dataset <- readRDS("data/dataset.rds")
centering <- readRDS("data/stats.rds")
utils <- new.env()
source("utilities.r", local = utils)
# 1-1. Permuting rows of the dataset before train-test split
set.seed(2021-3-8)
permuted_rows <- sample(nrow(dataset$Z))
z <- dataset$Z[permuted_rows, ]
y <- dataset$Y[permuted_rows, ]
labels <- dataset$group[permuted_rows]
J <- max(dataset$id) # no. of patients
jj <- dataset$id[permuted_rows]
J_train <- round(J * .7)
jj_train <- jj <= J_train
jj_test <- jj > J_train
N_train <- sum(jj_train)
N_test <- sum(jj_test)
N <- N_train + N_test
# 1-2. Exclusion of the nasal quadrant
QUADRANT_NO <- ncol(dataset$Z) / 4
P <- 3 * QUADRANT_NO
Q <- ncol(dataset$Y)
zkeep_inds <- c(
seq(QUADRANT_NO * 3 + 1, QUADRANT_NO * 4),
seq(1, QUADRANT_NO * 2)
)
z <- z[, zkeep_inds]
# 1-3. "Centering" and rescaling to mm (to a more interpretable scale)
z <- sweep(z, 2, centering$cp["q5", zkeep_inds])
y <- sweep(y, 2, centering$m["q5", ])
z <- z / 1000
y <- y / 1000
# 1-4. Resolution downscaling of cpRNFL image
# Average by pairs => each location ~.9 angle apart
z <- .5 * z %*% (diag(1, P / 2) %x% c(1, 1))
P <- P / 2
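# Toy illustration of the pairwise-averaging trick above (toy is a throwaway
# name, not used elsewhere): diag(1, 2) %x% c(1, 1) is a 4 x 2 matrix of 0/1
# pairs, so the product sums adjacent columns and the 0.5 turns sums into means.
toy <- matrix(1:8, nrow = 2, byrow = TRUE)
.5 * toy %*% (diag(1, 2) %x% c(1, 1))  # rows become (1.5, 3.5) and (5.5, 7.5)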
# 1-6. Train-test set split
z_train <- z[jj_train, ]
y_train <- y[jj_train, ]
z_test <- z[jj_test, ]
y_test <- y[jj_test, ]
# 2. Knot selection over surface of macula image
# (Design set of knots is itself a tuning parameter)
Nknots_y <- 16
full_y <- as.matrix(expand.grid(1:8, 1:8))
knots <- c(11, 14, 18, 20, 21, 23, 27, 30,
35, 38, 42, 44, 45, 47, 51, 54)
distMat <- as.matrix(dist(full_y))
# [OUTDATED]
# 3. distance matrix of the cpRNFL
# to be used later as valid weighting
# 4. Finding out missing values (but not impute them)
# dim(which(is.na(y)), arr.ind = TRUE)
mis_inds <- which(is.na(y_train), arr.ind = T)
|
# install.packages('gtrendsR')
library(gtrendsR)
# Run the Google Trends query
gdata <- gtrends(c("Riveredge Resort"), time = "2016-01-01 2018-12-31")
# Check the names of dataframes in the list
names(gdata)
# Check related queries
head(gdata$related_topics, 20)
# Drill down on a related query
head(gtrends("Boldt Castle")$related_queries, 20)
gdata$related_topics[1:10, ]
gdata$interest_by_dma[1:5, ]
gdata$interest_by_country[1:5, ]
# Specify category
categories[grepl("^Hotel", categories$name), ]
gdata <- gtrends(keyword = "Riveredge Resort", time = "today+5-y", category = 179)
# Check interest by country and MSA
gdata$interest_by_dma[1:5, ]
gdata$interest_by_country[1:5, ]
# Write a function to query Google Trends and plot interest over time
google_trends <- function(keyword, geo = ""){
pres_data <- gtrends(keyword = keyword, geo = geo, time = "today+5-y", onlyInterest = TRUE)
plot(pres_data, lwd = 5)
hits <- pres_data$interest_over_time$hits
last <- length(hits)
round((mean(hits[(last-10):last]) / mean(hits[1:10]) - 1) * 100)
}
# Some sample searches
google_trends('NYC')
google_trends('Riveredge Resort')
google_trends('Riveredge Resort', geo = "US-NY")
google_trends('Riveredge Resort', geo = "CA-ON")
google_trends("1000 Islands")
google_trends('1000 Islands', geo = "CA-ON")
google_trends('1000 Islands', geo = "US-NY")
google_trends("Thousand Islands")
google_trends('Thousand Islands', geo = "CA-ON")
google_trends('Thousand Islands', geo = "US-NY")
|
/google_trends.R
|
no_license
|
RomeoAlphaYankee/DataScienceR
|
R
| false | false | 1,551 |
r
|
# install.packages('gtrendsR')
library(gtrendsR)
# Run the Google Trends query
gdata <- gtrends(c("Riveredge Resort"), time = "2016-01-01 2018-12-31")
# Check the names of dataframes in the list
names(gdata)
# Check related queries
head(gdata$related_topics, 20)
# Drill down on a related query
head(gtrends("Boldt Castle")$related_queries, 20)
gdata$related_topics[1:10, ]
gdata$interest_by_dma[1:5, ]
gdata$interest_by_country[1:5, ]
# Specify category
categories[grepl("^Hotel", categories$name), ]
gdata <- gtrends(keyword = "Riveredge Resort", time = "today+5-y", category = 179)
# Check interest by country and MSA
gdata$interest_by_dma[1:5, ]
gdata$interest_by_country[1:5, ]
# Write a function to query Google Trends and plot interest over time
google_trends <- function(keyword, geo = ""){
pres_data <- gtrends(keyword = keyword, geo = geo, time = "today+5-y", onlyInterest = TRUE)
plot(pres_data, lwd = 5)
hits <- pres_data$interest_over_time$hits
last <- length(hits)
round((mean(hits[(last-10):last]) / mean(hits[1:10]) - 1) * 100)
}
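# Worked illustration of the return value (toy_hits is made up, not real Google
# Trends data): the first ten hits average 20 and the last eleven average 30,
# so the same calculation as in the function reports roughly a +50% change.
toy_hits <- c(rep(20, 10), rep(30, 11))
round((mean(toy_hits[(length(toy_hits) - 10):length(toy_hits)]) / mean(toy_hits[1:10]) - 1) * 100)  # 50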
# Some sample searches
google_trends('NYC')
google_trends('Riveredge Resort')
google_trends('Riveredge Resort', geo = "US-NY")
google_trends('Riveredge Resort', geo = "CA-ON")
google_trends("1000 Islands")
google_trends('1000 Islands', geo = "CA-ON")
google_trends('1000 Islands', geo = "US-NY")
google_trends("Thousand Islands")
google_trends('Thousand Islands', geo = "CA-ON")
google_trends('Thousand Islands', geo = "US-NY")
|
#############################################################################################
## Title: sampleCounts.R
## Author: Andrew Bernath, Cadmus Group
## Created: 07/05/2017
## Updated:
## Billing Code(s):
## Description: Code to import and count pop and sample sizes for each
## post-strata region
#############################################################################################
################################################################################
# Use FILEPATHS from Step 1 for folders and file names of:
# - METER data
# - ZIP Code data (with pop counts from ACS)
# - output data
################################################################################
# Call file names
popZIP.datMap <- "ZIP_Code_Utility_Mapping.xlsx"
meter.export <- "METERS_2017.06.16.xlsx"
bldg.export <- "SITES_2017.06.16.xlsx"
#############################################################################################
# Import, Subset, Clean Data
#############################################################################################
# Import clean RBSA data
cleanRBSA.dat <- read.xlsx(paste(filepathCleanData
, paste("clean.rbsa.data.unweighted", rundate, ".xlsx", sep = "")
, sep="/")
)
names(cleanRBSA.dat)
# subset to necessary columns
cleanRBSA.dat1 <- data.frame("CK_Cadmus_ID" = cleanRBSA.dat$CK_Cadmus_ID
, "BuildingType" = cleanRBSA.dat$BuildingType
, stringsAsFactors = F)
# clean and count Cadmus IDs
cleanRBSA.dat1$CK_Cadmus_ID <- trimws(toupper(cleanRBSA.dat1$CK_Cadmus_ID))
length(unique(cleanRBSA.dat1$CK_Cadmus_ID)) ## 601 unique ID's
# standardize MF to a single category
cleanRBSA.dat1$BuildingType[grep("Multifamily", cleanRBSA.dat1$BuildingType)] <- "Multifamily"
unique(cleanRBSA.dat1$BuildingType)
# Import ID and ZIP data
id_zip.dat <- read.xlsx(xlsxFile = file.path(filepathRawData, meter.export), sheet=1)
length(unique(id_zip.dat$CK_Cadmus_ID))
zipFromSites.dat <- read.xlsx(xlsxFile = file.path(filepathRawData, bldg.export), sheet=1)
zipCodes <- unique(zipFromSites.dat[which(colnames(zipFromSites.dat) %in% c("CK_Cadmus_ID","SITE_ZIP"))])
# subset to necessary columns
id_zip.dat0.1 <- data.frame("CK_Cadmus_ID" = id_zip.dat$CK_Cadmus_ID
# , "ZIPCode" = id_zip.dat$SITE_ZIP
, "Utility" = id_zip.dat$Utility
, "MeterType" = id_zip.dat$Type
, stringsAsFactors = F)
id_zip.dat1 <- unique(left_join(zipCodes, id_zip.dat0.1, by = "CK_Cadmus_ID"))
length(unique(id_zip.dat1$CK_Cadmus_ID))
colnames(id_zip.dat1) <- c("CK_Cadmus_ID", "ZIPCode","Utility","MeterType")
# clean and count Cadmus IDs, clean utility and meter type
id_zip.dat1$CK_Cadmus_ID <- trimws(toupper(id_zip.dat1$CK_Cadmus_ID))
id_zip.dat1$Utility <- trimws(toupper(id_zip.dat1$Utility))
id_zip.dat1$MeterType <- trimws(toupper(id_zip.dat1$MeterType))
id_zip.dat1$ZIPCode <- as.numeric(substr(id_zip.dat1$ZIPCode, 1, 5)) ## Remove ZIP-Ext
length(unique(id_zip.dat1$CK_Cadmus_ID)) ## 567 unique respondent ID's
# Indicate invalid ZIP codes
id_zip.dat1$invalidZIP <- rep(0, nrow(id_zip.dat1))
id_zip.dat1$invalidZIP[which(id_zip.dat1$ZIPCode < 10000)] <- 1
id_zip.dat1$invalidZIP[which(is.na(id_zip.dat1$ZIPCode))] <- 1
# Subset to electric meters only
id_zip.dat2.0 <- id_zip.dat1[which(id_zip.dat1$MeterType != "THERMOSTAT"),]
id_zip.dat2.1 <- id_zip.dat2.0[with(id_zip.dat2.0, order(MeterType, decreasing = F)),]
which(duplicated(id_zip.dat2.1$CK_Cadmus_ID))
id_zip.dat2 <- id_zip.dat2.1[which(!(duplicated(id_zip.dat2.1$CK_Cadmus_ID))),]
## QA/QC: Any lost customers?
length(unique(id_zip.dat1$CK_Cadmus_ID)) == length(unique(id_zip.dat2$CK_Cadmus_ID))
length(unique(id_zip.dat1$CK_Cadmus_ID)) - length(unique(id_zip.dat2$CK_Cadmus_ID))
## 35 lost customers
# Import ZIP code mapping
zipMap.dat <- read.xlsx(xlsxFile = file.path(filepathWeightingDocs, popZIP.datMap), sheet=1)
names(zipMap.dat) <- c("ZIPCode"
, "City"
, "County"
, "State"
, "Region"
, "FERC_ID"
, "Utility"
, "Fraction"
, "BPA_vs_IOU"
, "SF.N"
, "MF.N"
, "MH.N"
, "SF.N.adj"
, "MF.N.adj"
, "MH.N.adj")
head(zipMap.dat)
# Clean up data: clean utility, remove any punctuation from utility, make zip codes numeric
zipMap.dat$Utility <- trimws(toupper(zipMap.dat$Utility))
zipMap.dat$Utility <- gsub('[[:punct:]]+', '', zipMap.dat$Utility)
zipMap.dat$ZIPCode <- as.numeric(zipMap.dat$ZIPCode)
zipMap.dat1 <- data.frame("ZIPCode" = zipMap.dat$ZIPCode
, "State" = zipMap.dat$State
, "Region" = zipMap.dat$Region
, "Utility" = zipMap.dat$Utility
, "BPA_vs_IOU" = zipMap.dat$BPA_vs_IOU
, stringsAsFactors = F)
## QA/QC: Check names of utilities for mismatches
sort(unique(zipMap.dat1$Utility), decreasing=F)
sort(unique(id_zip.dat2$Utility), decreasing=F)
## Andrew: were these reviewed with Rietz or Steve?, are there any others that could have been missed?
## Fix mismatches
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "PUD NO 1 OF SKAMANIA CO")] <-
"PUD #1 SKAMANIA COUNTY"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "TACOMA CITY OF")] <-
"CITY OF TACOMA"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "ELMHURST MUTUAL POWER LIGHT CO")] <-
"ELMHURST MUTUAL POWER AND LIGHT"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "FLATHEAD ELECTRIC COOP INC")] <-
"FLATHEAD ELECTRIC COOPERATIVE"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "GLACIER ELECTRIC COOP INC")] <-
"GLACIER ELECTRIC COOP"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "LAKEVIEW LIGHT POWER")] <-
"LAKEVIEW POWER & LIGHT"
## Double check this is right -- Mission Valley Power
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "USBIAMISSION VALLEY POWER")] <-
"MISSION VALLEY POWER"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "MISSOULA ELECTRIC COOP INC")] <-
"MISSOULA ELECTRIC COOP"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "NORTHWESTERN CORPORATION")] <-
"NORTHWESTERN ENERGY"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "OHOP MUTUAL LIGHT COMPANY INC")] <-
"OHOP MUTUAL LIGHT CO"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "PUGET SOUND ENERGY INC")] <-
"PUGET SOUND ENERGY"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "SEATTLE CITY OF")] <-
"SEATTLE CITY LIGHT"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "SNOHOMISH COUNTY PUD NO 1")] <-
"SNOHOMISH PUD"
zipMap.dat1$Utility[which(zipMap.dat1$Utility == "USBIAMISSION VALLEY POWER")] <-
"MISSION VALLEY POWER"
## QA/QC: How many missing?
length(id_zip.dat2$Utility[which(id_zip.dat2$Utility == "-- DID NOT ENTER! --")]) ## 0 not entered
#############################################################################################
# Merge data and assign electric utility
#############################################################################################
# Join ZIP codes to cleaned building type data
samp.dat.0 <- left_join(cleanRBSA.dat1, id_zip.dat2, by="CK_Cadmus_ID")
# Join ZIP mapping to previous step
colnames(samp.dat.0) <- c("CK_Cadmus_ID", "BuildingType", "ZIPCode" , "Utility" , "MeterType" , "invalidZIP")
samp.dat.1 <- left_join(samp.dat.0, zipMap.dat1, by="ZIPCode")
samp.dat.1$tally <- rep(1, nrow(samp.dat.1))
head(samp.dat.1)
nrow(samp.dat.1)## 959 rows (old) - 9/12 671 rows
colnames(samp.dat.1) <- c("CK_Cadmus_ID"
,"BuildingType"
,"ZIPCode"
,"Utility.Customer.Data"
,"MeterType"
,"invalidZIP"
,"State"
,"Region"
,"Utility.ZIP.map"
,"BPA_vs_IOU"
,"tally")
########################################################################################
## ##
## STEP 1:
## IF Cust data utility is "-- DID NOT ENTER! --"
## -> Replace with ZIP map utility
## ##
## STEP 2:
## IF ZIP map utility has duplicates
## IF Cust data has duplicates
## -> Tag for manual fix
## ELSE Use ZIP map utility
## ##
## STEP 3:
## IF ZIP map has no duplicates
## IF Cust data has no duplicates
## -> Tag for manual fix
## ELSE Use cust data utility
## ##
########################################################################################
## Replace missing utility from sample data with utility from zip code mapping
# missingInd <- which(samp.dat.1$Utility.Customer.Data == "-- DID NOT ENTER! --")
samp.dat.2 <- samp.dat.1
# samp.dat.2$Utility.Customer.Data[missingInd] <- samp.dat.2$Utility.ZIP.map[missingInd]
# Remove full row duplicates
dupRows <- which(duplicated(samp.dat.2)) #NONE
samp.dat.3 <- samp.dat.2#[-dupRows,] ## 862 rows
#
# ## Cust ID's with duplicates
dupCustIDs <- unique(samp.dat.3$CK_Cadmus_ID[which(duplicated(samp.dat.3$CK_Cadmus_ID))])
dupUtil.0 <- samp.dat.3[which(samp.dat.3$CK_Cadmus_ID %in% dupCustIDs),]
#
#
# # Initialize counter and output vector
cntr <- 1
dupUtil.0$Utility <- rep("MISSING", nrow(dupUtil.0))
#
# ## Create "Not In" operator
"%notin%" <- Negate("%in%")
#
#
# ## For loops to assign utility as per above logic
# ## STEP 2
for(cntr in 1:length(dupCustIDs)) {
if("TRUE" %in% duplicated(dupUtil.0$Utility.ZIP.map[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])])) {
if("TRUE" %in% duplicated(dupUtil.0$Utility.Customer.Data[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])])) {
dupUtil.0$Utility[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])] <- "MANUAL FIX"
}
else {
dupUtil.0$Utility[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])] <-
dupUtil.0$Utility.ZIP.map[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])]
}
}
}
## STEP 3
for(cntr in 1:length(dupCustIDs)) {
if("TRUE" %notin% duplicated(dupUtil.0$Utility.ZIP.map[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])])) {
if("TRUE" %notin% duplicated(dupUtil.0$Utility.Customer.Data[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])])) {
dupUtil.0$Utility[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])] <- "MANUAL FIX"
}
else {
dupUtil.0$Utility[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])] <-
dupUtil.0$Utility.Customer.Data[which(dupUtil.0$CK_Cadmus_ID == dupCustIDs[cntr])]
}
}
}
#
# ## Subset to ID and Utility column and merge back into sample data
names(dupUtil.0)
dupUtil.1 <- unique(dupUtil.0[which(colnames(dupUtil.0) %in% c("CK_Cadmus_ID", "Utility"))])
samp.dat.4 <- left_join(samp.dat.3, dupUtil.1, by="CK_Cadmus_ID")
#
# ## For non-duplicates, use cust data
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID %notin% dupCustIDs)] <-
samp.dat.4$Utility.Customer.Data[which(samp.dat.4$CK_Cadmus_ID %notin% dupCustIDs)]
##########################################
## ##
## MANUAL FIXES FOR MISSING UTILITIES ##
## ##
##########################################
# samp.dat.4 <- samp.dat.3
utilFix <- samp.dat.4[,which(names(samp.dat.4) %in% c("CK_Cadmus_ID"
, "ZIPCode"
, "Utility.Customer.Data"
, "Utility.ZIP.map"))]
## Andrew: What does "inspection" mean here? did you review the zip code? address? etc?
## Based on inspection
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="BPS25495 OS BPA")] <- "CITY OF TACOMA"
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="WH3590")] <- "PUGET SOUND ENERGY"
## Based on others in ZIP
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="SG0200 OS SCL")] <- "SEATTLE CITY LIGHT"
samp.dat.4$ZIPCode[which(samp.dat.4$CK_Cadmus_ID =="SG0200 OS SCL")] <- 98118
samp.dat.4$invalidZIP[which(samp.dat.4$CK_Cadmus_ID =="SG0200 OS SCL")] <- 0
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="SL2122 OS SCL")] <- "SEATTLE CITY LIGHT"
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="SE2163 OS SCL")] <- "SEATTLE CITY LIGHT"
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="SL1673 OS SCL")] <- "SEATTLE CITY LIGHT"
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="WH1221")] <- "CITY OF TACOMA"
## Based on which sample round
samp.dat.4$Utility[which(samp.dat.4$CK_Cadmus_ID =="SG0048 OS SCL")] <- "SEATTLE CITY LIGHT"
## Fix missing state/region
samp.dat.4$State[which(samp.dat.4$CK_Cadmus_ID =="MM0574")] <- "MT"
samp.dat.4$Region[which(samp.dat.4$CK_Cadmus_ID =="MM0574")] <- "W"
## Fix BPA vs IOU
samp.dat.4$BPA_vs_IOU[which(samp.dat.4$Utility == "SEATTLE CITY LIGHT")] <- "BPA"
samp.dat.4$BPA_vs_IOU[which(samp.dat.4$Utility == "SNOHOMISH PUD")] <- "BPA"
samp.dat.4$BPA_vs_IOU[which(samp.dat.4$Utility == "PUGET SOUND ENERGY")] <- "IOU"
samp.dat.4$BPA_vs_IOU[which(samp.dat.4$Utility == "NORTHWESTERN ENERGY")] <- "IOU"
samp.dat.4$BPA_vs_IOU[which(samp.dat.4$Utility == "MISSION VALLEY POWER")] <- "BPA"
samp.dat.4$BPA_vs_IOU[which(samp.dat.4$Utility == "MISSOULA ELECTRIC COOP")] <- "BPA"
## UPDATES 9/11-12/2017
samp.dat.4$State[which(samp.dat.4$CK_Cadmus_ID =="SE2257 OS SCL")] <- "WA"
samp.dat.4$Region[which(samp.dat.4$CK_Cadmus_ID =="SE2257 OS SCL")] <- "PS"
samp.dat.4$State[which(samp.dat.4$CK_Cadmus_ID =="SG0200 OS SCL")] <- "WA"
samp.dat.4$Region[which(samp.dat.4$CK_Cadmus_ID =="SG0200 OS SCL")] <- "PS"
samp.dat.4$State[which(samp.dat.4$CK_Cadmus_ID =="SL0418 OS SCL")] <- "WA"
samp.dat.4$Region[which(samp.dat.4$CK_Cadmus_ID =="SL0418 OS SCL")] <- "PS"
## Remove old utility columns and duplicate rows
samp.dat.5 <- unique(samp.dat.4[,-which(names(samp.dat.4) %in% c("Utility.Customer.Data", "Utility.ZIP.map"))])
which(duplicated(samp.dat.5$CK_Cadmus_ID)) ## All duplicates removed
############ NEED TO FIX #############
samp.dat.4[which(samp.dat.4$CK_Cadmus_ID == "BPS26690 OS BPA"),]
## remove missing information FOR NOW -- this will be corrected in the final data
samp.dat.6 <- samp.dat.5[which(!(is.na(samp.dat.5$State))),]
# Subset and define strata
# Initialize the vector for strata names
samp.dat.6$Strata <- rep("MISSING", nrow(samp.dat.6))
unique(samp.dat.6$Utility)
## QA/QC: Make sure oversample utilities are in expected BPA territory
samp.dat.6$BPA_vs_IOU[grep("SEATTLE CITY LIGHT", samp.dat.6$Utility)] == "BPA"
samp.dat.6$BPA_vs_IOU[grep("SNOHOMISH", samp.dat.6$Utility)] == "BPA"
samp.dat.6$BPA_vs_IOU[grep("PUGET SOUND", samp.dat.6$Utility)] == "IOU"
# Assign strata
samp.dat.6$Strata[which(samp.dat.6$BPA_vs_IOU == "BPA")] <- "BPA"
samp.dat.6$Strata[which(samp.dat.6$BPA_vs_IOU == "IOU")] <- "Non-BPA/PSE"
samp.dat.6$Strata[grep("SNOHOMISH", samp.dat.6$Utility)] <- "SnoPUD"
samp.dat.6$Strata[grep("PUGET SOUND", samp.dat.6$Utility)] <- "PSE"
samp.dat.6$Strata[grep("SEATTLE CITY LIGHT", samp.dat.6$Utility)] <- "SCL"
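# Note: the three grep() assignments run after the generic BPA / Non-BPA/PSE labels,
# so SnoPUD, PSE and SCL sites end up in their own oversample strata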
samp.dat.6 <- data.frame(samp.dat.6, stringsAsFactors = F)
# Summarize sample counts
sampCounts.0 <- summarise(group_by(samp.dat.6,BuildingType, State, Region, Utility, BPA_vs_IOU)
, n = sum(tally))
#############################################################################################
# Merge data and count sample sizes
#############################################################################################
# Subset and define strata
# Initialize the vector for strata names
sampCounts.0$Strata <- rep("MISSING", nrow(sampCounts.0))
unique(sampCounts.0$Utility)
## QA/QC: Make sure oversample utilities are in expected BPA territory
sampCounts.0$BPA_vs_IOU[grep("SEATTLE CITY LIGHT", sampCounts.0$Utility)] == "BPA"
sampCounts.0$BPA_vs_IOU[grep("SNOHOMISH", sampCounts.0$Utility)] == "BPA"
sampCounts.0$BPA_vs_IOU[grep("PUGET SOUND", sampCounts.0$Utility)] == "IOU"
# Assign strata
sampCounts.0$Strata[which(sampCounts.0$BPA_vs_IOU == "BPA")] <- "BPA"
sampCounts.0$Strata[which(sampCounts.0$BPA_vs_IOU == "IOU")] <- "Non-BPA/PSE"
sampCounts.0$Strata[grep("SNOHOMISH", sampCounts.0$Utility)] <- "SnoPUD"
sampCounts.0$Strata[grep("PUGET SOUND", sampCounts.0$Utility)] <- "PSE"
sampCounts.0$Strata[grep("SEATTLE CITY LIGHT", sampCounts.0$Utility)] <- "SCL"
# Get sample sizes in each strata
sampCounts.1 <- summarise(group_by(sampCounts.0,
BuildingType, State, Region, Strata)
, n.h = sum(n))
#############################################################################################
# Count population sizes
#############################################################################################
# Join ZIP codes to building type data
names(zipMap.dat)
popCounts.0 <- summarise(group_by(zipMap.dat, State, Region, Utility, BPA_vs_IOU)
, SF.pop = round(sum(SF.N.adj), 0)
, MH.pop = round(sum(MH.N.adj), 0)
, MF.pop = round(sum(MF.N.adj), 0)
)
# Initialize the vector for strata names
popCounts.0$Strata <- rep("MISSING", nrow(popCounts.0))
## QA/QC: Make sure oversample utilities are in expected BPA territory
popCounts.0$BPA_vs_IOU[grep("SEATTLE CITY", popCounts.0$Utility)] == "BPA"
popCounts.0$BPA_vs_IOU[grep("SNOHOMISH", popCounts.0$Utility)] == "BPA"
popCounts.0$BPA_vs_IOU[grep("PUGET SOUND", popCounts.0$Utility)] == "IOU"
# Assign strata
popCounts.0$Strata[which(popCounts.0$BPA_vs_IOU == "BPA")] <- "BPA"
popCounts.0$Strata[which(popCounts.0$BPA_vs_IOU == "IOU")] <- "Non-BPA/PSE"
popCounts.0$Strata[grep("SNOHOMISH", popCounts.0$Utility)] <- "SnoPUD"
popCounts.0$Strata[grep("PUGET SOUND", popCounts.0$Utility)] <- "PSE"
popCounts.0$Strata[grep("SEATTLE CITY", popCounts.0$Utility)] <- "SCL"
# Get sample sizes in each strata
popCounts.1 <- summarise(group_by(popCounts.0,
State, Region, Strata)
, N_SF.h = sum(SF.pop)
, N_MH.h = sum(MH.pop)
, N_MF.h = sum(MF.pop))
popMelt <- melt(popCounts.1, id.vars = c("State", "Region", "Strata"))
popMelt$BuildingType <- NA
popMelt$BuildingType[grep("SF", popMelt$variable)] <- "Single Family"
popMelt$BuildingType[grep("MF", popMelt$variable)] <- "Multifamily"
popMelt$BuildingType[grep("MH", popMelt$variable)] <- "Manufactured"
total.counts <- full_join(popMelt, sampCounts.1, by = c("BuildingType"
,"State"
,"Region"
,"Strata"))
total.counts$n.h[which(is.na(total.counts$n.h))] <- 0
colnames(total.counts)[which(colnames(total.counts) == "value")] <- "N.h"
final.counts <- total.counts[which(!(colnames(total.counts) %in% c("variable")))]
#############################################################################################
# Combine sample and population counts
#############################################################################################
# Join pop to samp by state/region/strata
names(sampCounts.1)
names(popCounts.1)
allCounts.0 <- left_join(sampCounts.1, popCounts.1, by=c("State", "Region", "Strata"))
# Set pop size in strata based on bldg type
# Initialize the vector for pop sizes
allCounts.0$N.h <- rep("MISSING", nrow(allCounts.0))
# Single Family Homes
allCounts.0$N.h[which(allCounts.0$BuildingType == "Single Family")] <-
allCounts.0$N_SF.h[which(allCounts.0$BuildingType == "Single Family")]
# Manufactured Homes
allCounts.0$N.h[which(allCounts.0$BuildingType == "Manufactured")] <-
allCounts.0$N_MH.h[which(allCounts.0$BuildingType == "Manufactured")]
# Multifamily Homes
allCounts.0$N.h[grep("Multifamily",allCounts.0$BuildingType)] <-
allCounts.0$N_MF.h[grep("Multifamily",allCounts.0$BuildingType)]
# Remove unnecessary columns
allCounts.1 <- allCounts.0[,-which(names(allCounts.0) %in% c("N_SF.h", "N_MH.h", "N_MF.h"))]
allCounts.1$n.h <- as.numeric(allCounts.1$n.h)
allCounts.1$N.h <- as.numeric(allCounts.1$N.h)
# Compute expansion weights
allCounts.1$w.h <- round(allCounts.1$N.h/allCounts.1$n.h, 2)
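# Worked example of the expansion weight (illustrative numbers only): a stratum with
# N.h = 1200 population homes and n.h = 12 completed sites gets w.h = 1200/12 = 100,
# i.e. each sampled site represents about 100 homes in its stratum.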
allCounts.final <- allCounts.1[which(!(is.na(allCounts.1$State))),]
allCounts.final1 <- allCounts.final[which(!(is.na(allCounts.final$N.h))),]
allCounts.final1$Final.Strata <- paste(allCounts.final1$State
,allCounts.final1$Region
,allCounts.final1$Strata)
samp.dat.7 <- left_join(samp.dat.6, final.counts, by = c("BuildingType"
,"State"
,"Region"
,"Strata"))
samp.dat.8 <- samp.dat.7[which(!(is.na(samp.dat.7$N.h))),]
samp.dat.final <- left_join(samp.dat.8, cleanRBSA.dat)
## Export clean data merged with weights
write.xlsx(samp.dat.final, paste(filepathCleanData, paste("clean.rbsa.data", rundate, ".xlsx", sep = ""), sep="/"),
append = T, row.names = F, showNA = F)
## Export
write.xlsx(final.counts, paste(filepathCleanData, paste("weights.data", rundate, ".xlsx", sep = ""), sep="/"),
append = T, row.names = F, showNA = F)
|
/Code/Sample Weighting/Old/Weights.R
|
no_license
|
casey-stevens/Cadmus-6000-2017
|
R
| false | false | 22,880 |
r
|
|
#################
### LIBRARIES ##
#################
library(dplyr)
#################
# DATA SET #
#################
base <- read.table("20180516-base-pfgn.txt",
header = T, sep = ";", quote = "\"",
dec = ",", fill = TRUE,
stringsAsFactors=F)
head(base)
#################
## DATA WRANGLING ##
#################
options(digits = 2, scipen = 14)
colnames(base) <- c("CNPJ","Nome","Municipio","UF","Valor")
temp_uf <- base$UF
temp_uf <- gsub(" ","-",temp_uf)
head(temp_uf)
valor <- gsub("[.]","",base$Valor)
valor <- gsub("[,]",".",valor)
valor <- as.numeric(valor)
# head(valor)
cnpjLimpo <- base$CNPJ
cnpjLimpo <- gsub("[.]","",cnpjLimpo)
cnpjLimpo <- gsub("[-]", "", cnpjLimpo)
cnpjLimpo <- gsub("[/]", "", cnpjLimpo)
cnpjLimpo <- as.numeric(cnpjLimpo)
# head(cnpjLimpo)
base$UF <- temp_uf
base$CNPJ <- cnpjLimpo
base$Valor <- valor
# rm(cnpjLimpo, valor, temp_uf)
#################
## SPLIT BY UF ##
#################
estados <- unique(base$UF)
# head(estados)
lista_por_UF <- split(base, base$UF)
for(i in 1:length(estados)){
uf <- estados[i]
base_uf <- lista_por_UF[[uf]]
write.csv2(base_uf,paste0('refis_pgfn_',uf,'.csv'),row.names = FALSE)
}
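# e.g. the subset for UF == "SP" is written to "refis_pgfn_SP.csv" in the working directory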
# rm(estados, i, lista_por_UF, uf, base_uf)
#################
### STATISTICS ##
#################
valor_por_uf <-
base %>%
group_by(UF) %>%
summarise(qtde = n(), soma_total = sum(Valor))
valor_por_uf$divida_media <- valor_por_uf$soma_total / valor_por_uf$qtde
valor_por_uf
View(valor_por_uf)
|
/PGFN/Divida-Ativa-Script.R
|
no_license
|
alexvlima/Simples-Tax-Revenues
|
R
| false | false | 1,533 |
r
|
|
testlist <- list(type = 1L, z = 8.31522758516251e-317)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609890918-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 108 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_Weibull_MLEs.R
\name{find_Weibull_MLEs}
\alias{find_Weibull_MLEs}
\title{A Wrapper Function of \code{simulate_weibull_data}}
\usage{
find_Weibull_MLEs(censor_data)
}
\arguments{
\item{censor_data}{The value returned by \code{simulate_weibull_data()}.}
}
\description{
This function only gives the MLEs, discarding the censoring data.
}
|
/man/find_Weibull_MLEs.Rd
|
permissive
|
tianqinglong/myRtoolbox
|
R
| false | true | 432 |
rd
|
|
if(!require(shiny))
install.packages("shiny")
library(shiny)
runApp("app.R",port = 2705,quiet=TRUE,launch.browser = TRUE)
|
/Run.R
|
no_license
|
Dfperezgdatascientist/Prueba_KuberGCP
|
R
| false | false | 128 |
r
|
|
tallyVotes <- function(test,
blendedModel,
classes=c("SS", "CSiS", "FSiS", "SiSh", "MS", "WS", "D", "PS", "BS")
) {
wells <- names(blendedModel[["fits"]])
# initialize data frame for weighted vote tallies with zeros
votes <- data.frame(matrix(0, nrow = nrow(test), ncol = length(classes)))
names(votes) <- classes
for (well_i in wells) {
predictions <- predict(blendedModel[["fits"]][[well_i]], newdata=test)
w <- blendedModel[["weights"]][[well_i]]
for (i in 1:nrow(test)) {
# add well weight
votes[i, which(names(votes) %in% predictions[i])] <- votes[i, which(names(votes) %in% predictions[i])] + w
}
}
votes
}
electClass <- function(test, votes) {
  for (i in 1:nrow(test)) {
    test$Predicted[i] <- names(votes)[which.max(votes[i,])]
  }
  # convert to a factor with the facies levels in their intended order,
  # keeping each predicted label unchanged
  test$Predicted <- factor(test$Predicted,
                           levels = c("SS", "CSiS", "FSiS", "SiSh", "MS", "WS", "D", "PS", "BS"))
  test$Predicted
}
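# Illustrative usage sketch (object names below are made up; any fitted models with a
# predict() method that returns facies labels should work):
# blended <- list(
#   fits    = list(SHRIMPLIN = fit_shrimplin, SHANKLE = fit_shankle),
#   weights = list(SHRIMPLIN = 0.6,           SHANKLE = 0.4)
# )
# votes <- tallyVotes(test_set, blended)
# test_set$Predicted <- electClass(test_set, votes)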
|
/jpoirier/archive/evaluationFunctions.R
|
permissive
|
yohanesnuwara/2016-ml-contest
|
R
| false | false | 1,109 |
r
|
|
tournament_selection <- function(population) {
population_size <- length(population)
random_choice_1 <- sample(population_size, replace = TRUE)
random_choice_2 <- sample(population_size, replace = TRUE)
new_population <- random_choice_1
selection <- population[random_choice_1] > population[random_choice_2]
new_population[selection] <- random_choice_2[selection]
return(new_population)
}
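# Minimal usage sketch (fitness values are made up): the comparison above keeps the index
# with the smaller value, so lower fitness wins each tournament.
# fitness <- c(3.2, 1.5, 4.8, 2.1, 0.9)
# winners <- tournament_selection(fitness)   # selected indices, one per population slot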
|
/R/selection.R
|
permissive
|
jiripetrlik/r-multiobjective-evolutionary-algorithms
|
R
| false | false | 405 |
r
|
|
#' @useDynLib pedinf
NULL
|
/R/pedinf-package.R
|
no_license
|
mikldk/pedinf
|
R
| false | false | 26 |
r
|
|
testlist <- list(a = 0L, b = 0L, x = c(-44536L, -1694498817L, 134744252L, 184483840L, 16711680L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610386265-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 256 |
r
|
|
# Data Cleaning Week 4 Project
# 10/22/2016
# This code will produce two tidy data frames in a linux environment
# df contains a subset of the combined test and training data with
# mean and std quantities from the original data set.
# df2 has one row per subject per activity and captures
# the means of the measurement columns from df with corresponding
# test subject and activty.
# Note: this code uses = instead of <- for assignment for readability
# because [Thing] less than negative [Other thing] is confusing
# Include useful library
library(dplyr)
# Step 0 - get the data, if necessary:
# Uncomment 3 lines below if data has been retrieved already
#download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
# "./thedata.zip")
#unzip("./thedata.zip")
# Step 1:
# Merging training and test data
# I am ignoring the raw accelerometer and gyro data and only using
# the X, y, subject files.
# Note: this could also be done on a bash command line with something like:
# $ paste subject_test.txt X_test.txt y_test.txt > temp1
# $ paste subject_train.txt X_train.txt y_train.txt > temp2
# $ cat temp1 temp2 > fulldata.txt
# Within each set (test and train), combine subject, data, and label:
df_train = read.table("./UCI HAR Dataset/train/X_train.txt")
df_train_labs = read.table("./UCI HAR Dataset/train/y_train.txt")
df_train_subs = read.table("./UCI HAR Dataset/train/subject_train.txt")
df_test = read.table("./UCI HAR Dataset/test/X_test.txt")
df_test_labs = read.table("./UCI HAR Dataset/test/y_test.txt")
df_test_subs = read.table("./UCI HAR Dataset/test/subject_test.txt")
# Combine data, labels, and subjects into training and test tables:
df_train = cbind(df_train, df_train_labs, df_train_subs)
df_test = cbind(df_test, df_test_labs, df_test_subs)
# Cols are now: [561 features], label (i.e. activity), subject (i.e. #1-30)
mycols = 562:563
mycolnames = c("activity","subject")
# Combine test and train sets into one dataframe
df = rbind(df_train, df_test)
# Clean up temporary data frames
rm(df_train, df_train_labs, df_train_subs, df_test, df_test_subs, df_test_labs)
# Step 2:
# Extract column names from the data's own feature list file and
# pull out the columns with "mean" and "std" in their names
# Use features.txt to get columns with means and stds
features = read.table("./UCI HAR Dataset/features.txt", col.names = c("col","name"))
# col is equivalent to the row number in features, grep will return desired column #s
meanstdcols = grep("[Mm][Ee][Aa][Nn]|[Ss][Tt][Dd]", features$name)
meanstdcolnames = features$name[meanstdcols]
allcols = c(meanstdcols, mycols)
allcolnames_unclean = c(as.character(meanstdcolnames), mycolnames)
# Note: the regex above gives several columns in addition to the plain mean() ones
# (and that have no corresponding std()). I'm leaving them for now because they look
# like potentially useful quantities.
# Clean up temporary objects
rm(features, meanstdcols, meanstdcolnames, mycols, mycolnames)
# Trim df down to desired columns and apply descriptive column labels
# as taken from the data's documentation:
df = df[,c(allcols)]
colnames(df) = allcolnames_unclean
# Step 3
# Replace activity number labels with their descriptions:
activitylabs = read.table("./UCI HAR Dataset/activity_labels.txt",
col.names = c("label","activity_desc"))
# Get rid of "_" for tidy-ness
activitylabs$activity_desc = gsub("_", "", tolower(activitylabs$activity_desc))
# Make factor for convenience later
df$activity = factor(df$activity,labels = activitylabs$activity_desc)
# Clean up temporary objects
rm(activitylabs, allcolnames_unclean, allcols)
# Step 4
# Neaten up the remaining variable (column) names by removing non-alphanumeric
# characters and converting to lowercase
colnames(df) = gsub("\\(\\)|-","", tolower(colnames(df)))
# Step 5
# Generate second data set of average values by subject and activity
df2 = df %>% group_by(subject,activity) %>%
summarise_all(funs(mean))
write.table(df2, "tidytable.txt", row.names = FALSE)
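# Optional check: df2 should have one row per subject-activity pair,
# i.e. 30 subjects x 6 activities = 180 rows.
# dim(df2)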
|
/run_analysis.R
|
no_license
|
escapedneutrino/DataCleaningProject
|
R
| false | false | 4,095 |
r
|
|
library(abd)
### Name: ZebraFinchBeaks
### Title: Mate Preference in Zebra Finches
### Aliases: ZebraFinchBeaks
### Keywords: datasets
### ** Examples
ZebraFinchBeaks
|
/data/genthat_extracted_code/abd/examples/ZebraFinchBeaks.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 174 |
r
|
|
library("gsim")
import::from("dplyr", n_distinct)
import::from("Rcpp", cpp_object_initializer)
# Radon simulations used in tests
stan_radon_data <- radon_data() %>%
define(
y, county,
X = design(1, x),
U = design(1, u, .unjoin = county),
N = nrow(.),
J = n_distinct(county),
P_X = ncol(X),
P_U = ncol(U)
)
radon_m <- radon_model(language = "stan", variant = "centered")
radon_stanfit <- rstan::stan(model_code = radon_m, data = stan_radon_data,
iter = 100, chains = 2)
radon_sims <- radon_stanfit %>%
select(mu_y, y_rep, Beta, sigma, Gamma, sigma_Beta, Omega)
saveRDS(radon_sims, file = "./tests/testthat/radon-sims.rds")
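# The saved draws can be reloaded inside the test suite with readRDS("radon-sims.rds")
# (the path is relative to tests/testthat/ when the tests are run).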
|
/data-raw/radon-sims.r
|
no_license
|
lionel-/gsim
|
R
| false | false | 664 |
r
|
|
app <- ShinyDriver$new("../../", seed = 100, shinyOptions = list(display.mode = "normal"))
app$snapshotInit("mytest")
Sys.sleep(3)
app$snapshot()
app$setInputs(year = c(1988, 2014),wait_=FALSE, values_=FALSE)
app$setInputs(year = c(2002, 2014),wait_=FALSE, values_=FALSE)
Sys.sleep(2)
app$snapshot()
app$setInputs(year = c(2008, 2014),wait_=FALSE, values_=FALSE)
app$setInputs(oscars = 1, wait_=FALSE, values_=FALSE)
Sys.sleep(2)
app$snapshot()
app$setInputs(oscars = 0, wait_=FALSE, values_=FALSE)
app$setInputs(genre = "Animation",wait_=FALSE, values_=FALSE)
app$setInputs(xvar = "Reviews",wait_=FALSE, values_=FALSE)
app$setInputs(yvar = "BoxOffice",wait_=FALSE, values_=FALSE)
Sys.sleep(2)
app$snapshot()
|
/shinycoreci-apps-master/shinycoreci-apps-master/051-movie-explorer/tests/shinytests/mytest.R
|
no_license
|
RohanYashraj/R-Tutorials-Code
|
R
| false | false | 710 |
r
|
|
remove(list = ls())
#===================================================
### Load necessary R packages
#===================================================
library(INLA)
library(ggplot2)
library(raster)
library(spacetime)
library(scoringRules)
library(gridExtra)
library(gstat)
library(rgeos)
library(RColorBrewer)
library(gdata)
library(viridis)
## Set seed for reproducibility
set.seed(123)
## Load useful functions
source("utility.R")
#===================================================
### Data preparation
#===================================================
load("workspace_data.RData")
pol = "no2"
final_dataset$site.type = factor(as.character(final_dataset$site.type), ordered = T, levels = c("RUR","URB","RKS"))
final_dataset$site.type.n = as.numeric(final_dataset$site.type)
final_dataset$sitetype.idx = as.numeric(final_dataset$site.type)
##--- Visualise data
plot(shape)
points(coordinates.pcm, pch=3, cex=.2)
points(coordinates.aqum, pch=19, cex=.5, col="red")
points(monitors[monitors$site.type=="RUR", c("easting","northing")], pch=8, cex=.5, col="green3")
points(monitors[monitors$site.type=="URB", c("easting","northing")], pch=15, cex=.5, col="orange")
points(monitors[monitors$site.type=="RKS", c("easting","northing")], pch=17, cex=.5, col="blue")
lines(london.shape)
par(xpd=TRUE)
legend("topleft",inset=c(-0.15,0.1),legend=c("PCM","AQUM","Rural monitors","Urban monitors",
"Roadside/ \n Kerbside monitors"), col = c("black","red","green3","orange","blue"),
pch=c(3,19,8,15,17), cex=.7, pt.cex=1.2, bty = "n")
formula = y ~ -1 + alpha1 + alpha2 + alpha3 + betaURB + betaRKS +
f(z2, model='rw1', hyper = rw1.aqum.prior, scale.model = TRUE, constr = TRUE) +
f(z23, copy='z2', fixed = FALSE, hyper=lambda23) +
f(z1, model=spde, extraconstr = list(A=matrix(1,ncol=mesh$n,nrow=1), e=matrix(0,ncol=1))) +
f(z12, copy='z1', fixed = FALSE, hyper=lambda12) +
f(z13, copy='z1', fixed = FALSE, hyper=lambda13) +
f(z3, model='ar1', hyper=ar1.time.prior, replicate = sitetype.idx, constr=TRUE, rankdef = 1)
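##--- Reading the formula: alpha1, alpha2 and alpha3 are the intercepts of the monitor,
##--- AQUM and PCM equations; z1 is the shared SPDE spatial field (z12 and z13 are its
##--- scaled copies in the AQUM and monitor equations), z2 the shared RW1 temporal field
##--- (z23 its copy in the monitor equation), and z3 a site-type-specific AR1 term.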
##--- NOTE: data_id indicates the validation set and is received from the bash script
data_id <- ifelse(nchar(Sys.getenv("DATA_ID"))>0, as.numeric(Sys.getenv("DATA_ID")), 1) # 1 to 6
if (!file.exists(file.path(getwd(),paste0("Output_",data_id)))){
dir.create(file.path(getwd(),paste0("Output_",data_id)))
}
setwd(paste0("Output_",data_id))
print(getwd())
valid = final_dataset[final_dataset$code %in% monitors_val[[data_id]] , ]
estim = final_dataset[!(final_dataset$code %in% monitors_val[[data_id]]) , ]
coordinates.estim<-unique(estim[,c("loc.idx","easting","northing")])
coordinates.valid<-unique(valid[,c("loc.idx","easting","northing")])
n_monitors=nrow(coordinates.y)
n_data=nrow(estim)+nrow(valid)
n_days=n_data/n_monitors
#===================================================
### Create mesh
#===================================================
mesh = inla.mesh.2d(rbind(coordinates.y,coordinates.aqum,coordinates.pcm),
loc.domain=boundary@polygons[[1]]@Polygons[[1]]@coords,
max.edge = c(75000,40000),
offset = c(10000,30000),
cutoff=8000)
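# max.edge sets the largest triangle edge inside/outside the domain (in metres),
# offset extends the triangulation beyond the boundary, and cutoff merges input
# locations closer than 8 km before the mesh is built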
plot(mesh, main="")
lines(shape, col="blue")
lines(london.shape, col="blue")
title("Domain triangulation")
points(coordinates.estim, col="green")
points(coordinates.valid, col="red")
#===================================================
### Construct the SPDE model for Matern field with some prior information obtained from the mesh or the spatial domain
#===================================================
range0 <- min(c(diff(range(mesh$loc[,1])),diff(range(mesh$loc[,2]))))/5
spde <- inla.spde2.pcmatern(mesh=mesh, alpha=2, ### mesh and smoothness parameter
prior.range=c(range0, 0.95), ### P(practic.range<range0)=0.95
prior.sigma=c(100, 0.5)) ### P(sigma>100)=0.5
#===================================================
### Hyperpriors
#===================================================
rw1.aqum.prior = list(theta=list(prior="pc.prec", param=c(sd(aqum[,paste0(pol,"_log")]),0.01)))
ar1.time.prior = list(theta2 = list(prior='normal',
param=c(inla.models()$latent$ar1$hyper$theta2$to.theta(0.3), 0.5)))
lambda23 = list(theta = list(prior = 'normal', param = c(0.9, 0.01), initial=0.9)) # lambda_2,3
lambda12 = list(theta = list(prior = 'normal', param = c(1.1, 0.01), initial=1.1)) # lambda_1,2
lambda13 = list(theta = list(prior = 'normal', param = c(1.3, 0.01), initial=1.3)) # lambda_1,3
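# For prior = 'normal' the param vector is c(mean, precision); these lambdas are the
# scaling coefficients of the copied fields z12, z13 (spatial) and z23 (temporal)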
#===================================================
### Stack
#===================================================
## ***** PCM *****
A_pcm <- inla.spde.make.A(mesh=mesh, cbind(pcm$easting, pcm$northing))
stk_pcm <- inla.stack(data=list(y=cbind(pcm[,paste0(pol,"_log")], NA, NA)),
effects=list(list(alpha3=rep(1,nrow(pcm))), list(z1=1:spde$n.spde)),
A=list(1,A_pcm),
tag="est.pcm")
## ***** AQUM *****
A_aqum <- inla.spde.make.A(mesh=mesh,cbind(aqum$easting, aqum$northing))
stk_aqum <- inla.stack(data=list(y=cbind(NA, aqum[,paste0(pol,"_log")], NA)),
effects=list(list(alpha2=1, z2=aqum$date.idx),
list(z12=1:spde$n.spde)),
A=list(1, A_aqum),
tag="est.aqum")
## data stack: include all the effects
A_y_e <- inla.spde.make.A(mesh=mesh, cbind(estim$easting,estim$northing))
stk_y_e <- inla.stack(data=list(y=cbind(NA,NA,estim[,paste0(pol,"_log")])),
effects=list(list(z23=estim[,paste0("date.idx.",pol)]),
list(z13=1:spde$n.spde),
data.frame(alpha1=1,
z3=estim[,paste0("date.idx.",pol)],
betaURB=estim[,paste0("stURB.",pol)],
betaRKS=estim[,paste0("stRKS.",pol)],
estim[,c("sitetype.idx","loc.idx","code","easting","northing")])),
A=list(1, A_y_e, 1),
tag="est.y")
### validation scenario
A_y_v <- inla.spde.make.A(mesh=mesh, cbind(valid$easting, valid$northing))
stk_y_v <- inla.stack(data=list(y=cbind(NA,NA,rep(NA,length(valid[,paste0(pol,"_log")])))),
effects=list(list(z23=valid[,paste0("date.idx.",pol)]),
list(z13=1:spde$n.spde),
data.frame(alpha1=1,
z3=valid[,paste0("date.idx.",pol)],
betaURB=valid[,paste0("stURB.",pol)],
betaRKS=valid[,paste0("stRKS.",pol)],
valid[,c("sitetype.idx","loc.idx","code","easting","northing")])),
A=list(1, A_y_v, 1),
tag="val.y")
stack <- inla.stack(stk_aqum,
stk_pcm,
stk_y_v,
stk_y_e)
#===================================================
### INLA call
#===================================================
##--- NOTE: the INLA call can be parallelized using Pardiso - see inla.pardiso()
mod<- inla(formula,
family=c("gaussian","gaussian","gaussian"),
data=inla.stack.data(stack),
control.predictor=list(compute=TRUE,link=1, A=inla.stack.A(stack)),
control.compute = list(dic = TRUE,cpo=TRUE, config=TRUE, waic=TRUE),
           control.inla = list(strategy = 'adaptive', int.strategy = 'eb'),
verbose=TRUE)
#===================================================
### Summary of Posterior distributions of parameters and hyperparameters of interest
#===================================================
##--- FIXED EFFECTS
print(round(mod$summary.fixed,4))
##--- HYPERPARAMETERS
print(round(mod$summary.hyperpar,4))
#===================================================
### Check model performance
#===================================================
n.failures = sum(mod$cpo$failure, na.rm = T)
if(mean(mod$cpo$failure, na.rm = T)!=0){
# summary(mod$cpo$failure)
# Two options:
# 1. recompute using control.inla=list(int.strategy = "grid", diff.logdens = 4, strategy = "laplace", npoints = 21) see http://www.r-inla.org/faq
# 2. run mod = inla.cpo(mod) #recomputes in an efficient way the cpo/pit for which mod$cpo$failure > 0
mod_imp = inla.cpo(mod)
print("Model cpo have been improved")
logscore = -mean(log(mod_imp$cpo$cpo), na.rm=T)
par(mfrow=c(1,2))
hist(mod_imp$cpo$pit, breaks=100, main=paste0("PIT improved - n.failures=", n.failures))
hist(mod_imp$cpo$cpo, breaks=100, main=paste0("CPO improved - n.failures=", n.failures))
}else{
logscore = -mean(log(mod$cpo$cpo), na.rm=T)
par(mfrow=c(1,2))
hist(mod$cpo$pit, breaks=100, main=paste0("PIT - n.failures=", n.failures))
hist(mod$cpo$cpo, breaks=100, main=paste0("CPO - n.failures=", n.failures))
}
#===================================================
### Extract posterior latent fields
#===================================================
print(names(mod$summary.random))
# Index for the temporal fields
index.temp.field = which(substr(names(mod$summary.random),1,2) == "z2")
print(index.temp.field)
# Index for the spatial fields
index.spat.field = which(substr(names(mod$summary.random),1,2) == "z1")
print(index.spat.field)
##--- Time-sitetype interaction
rur.mean <- ts(mod$summary.random$z3$mean[1:1826], start = c(2007, 1), frequency = 365)
rur.mean.low <- ts(mod$summary.random$z3$`0.025quant`[1:1826], start = c(2007, 1), frequency = 365)
rur.mean.upp <- ts(mod$summary.random$z3$`0.975quant`[1:1826], start = c(2007, 1), frequency = 365)
urb.mean <- ts(mod$summary.random$z3$mean[1827:(2*1826)], start = c(2007, 1), frequency = 365)
urb.mean.low <- ts(mod$summary.random$z3$`0.025quant`[1827:(2*1826)], start = c(2007, 1), frequency = 365)
urb.mean.upp <- ts(mod$summary.random$z3$`0.975quant`[1827:(2*1826)], start = c(2007, 1), frequency = 365)
rks.mean <- ts(mod$summary.random$z3$mean[(2*1826+1):(3*1826)], start = c(2007, 1), frequency = 365)
rks.mean.low <- ts(mod$summary.random$z3$`0.025quant`[(2*1826+1):(3*1826)], start = c(2007, 1), frequency = 365)
rks.mean.upp <- ts(mod$summary.random$z3$`0.975quant`[(2*1826+1):(3*1826)], start = c(2007, 1), frequency = 365)
lower = min(mod$summary.random$z3$`0.025quant`)
upper = max(mod$summary.random$z3$`0.975quant`)
png('time_sitetype_interaction.png', width = 8, height = 8, unit="in", res=600)
par(mfrow=c(3,1))
plot(rur.mean.upp, type="l", ylab=expression(paste(mu,"g/",m^3)), xlab="Year", ylim=c(lower,upper), main="RUR", lty="twodash", col="red")
lines(rur.mean.low, lty="twodash", col="red")
lines(rur.mean)
abline(h=0,col="blue")
plot(urb.mean.upp, type="l", ylab=expression(paste(mu,"g/",m^3)), xlab="Year", ylim=c(lower,upper), main="URB", lty="twodash", col="red")
lines(urb.mean.low, lty="twodash", col="red")
lines(urb.mean)
abline(h=0,col="blue")
plot(rks.mean.upp, type="l", ylab=expression(paste(mu,"g/",m^3)), xlab="Year", ylim=c(lower,upper), main="RKS", lty="twodash", col="red")
lines(rks.mean.low, lty="twodash", col="red")
lines(rks.mean)
abline(h=0,col="blue")
dev.off()
##--- Latent temporal fields
for(i in index.temp.field){
aqum.ts.mean <- ts(mod$summary.random[[i]]$mean, start = c(2007, 1), frequency = 365)
aqum.ts.low <- ts(mod$summary.random[[i]]$`0.025quant`, start = c(2007, 1), frequency = 365)
aqum.ts.upp <- ts(mod$summary.random[[i]]$`0.975quant`, start = c(2007, 1), frequency = 365)
aqum.ts.sd <- ts(mod$summary.random[[i]]$sd, start = c(2007, 1), frequency = 365)
png(paste0(names(mod$summary.random)[i],'.png'), width = 10, height = 8, unit="in", res=600)
par(mfrow=c(2,1))
plot(aqum.ts.upp,
main=paste0(names(mod$marginals.random)[i]," - mean and CI"),lty="twodash",
xlab="Year",ylab=expression(paste(mu,"g/",m^3)),
ylim=c(min(aqum.ts.low),max(aqum.ts.upp))
)
lines(aqum.ts.low, lty="twodash")
lines(aqum.ts.mean, col="red")
plot(aqum.ts.sd,
main=paste0(names(mod$marginals.random)[i]," - SD"),
xlab="Year",ylab=expression(paste(mu,"g/",m^3)))
dev.off()
}
##--- Latent spatial fields
for(i in index.spat.field){
length.grid.x=150
length.grid.y=150
# construct a lattice over the mesh extent
pred.grid.lat= inla.mesh.lattice(x=seq(extent(boundary)[1],extent(boundary)[2],length.out = length.grid.x),
y=seq(extent(boundary)[3],extent(boundary)[4],length.out = length.grid.y))
proj = inla.mesh.projector(mesh, lattice = pred.grid.lat)
spat.field = cbind(expand.grid(x=seq(extent(boundary)[1],extent(boundary)[2],length.out = length.grid.x),
y=seq(extent(boundary)[3],extent(boundary)[4],length.out = length.grid.y)),
mean.log=as.vector(inla.mesh.project(proj,field= mod$summary.random[[i]]$mean)),
sd.log=as.vector(inla.mesh.project(proj,field= mod$summary.random[[i]]$sd)))
coordinates(spat.field) = ~x+y
proj4string(spat.field) = proj4string(shape)
spat.field = subset(spat.field, over(spat.field, shape)$objectid==1)
spat.field = as.data.frame(spat.field)
pcm_lat_log = ggplot(spat.field) +
#gg(mesh.pcm) +
geom_raster(aes(x, y, fill = mean.log)) +
scale_fill_viridis(bquote(paste("log(",.(toupper(pol)),") (",mu,"g/",m^3,")"))) +
ggtitle(paste0(names(mod$marginals.random)[i]," - posterior mean")) +
geom_path(data = fortify(shape), aes(group = group, x = long, y = lat)) +
geom_path(data = fortify(london.shape), aes(group = group, x = long, y = lat), size=0.5) +
theme(plot.title = element_text(family = "sans", color="#666666", size=14, hjust=0.5, face="bold"),
axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text.x=element_blank(), axis.text.y=element_blank(),
legend.key.height = unit(1.5,"cm"),
legend.text = element_text(size=7, vjust=-0.5)) +
labs(x="Easting",y="Northing")
pcm_lat_sd_log = ggplot(spat.field) +
geom_raster(aes(x, y, fill = sd.log)) +
#gg(mesh.pcm) +
scale_fill_viridis(bquote(paste("log(",.(toupper(pol)),") (",mu,"g/",m^3,")"))) +
ggtitle(paste0(names(mod$marginals.random)[i]," - posterior SD")) +
geom_path(data = fortify(shape), aes(group = group, x = long, y = lat)) +
geom_path(data = fortify(london.shape), aes(group = group, x = long, y = lat), size=0.5) +
theme(plot.title = element_text(family = "sans", color="#666666", size=14, hjust=0.5, face="bold"),
axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text.x=element_blank(), axis.text.y=element_blank(),
legend.key.height = unit(1.5,"cm"),
legend.text = element_text(size=7, vjust=-0.5)) +
labs(x="Easting",y="Northing")
ggsave(paste0(names(mod$marginals.random)[i],'.png'), plot=pcm_lat_log, width = 8, height = 8, units="in", dpi=600)
}
#===================================================
### Predictive capability
#===================================================
sample.size=50
n.pred=100
predictions= list()
# extract sample.size values from each marginal of the fitted values
posterior.samples = inla.posterior.sample(n = sample.size, result=mod, intern = FALSE, use.improved.mean = TRUE, add.names = TRUE, seed = 0L)
# extract the posterior latent for the validation sites and reshape in order to have a matrix of nrow(valid) x sample.size
samples.fitted <- matrix(unlist(lapply(lapply(posterior.samples,"[[", "latent" ),"[", inla.stack.index(stack,"val.y")$data), use.names=FALSE), nrow = nrow(valid), byrow = F)
# extract sample.size values from marginal of the likelihood variance
# (it is always the last of the Gaussian observations, so we extract the position)
par.index = length(which(substr(rownames(mod$summary.hyperpar),1,26) == "Precision for the Gaussian"))
samples.var = 1/unlist(lapply(lapply(posterior.samples,"[[", "hyperpar" ),"[", par.index))
# NOTE: this is an approximation; in theory we should sample from the transformed posterior marginal
# (on the variance scale) rather than invert the values sampled from the posterior marginal of the precision
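# A possible alternative, left here as a commented sketch (the hyperparameter is located by name,
# which is an assumption to be checked against rownames(mod$summary.hyperpar)):
# prec.names <- grep("Precision for the Gaussian", rownames(mod$summary.hyperpar), value = TRUE)
# var.marg <- inla.tmarginal(function(x) 1/x, mod$marginals.hyperpar[[prec.names[length(prec.names)]]])
# samples.var.alt <- inla.rmarginal(sample.size, var.marg)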
samples.fitted = cbind(samples.fitted, obs.value=valid[,paste0(pol,"_log")])
predictive.capability = apply(samples.fitted, MARGIN = 1, FUN=extract.predicted, variance=samples.var)
# To aggregate the results we keep code, date and site type variables
predictive.capability = cbind(code=valid$code, date.idx=valid[,paste0("date.idx.",pol)], site.type=valid$site.type, as.data.frame(t(predictive.capability)))
predictive.capability$CRPS = crps_sample(y=samples.fitted[,ncol(samples.fitted)], dat=as.matrix(samples.fitted[,1:(ncol(samples.fitted)-1)])) # y is the observed value (last column); dat holds the posterior samples
predictive.capability$logscore = logscore
saveRDS(predictive.capability, "predictive_capability.rds")
##--- Run this chunk once all the model results have been extracted, so we have the predictions for all the monitors (the 6 validation sets)
files = list.files(path=list.dirs(path = "..", full.names = TRUE, recursive = TRUE), pattern ="predictive_capability.rds", full.names = TRUE)
predictive.capability = do.call(rbind, lapply(files, function (x) readRDS(x)))
predictive.capability.measures = c(my.validation(z = predictive.capability[,paste0(pol,"_obs")],
zhat = predictive.capability[,paste0(pol,"_pred")],
penalty = predictive.capability$pmcc_penalty,
coverage = predictive.capability$coverage),
LOGSCORE = predictive.capability$logscore,
COV_CI = mean(predictive.capability$ci_amplitude),
CRPS = mean(predictive.capability$CRPS, na.rm = T))
# NOTE: predictive.capability.measures can be computed by site-type, by day or by site,
# subsetting predictive.capability by site.type, date.idx or code respectively.
##---
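# For instance, a commented sketch using only objects created above - the same measures by site type:
# by.sitetype <- lapply(split(predictive.capability, predictive.capability$site.type),
#                       function(d) my.validation(z = d[, paste0(pol, "_obs")],
#                                                 zhat = d[, paste0(pol, "_pred")],
#                                                 penalty = d$pmcc_penalty,
#                                                 coverage = d$coverage))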
#===================================================
### Extract daily predictions
#===================================================
# NOTE: daily predictions here are extracted for each model;
# this chunk can be skipped and run only after re-running the model using all data (no validation)
n.samples = 50
n.days = 1826
n.locs = nrow(pred.grid)
A_pred =inla.spde.make.A(mesh, loc=cbind(pred.grid$easting, pred.grid$northing))
contents=as.data.frame(mod$misc$configs$contents)
contents$end = contents$start + contents$length - 1
fields = lapply(1:n.samples, FUN=extract.contents)
if (!file.exists(file.path(getwd(), "predictions_by_day"))){
dir.create(file.path(getwd(), "predictions_by_day"))
}
date = data.frame(date=seq.Date(as.Date("2007-01-01"), as.Date("2011-12-31"), "days"),
date.idx.no2=c(1:n_days),
year=as.numeric(substr(unique(aqum$date),1,4)),
month=as.numeric(substr(unique(aqum$date),6,7)),
day=as.numeric(substr(unique(aqum$date),9,10)))
invisible(lapply(1:n.days, FUN=compute.daily.predictions))
##--- predictions for days of pollution events
pred_2007_12_11 = readRDS("predictions_by_day/predictions_2007-12-11.rds")
pred_2007_12_19 = readRDS("predictions_by_day/predictions_2007-12-19.rds")
pred_2009_01_03 = readRDS("predictions_by_day/predictions_2009-01-03.rds")
pred_2010_11_16 = readRDS("predictions_by_day/predictions_2010-11-16.rds")
##--- predictions for days of low pollution
pred_2007_06_24 = readRDS("predictions_by_day/predictions_2007-06-24.rds")
pred_2008_06_22 = readRDS("predictions_by_day/predictions_2008-06-22.rds")
pred_2009_06_21 = readRDS("predictions_by_day/predictions_2009-06-21.rds")
pred_2010_06_20 = readRDS("predictions_by_day/predictions_2010-06-20.rds")
pred_events = rbind(cbind(mean = rowMeans(pred_2007_12_11[,-c(1:3)], na.rm = T), day = "2007-12-11", pred.grid),
cbind(mean = rowMeans(pred_2007_12_19[,-c(1:3)], na.rm = T), day = "2007-12-19", pred.grid),
cbind(mean = rowMeans(pred_2009_01_03[,-c(1:3)], na.rm = T), day = "2009-01-03", pred.grid),
cbind(mean = rowMeans(pred_2010_11_16[,-c(1:3)], na.rm = T), day = "2010-11-16", pred.grid),
cbind(mean = rowMeans(pred_2007_06_24[,-c(1:3)], na.rm = T), day = "2007-06-24", pred.grid),
cbind(mean = rowMeans(pred_2008_06_22[,-c(1:3)], na.rm = T), day = "2008-06-22", pred.grid),
cbind(mean = rowMeans(pred_2009_06_21[,-c(1:3)], na.rm = T), day = "2009-06-21", pred.grid),
cbind(mean = rowMeans(pred_2010_06_20[,-c(1:3)], na.rm = T), day = "2010-06-20", pred.grid))
ggsave("daily_predictions.png",
ggplot(pred_events) +
geom_raster(aes(x=easting, y=northing, fill = mean))+
facet_wrap(~day, ncol=4) +
scale_fill_viridis(bquote(paste("log(",.(toupper(pol)),") (",mu,"g/",m^3,")"))) +
ggtitle("Daily predictions on selected days with air pollution events (top row) or low concentration (bottom row)") +
geom_path(data = fortify(london.shape), aes(group = group, x = long, y = lat), size=0.5)+
geom_path(data = fortify(shape), aes(group = group, x = long, y = lat))+
geom_path(data = fortify(roads_major), aes(group = group, x = long, y = lat), size=0.3, color="grey85")+
theme(plot.title = element_text(family = "sans", color="#666666", size=14, hjust=0.5, face="bold"),
axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text.x=element_blank(), axis.text.y=element_blank(),
legend.key.height = unit(1,"cm"),
legend.text = element_text(size=7, vjust=-0.5)),
width=15, height=8, unit="in", dpi=600)
|
/joint_model.R
|
no_license
|
cf416/joint_model_no2_INLA_SPDE
|
R
| false | false | 22,193 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schedule.R
\name{print_session_info}
\alias{print_session_info}
\title{print_session_info}
\usage{
print_session_info(s)
}
\arguments{
\item{s}{}
}
\value{
character string with session info
}
\description{
print_session_info
}
|
/man/print_session_info.Rd
|
no_license
|
jasonmtroos/rook
|
R
| false | true | 306 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ProjectMethods.R
\name{getGroupSummary}
\alias{getGroupSummary}
\title{Get summary for Groups in ArchRProject}
\usage{
getGroupSummary(
ArchRProj = NULL,
groupBy = "Sample",
select = "TSSEnrichment",
summary = "median",
removeNA = TRUE
)
}
\arguments{
\item{ArchRProj}{An \code{ArchRProject} object.}
\item{groupBy}{The name of the column in \code{cellColData} to use for grouping multiple cells together for summarizing information.}
\item{select}{A character vector containing the column names to select from \code{cellColData}.}
\item{summary}{A character vector describing which method for summarizing across group. Options include "median", "mean", or "sum".}
\item{removeNA}{Remove NAs before computing the summary.}
}
\description{
This function summarizes a numeric cellColData entry across groupings in an ArchRProject.
}
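\examples{
## A minimal sketch added for illustration (not from the package authors); it assumes an
## existing ArchRProject object named `proj` and mirrors the defaults shown in the usage section above.
\dontrun{
getGroupSummary(ArchRProj = proj, groupBy = "Sample",
  select = "TSSEnrichment", summary = "median", removeNA = TRUE)
}
}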
|
/man/getGroupSummary.Rd
|
permissive
|
GreenleafLab/ArchR
|
R
| false | true | 914 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getGEenriched.R
\name{get_kernel}
\alias{get_kernel}
\title{Envirotype-informed kernels for statistical models}
\usage{
get_kernel(
K_E = NULL,
K_G,
Y,
model = NULL,
intercept.random = FALSE,
reaction = FALSE,
size_E = NULL
)
}
\arguments{
\item{K_E}{list. Contains n matrices of envirotype-related kernels (n x n genotype-environment combinations). If NULL, benchmark genomic kernels are built.}
\item{K_G}{list. Contains matrices of genomic-enabled kernels (p x p genotypes). See BGGE::getK for more information.}
\item{Y}{data.frame. Should contain the following columns: environment, genotype, phenotype.}
\item{model}{character. Model structure for genomic prediction. It can be \code{c('MM','MDs','E-MM','E-MDs')}, in which MM is the main effect model (\eqn{Y=fixed + G}) and MDs adds the interaction (\eqn{Y=fixed+G+GxE}).}
\item{intercept.random}{boolean. Indicates the inclusion of a genomic random intercept (default = FALSE). For more details, see BGGE package vignette.}
\item{reaction}{boolean. Indicates the inclusion of a reaction-norm-based GxE kernel (default = FALSE).}
\item{size_E}{character. \code{size_E=c('full','environment')}. In the first case, 'full' means that the environmental relationship kernel has the dimensions of n x n observations, where n = pq (p genotypes, q environments). If 'environment', the size of the E-kernel is q x q.}
}
\value{
A list of kernels (relationship matrices) to be used in genomic models.
}
\description{
Get multiple genomic and/or envirotype-informed kernels for Bayesian genomic prediction.
}
\details{
TODO Define models.
}
\examples{
### Loading the genomic, phenotype and weather data
data('maizeG'); data('maizeYield'); data("maizeWTH")
### Y = fixed + G
MM <- get_kernel(K_G = list(G = as.matrix(maizeG)),
Y = maizeYield, model = 'MM')
### Y = fixed + G + GE
MDs <- get_kernel(K_G = list(G = as.matrix(maizeG)),
Y = maizeYield, model = 'MDs')
### Enriching models with weather data
W.cov <- W.matrix(env.data = maizeWTH)
H <- EnvKernel(env.data = W.cov, Y = maizeYield, merge = TRUE, env.id = 'env')
EMM <- get_kernel(K_G = list(G = as.matrix(maizeG)),
Y = maizeYield,K_E = list(W = H$envCov),
model = 'EMM') # or model = MM
### Y = fixed + G + W + GE
EMDs <- get_kernel(K_G = list(G = as.matrix(maizeG)),
Y = maizeYield,
K_E = list(W = H$envCov),
model = 'MDs') # or model = MDs
### Y = fixed + W + G + GW
RN <- get_kernel(K_G = list(G = as.matrix(maizeG)),
Y = maizeYield,
K_E = list(W = H$envCov),
model = 'RNMM')
### Y = fixed + W + G + GW + GE
fullRN <- get_kernel(K_G = list(G = as.matrix(maizeG)),
Y = maizeYield,
K_E = list(W = H$envCov),
model = 'RNMDs')
}
\seealso{
BGGE::getK W.matrix
}
\author{
Germano Costa Neto
}
|
/man/get_kernel.Rd
|
no_license
|
rfritscheneto/EnvRtype
|
R
| false | true | 2,993 |
rd
|
## This file contains R functions for solving for the inverse
## of a given matrix, making use of caching for quick retrieval
## of past solutions.
## makeCacheMatrix
## Creates a cache to store a matrix and its inverse.
## Functions provided:
## - set the value of the matrix
## - get the value of the matrix
## - set the value of the inverse
## - get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(i) inverse <<- i
getinverse <- function() inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve
## Solves for the inverse of the given matrix. Makes use of a
## cache for quick retrieval of previous results.
##
## Parameters
## x : makeCacheMatrix containing matrix to solve for.
##   ... : additional parameters used by the solve() function.
## Returns
## inverse of the matrix (as a matrix).
cacheSolve <- function(x, ...) {
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data, ...)
x$setinverse(inverse)
inverse
}
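## Example usage (an illustrative addition, not part of the original assignment file);
## any invertible matrix works - here a 2x2 diagonal matrix.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2))
cacheSolve(cm)   # first call computes and caches the inverse: diag(0.5, 2)
cacheSolve(cm)   # second call prints "getting cached data" and returns the cached inverse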
|
/cachematrix.R
|
no_license
|
mljoslyn/ProgrammingAssignment2
|
R
| false | false | 1,416 |
r
|
###############################################################
###############################################################
##           MODEL M/M/1/K/K - Finite Population.          ##
###############################################################
###############################################################
NewInput.MM1KK <- function(lambda=0, mu=0, k=1, method=3)
{
res <- list(lambda = lambda, mu = mu, k = k, method = method)
class(res) <- "i_MM1KK"
res
}
CheckInput.i_MM1KK <- function(x, ...)
{
MM1KK_class <- "The class of the object x has to be M/M/1/K/K (i_MM1KK)"
MM1KK_anomalous <- "Some value of lambda, mu or k is anomalous. Check the values."
MM1KK_method <- "method variable has to be 0 to be exact calculus, 1 to be aproximate calculus, 2 to use Jain's Method or 3 to use Poisson truncated distribution"
if (!inherits(x, "i_MM1KK"))
stop(MM1KK_class)
if (is.anomalous(x$lambda) || is.anomalous(x$mu) || is.anomalous(x$k))
stop(MM1KK_anomalous)
if (x$lambda < 0)
stop(ALL_lambda_zpositive)
if (x$mu <= 0)
stop(ALL_mu_positive)
if (x$k < 1)
stop(ALL_k_warning)
if (!is.wholenumber(x$k))
stop(ALL_k_integer)
if (x$method != 0 && x$method != 1 && x$method != 2 && x$method != 3)
stop(MM1KK_method)
}
MM1KK_InitPn_Aprox_Aux <- function(n, lambda, mu, c, k, m)
{
(lfactorial(k) - lfactorial(k-n)) + (n * log(lambda/mu))
}
MM1KK_InitPn_Aprox <- function(x)
{
ProbFactCalculus(x$lambda, x$mu, 1, x$k, x$k, x$k,
MM1KK_InitPn_Aprox_Aux, MM1KK_InitPn_Aprox_Aux, MM1KK_InitPn_Aprox_Aux)
}
MM1KK_InitPn_Exact <- function(x)
{
pn <- c(0:x$k)
z <- x$mu / x$lambda
u <- x$lambda / x$mu
pn[1] <- B_erlang(x$k, z)
totu <- 1
totk <- 1
i <- 2
while (i <= (x$k + 1))
{
totu <- totu * u
totk <- totk * ((x$k + 1) - i + 1)
pn[i] <- pn[1] * totu * totk
i <- i + 1
}
pn
}
MM1KK_method2_Aux <- function(x, i)
{
r <- x$lambda/x$mu
if (i == 0)
{
(x$k - i) * r / (i+1)
}
else
{
(x$k - i) * r
}
}
MM1KK_method2_Prod <- function(x,n)
{
prod <- 1
for (i in 0:(n-1))
{
prod <- prod * MM1KK_method2_Aux(x, i)
}
prod
}
MM1KK_method2_Prob <- function(x)
{
pn <- c()
sumAux <- 1
for (i in (1:x$k))
{
sumAux <- sumAux + MM1KK_method2_Prod(x, i)
}
pn[1] <- 1/sumAux
for (i in 2:(x$k+1))
{
pn[i] <- MM1KK_method2_Aux(x, i-2) * pn[i-1]
}
pn
}
MM1KK_method3_Prob <- function(x)
{
z <- x$mu/x$lambda
funMethod3 <- function(n){ dpois(x$k-n, z)/ppois(x$k, z) }
pn <- sapply(0:x$k, funMethod3)
pn
}
MM1KK_InitPn <- function(x)
{
if (x$method == 0)
pn <- MM1KK_InitPn_Exact(x)
else if (x$method == 1)
pn <- MM1KK_InitPn_Aprox(x)
else if (x$method == 2)
pn <- MM1KK_method2_Prob(x)
else
pn <- MM1KK_method3_Prob(x)
pn
}
QueueingModel.i_MM1KK <- function(x, ...)
{
# Is everything fine??
CheckInput.i_MM1KK(x, ...)
z <- x$mu / x$lambda
Pn <- MM1KK_InitPn(x)
RO <- 1 - Pn[1]
Throughput <- x$mu * RO
L <- x$k - (Throughput / x$lambda)
W <- (x$k / Throughput) - ( 1 / x$lambda)
Wq <- W - (1 / x$mu)
Lq <- Throughput * Wq
WWs <- (x$k / RO) - z
SP <- 1 + z
QnAux <- function(n){ Pn[n] * (x$k - (n-1)) / (x$k - L) }
Qn <- sapply(1:x$k, QnAux)
if (x$k == 1)
{
Wqq <- NA
Lqq <- NA
}
else
{
Wqq <- Wq / (1 - Qn[1])
Lqq <- Wqq * x$mu
}
#Wqq <- Wq / RO
FW <- function(t){
aux <- function(i, t) { Qn[i] * ppois(i-1, x$mu * t) }
1 - sum(sapply(seq(1, x$k, 1), aux, t))
}
if (x$k == 1)
FWq <- function(t){ 0 }
else
{
FWq <- function(t){
aux <- function(i, t) { Qn[i+1] * ppois(i-1, x$mu * t) }
1 - sum(sapply(seq(1, x$k-1, 1), aux, t))
}
}
# variances
VN <- sum( (0:x$k)^2 * Pn ) - (L^2)
xFWc <- Vectorize(function(t){t * (1 - FW(t))})
xFWqc <- Vectorize(function(t){t * (1 - FWq(t))})
FWInt <- integrate(xFWc, 0, Inf)
if (FWInt$message == "OK")
VT <- (2 * FWInt$value) - (W^2)
else
VT <- NA
if (x$k == 1)
{
VNq <- 0
VTq <- 0
}
else
{
VNq <- sum( c(0, 0, 1:(x$k-1))^2 * Pn ) - (Lq^2)
FWqInt <- integrate(xFWqc, 0, Inf)
if (FWqInt$message == "OK")
VTq <- (2 * FWqInt$value) - (Wq^2)
else
VTq <- NA
}
# The result
res <- list(
Inputs=x, RO = RO, Lq = Lq, VNq = VNq, Wq = Wq, VTq = VTq, Throughput = Throughput,
L = L, VN = VN, W = W, VT = VT, Lqq = Lqq, Wqq = Wqq, WWs = WWs, SP = SP, Pn = Pn, Qn = Qn, FW = FW, FWq = FWq
)
class(res) <- "o_MM1KK"
res
}
Inputs.o_MM1KK <- function(x, ...) { x$Inputs }
RO.o_MM1KK <- function(x, ...) { x$RO }
Lq.o_MM1KK <- function(x, ...) { x$Lq }
VNq.o_MM1KK <- function(x, ...) { x$VNq }
Wq.o_MM1KK <- function(x, ...) { x$Wq }
VTq.o_MM1KK <- function(x, ...) { x$VTq }
L.o_MM1KK <- function(x, ...) { x$L }
VN.o_MM1KK <- function(x, ...) { x$VN }
W.o_MM1KK <- function(x, ...) { x$W }
VT.o_MM1KK <- function(x, ...) { x$VT }
Lqq.o_MM1KK <- function(x, ...) { x$Lqq }
Wqq.o_MM1KK <- function(x, ...) { x$Wqq }
WWs.o_MM1KK <- function(x, ...) { x$WWs }
SP.o_MM1KK <- function(x, ...) { x$SP }
Pn.o_MM1KK <- function(x, ...) { x$Pn }
Qn.o_MM1KK <- function(x, ...) { x$Qn }
Throughput.o_MM1KK <- function(x, ...) { x$Throughput }
Report.o_MM1KK <- function(x, ...)
{
reportAux(x)
}
summary.o_MM1KK <- function(object, ...)
{
aux <- list(el=CompareQueueingModels(object))
class(aux) <- "summary.o_MM1"
aux
}
print.summary.o_MM1KK <- function(x, ...)
{
print_summary(x, ...)
}
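## Illustrative usage, kept as comments so that nothing executes when the package sources
## this file (parameter values are arbitrary): a machine-repair system with k = 5 machines.
# inp <- NewInput.MM1KK(lambda = 0.25, mu = 4, k = 5, method = 3)
# out <- QueueingModel(inp)   # dispatches to QueueingModel.i_MM1KK
# Report(out)                 # dispatches to Report.o_MM1KK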
|
/R/MM1KK.R
|
no_license
|
cran/queueing
|
R
| false | false | 5,686 |
r
|
## CB 2009/5,6,10 2010/6 2013/10
## NOTE the C code does not use long double for accumulation.
.means_simple_triplet_matrix <-
function(x, DIM, na.rm)
{
s <- .Call(R_sums_stm, x, DIM, na.rm)
n <- c(x$nrow, x$ncol)[-DIM]
if (na.rm) {
x$v <- is.na(x$v)
nna <- .Call(R_sums_stm, x, DIM, FALSE)
s / (n - nna)
}
else
s / n
}
## R interfaces
row_sums <-
function(x, na.rm = FALSE, dims = 1, ...)
UseMethod("row_sums")
row_sums.default <-
function(x, na.rm = FALSE, dims = 1, ...)
base::rowSums(x, na.rm, dims, ...)
row_sums.simple_triplet_matrix <-
function(x, na.rm = FALSE, dims = 1, ...)
.Call(R_sums_stm, x, 1L, na.rm)
row_sums.dgCMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::rowSums(x, na.rm = na.rm, dims = dims, ...)
row_sums.dgTMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::rowSums(x, na.rm = na.rm, dims = dims, ...)
col_sums <-
function(x, na.rm = FALSE, dims = 1, ...)
UseMethod("col_sums")
col_sums.default <-
function(x, na.rm = FALSE, dims = 1, ...)
base::colSums(x, na.rm, dims, ...)
col_sums.simple_triplet_matrix <-
function(x, na.rm = FALSE, dims = 1, ...)
.Call(R_sums_stm, x, 2L, na.rm)
col_sums.dgCMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::colSums(x, na.rm = na.rm, dims = dims, ...)
col_sums.dgTMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::colSums(x, na.rm = na.rm, dims = dims, ...)
row_means <-
function(x, na.rm = FALSE, dims = 1, ...)
UseMethod("row_means")
row_means.default <-
function(x, na.rm = FALSE, dims = 1, ...)
base::rowMeans(x, na.rm, dims, ...)
row_means.simple_triplet_matrix <-
function(x, na.rm = FALSE, dims = 1, ...)
.means_simple_triplet_matrix(x, DIM = 1L, na.rm)
row_means.dgCMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::rowMeans(x, na.rm = na.rm, dims = dims, ...)
row_means.dgTMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::rowMeans(x, na.rm = na.rm, dims = dims, ...)
col_means <-
function(x, na.rm = FALSE, dims = 1, ...)
UseMethod("col_means")
col_means.default <-
function(x, na.rm = FALSE, dims = 1, ...)
base::colMeans(x, na.rm, dims, ...)
col_means.simple_triplet_matrix <-
function(x, na.rm = FALSE, dims = 1, ...)
.means_simple_triplet_matrix(x, DIM = 2L, na.rm)
col_means.dgCMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::colMeans(x, na.rm = na.rm, dims = dims, ...)
col_means.dgTMatrix <-
function(x, na.rm = FALSE, dims = 1, ...)
Matrix::colMeans(x, na.rm = na.rm, dims = dims, ...)
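## Row/column p-norms: p = 2 gives Euclidean norms, p = 1 sums of absolute values,
## p = Inf the maxima of |x| per row/column, and any other p the usual (sum |x|^p)^(1/p).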
row_norms <-
function(x, p = 2)
{
if(p == 2)
sqrt(row_sums(x ^ 2))
else if(p == 1)
row_sums(abs(x))
else if(p == Inf)
c(rollup(abs(x), 2L, FUN = max))
else
row_sums(abs(x) ^ p) ^ (1/p)
}
col_norms <-
function(x, p = 2)
{
if(p == 2)
sqrt(col_sums(x ^ 2))
else if(p == 1)
col_sums(abs(x))
else if(p == Inf)
c(rollup(abs(x), 1L, FUN = max))
else
col_sums(abs(x) ^ p) ^ (1/p)
}
##
.nnzero <-
function(x, scale = FALSE) {
v <- c("simple_triplet_matrix", "simple_sparse_array")
if (inherits(x, v))
v <- x$v
else {
x <- as.array(x)
v <- x
}
v <- v == vector(typeof(v), 1L)
v <- v + 1L
n <- length(v)
v <- tabulate(v, 2L)
v <- c(v, n - sum(v))
names(v) <- c("nnzero", "nzero", NA)
if (scale)
v <- v / prod(dim(x))
v
}
###
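## Small illustration (a sketch, kept as comments so nothing runs when the package is built);
## simple_triplet_matrix() is the constructor defined elsewhere in this package.
# x <- simple_triplet_matrix(i = c(1, 1, 2), j = c(1, 2, 2), v = c(3, 4, 5), nrow = 2, ncol = 2)
# row_sums(x)    # 7 5
# row_norms(x)   # 5 5   (Euclidean norms of the rows)
# col_means(x)   # 1.5 4.5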
|
/R/stm.R
|
no_license
|
cran/slam
|
R
| false | false | 3,433 |
r
|
##subsampling mtDNA reads and identifying heteroplasmic sites
##Reena Debray
#input for this script: a mitocaller output file, uploaded and named "mtcalls"
#output of this script: a data frame named "output" with information on the position, genotypes, and allele frequencies of each heteroplasmic site
#initialize data frame of heteroplasmic sites
output<-data.frame(matrix(nrow=0,ncol=3))
options(stringsAsFactors=F) #rbind command later in the script will not work otherwise
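#--- illustrative only (not part of the original script): a hypothetical one-row
#--- 'mtcalls' object showing the layout the loop below assumes -- column 1 = position,
#--- column 2 = a 5-character prefix followed by comma-separated read bases,
#--- column 3 = a 6-character prefix followed by comma-separated base qualities.
#--- The real prefixes and column order should be checked against actual mitocaller output.
# mtcalls <- data.frame(pos   = 1,
#                       reads = "READ:A,A,A,C,C,C,C,C",
#                       quals = "QUALS:30,30,30,12,35,35,35,35")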
for (line in seq(1,nrow(mtcalls))){
#remove reads with base quality < 20
reads=unlist(strsplit(substr(mtcalls[line,2],6,nchar(as.character(mtcalls[line,2]))),","))
base_quals=unlist(strsplit(substr(mtcalls[line,3],7,nchar(as.character(mtcalls[line,3]))),","))
reads_and_bqs=data.frame(reads,base_quals)
reads_and_bqs_filtered<-reads_and_bqs[as.numeric(as.character(reads_and_bqs$base_quals))>=20,]
#count number of reads; discard if less than 200
if (nrow(reads_and_bqs_filtered)>=200){
#downsample sites with more than 200 reads to 200 reads
if (nrow(reads_and_bqs_filtered)>200) {reads_and_bqs_subsampled<-reads_and_bqs_filtered[sample(seq(1,nrow(reads_and_bqs_filtered)),size=200,replace=F),]} else {reads_and_bqs_subsampled<-reads_and_bqs_filtered} #sites with exactly 200 reads are kept as-is
#keep alleles that are supported by 8 or more reads
supported_alleles=c()
for (base in c("A","C","G","T")) {
if (nrow(reads_and_bqs_subsampled[reads_and_bqs_subsampled$reads==base,])>=8) {supported_alleles<-c(supported_alleles,as.character(reads_and_bqs_subsampled[reads_and_bqs_subsampled$reads==base,"reads"]))}
}
#if there are multiple alleles (heteroplasmy), recalculate frequencies based on supported alleles
if (length(table(supported_alleles))>1){
new_freqs<-data.frame(table(supported_alleles))
new_freqs$percentreads<-round(new_freqs$Freq/sum(new_freqs$Freq),4)
#re-format results and append to data frame of heteroplasmic sites
genotypes<-paste(new_freqs$supported_alleles,collapse="/")
percentreads<-paste(new_freqs$percentreads,collapse="/")
newline=c(mtcalls[line,1],genotypes,percentreads)
output<-rbind(output,newline)
}
}
}
colnames(output)=c("Site","Genotype","Frequency")
|
/Code for subsampling mtDNA reads and identifying heteroplasmic sites.R
|
no_license
|
reenadebray/mtDNA_copy_number
|
R
| false | false | 2,185 |
r
|
##subsampling mtDNA reads and identifying heteroplasmic sites
##Reena Debray
#input for this script: a mitocaller output file, uploaded and named "mtcalls"
#output of this script: a data frame named "output" with information on the position, genotypes, and allele frequencies of each heteroplasmic site
#initialize data frame of heteroplasmic sites
output<-data.frame(matrix(nrow=0,ncol=3))
options(stringsAsFactors=F) #rbind command later in the script will not work otherwise
for (line in seq(1,nrow(mtcalls))){
#remove reads with base quality < 20
reads=unlist(strsplit(substr(mtcalls[line,2],6,nchar(as.character(mtcalls[line,2]))),","))
base_quals=unlist(strsplit(substr(mtcalls[line,3],7,nchar(as.character(mtcalls[line,3]))),","))
reads_and_bqs=data.frame(reads,base_quals)
reads_and_bqs_filtered<-reads_and_bqs[as.numeric(as.character(reads_and_bqs$base_quals))>=20,]
#count number of reads; discard if less than 200
if (nrow(reads_and_bqs_filtered)>=200){
#downsample sites with more than 200 reads to 200 reads
if (nrow(reads_and_bqs_filtered)>200) {reads_and_bqs_subsampled<-reads_and_bqs_filtered[sample(seq(1,nrow(reads_and_bqs_filtered)),size=200,replace=F),]} else {reads_and_bqs_subsampled<-reads_and_bqs_filtered} #sites with exactly 200 reads are kept as-is
#keep alleles that are supported by 8 or more reads
supported_alleles=c()
for (base in c("A","C","G","T")) {
if (nrow(reads_and_bqs_subsampled[reads_and_bqs_subsampled$reads==base,])>=8) {supported_alleles<-c(supported_alleles,as.character(reads_and_bqs_subsampled[reads_and_bqs_subsampled$reads==base,"reads"]))}
}
#if there are multiple alleles (heteroplasmy), recalculate frequencies based on supported alleles
if (length(table(supported_alleles))>1){
new_freqs<-data.frame(table(supported_alleles))
new_freqs$percentreads<-round(new_freqs$Freq/sum(new_freqs$Freq),4)
#re-format results and append to data frame of heteroplasmic sites
genotypes<-paste(new_freqs$supported_alleles,collapse="/")
percentreads<-paste(new_freqs$percentreads,collapse="/")
newline=c(mtcalls[line,1],genotypes,percentreads)
output<-rbind(output,newline)
}
}
}
colnames(output)=c("Site","Genotype","Frequency")
|
#Secretary problem
make_choice <- function(N,split_number)
{
input_list<- sample(1:N,N,replace=FALSE)
mx<- -1
eval_group <- input_list[1:split_number]
for(i in eval_group)
{
if(i>mx)
{
mx <- i
}
}
selection_group <- input_list[(split_number+1):N]
for(i1 in selection_group)
{
if(i1>mx)
{
return(i1)
}
}
  # If no candidate beats the selection criterion, the original version returned -1
  # YY: we have to select one, so the last item becomes our choice.
return(input_list[N])
# YY: return(-1)
}
find_optimal <- function(N)
{
mx <- -1
optimal_split <- -1
for(split_number in 1:(N/2))
{
K <- 5000 #We repeat the process K times
count <- 0 # No. of times we get N (100)
for(j in 1:K)
{
if(make_choice(N,split_number)==N)
{
count <- count+1
}
}
if(count>mx)
{
mx<-count
optimal_split <- split_number
}
#cat(paste0(count," ",split_number,"\n"))
}
return(optimal_split)
}
#Driver Code
N <- 100 #We consider the case of hundred secretaries
cat(paste0("Optimal split for N=100 will be at ",find_optimal(100)))
|
/2019/Assignment/FE8828-Siddharth Lalwani/Assignment 2/YY_Secretary_problem.R
|
no_license
|
leafyoung/fe8828
|
R
| false | false | 1,135 |
r
|
#Secretary problem
make_choice <- function(N,split_number)
{
input_list<- sample(1:N,N,replace=FALSE)
mx<- -1
eval_group <- input_list[1:split_number]
for(i in eval_group)
{
if(i>mx)
{
mx <- i
}
}
selection_group <- input_list[(split_number+1):N]
for(i1 in selection_group)
{
if(i1>mx)
{
return(i1)
}
}
  # If no candidate beats the selection criterion, the original version returned -1
  # YY: we have to select one, so the last item becomes our choice.
return(input_list[N])
# YY: return(-1)
}
find_optimal <- function(N)
{
mx <- -1
optimal_split <- -1
for(split_number in 1:(N/2))
{
K <- 5000 #We repeat the process K times
count <- 0 # No. of times we get N (100)
for(j in 1:K)
{
if(make_choice(N,split_number)==N)
{
count <- count+1
}
}
if(count>mx)
{
mx<-count
optimal_split <- split_number
}
#cat(paste0(count," ",split_number,"\n"))
}
return(optimal_split)
}
#Driver Code
N <- 100 #We consider the case of hundred secretaries
cat(paste0("Optimal split for N=100 will be at ",find_optimal(100)))
|
#' Invert the first \code{K} eigendirections of the matrix. If \code{K} is
#' not specified, the function takes the directions with eigenvalues greater
#' than the given threshold \code{th}.
#' If \code{th} is also not specified, then all directions are inverted
#' (equivalent to \code{\link[base]{solve}}).
#'
#' @title Invert first K eigendirections of the matrix.
#' @param M matrix to solve
#' @param K number of directions to invert
#' @param th fixed threshold for eigenvalues
#' @return Pseudoinverse matrix
#' @export
pseudoinverse = function(M,K=NULL,th=NULL){
if (!is.matrix(M) || dim(M)[1] != dim(M)[2])
stop("M must be a square matrix")
if (!is.null(K) && !is.positiveint(K+1))
stop("K must be a nonnegative integer")
nbasis = dim(M)[1]
if (is.null(nbasis) || nbasis ==1)
res = 1 / M
else {
E = eigen(M)
vals = 1 / E$values
if (is.null(K))
K = nbasis
if (!is.null(th))
K = min(K, sum(abs(E$values) > th))
vals[ 1:nbasis > K ] = 0
res = E$vectors %*% diag(vals) %*% t(Conj(E$vectors))
}
res
}
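## Illustrative usage sketch (not part of the original file): on a small symmetric
## matrix, inverting all eigendirections should agree with solve(), while a small K
## keeps only the leading directions (a low-rank approximation of the inverse).
# M <- crossprod(matrix(rnorm(9), 3, 3))  # symmetric positive definite
# max(abs(pseudoinverse(M) - solve(M)))   # ~ 0 when all directions are inverted
# pseudoinverse(M, K = 2)                 # invert only the 2 leading eigendirections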
|
/freqdom/R/pseudoinverse.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 1,045 |
r
|
#' Invert the first \code{K} eigendirections of the matrix. If \code{K} is
#' not specified, the function takes the directions with eigenvalues greater
#' than the given threshold \code{th}.
#' If \code{th} is also not specified, then all directions are inverted
#' (equivalent to \code{\link[base]{solve}}).
#'
#' @title Invert first K eigendirections of the matrix.
#' @param M matrix to solve
#' @param K number of directions to invert
#' @param th fixed threshold for eigenvalues
#' @return Pseudoinverse matrix
#' @export
pseudoinverse = function(M,K=NULL,th=NULL){
if (!is.matrix(M) || dim(M)[1] != dim(M)[2])
stop("M must be a square matrix")
if (!is.null(K) && !is.positiveint(K+1))
stop("K must be a nonnegative integer")
nbasis = dim(M)[1]
if (is.null(nbasis) || nbasis ==1)
res = 1 / M
else {
E = eigen(M)
vals = 1 / E$values
if (is.null(K))
K = nbasis
if (!is.null(th))
K = min(K, sum(abs(E$values) > th))
vals[ 1:nbasis > K ] = 0
res = E$vectors %*% diag(vals) %*% t(Conj(E$vectors))
}
res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pPPCA.R
\name{pPPCA}
\alias{pPPCA}
\title{Penalized Probabilistic PCA}
\usage{
pPPCA(
lambda,
Tvotes = 1000,
verbose = FALSE,
penalty = 1,
tau = 0.001,
beta = NULL
)
}
\arguments{
\item{lambda}{a numerical vector of sample eigenvalues}
\item{Tvotes}{the number of possible tuning parameter values to be searched}
\item{verbose}{a logical to indicate whether the details of the penalized voting results should be shown}
\item{penalty}{an integer indicating the type of penalty function to use. The default option is 1, which corresponds to the model in Deng and Craiu (2021).}
\item{tau}{a tolerance threshold for the smallest eigenvalue, the default value is 0.001.}
\item{beta}{a numeric between 0 and 1 indicating the weight towards penalty function 1 or 2.}
}
\value{
an integer \eqn{K} between 1 and \eqn{n}.
}
\description{
The function returns the results of penalized profile
log-likelihood given a matrix of data or a vector of sample
eigenvalues. The data matrix is assumed to follow the decomposition
\eqn{X = WL + \epsilon}, where rows of \eqn{X} are decomposed to a linear projection
in an orthogonal space plus error. The solution finds the
rank of \eqn{W}, which represents some hidden structure in
the data, such that \eqn{X-WL} has independent and
identically distributed components.
}
\examples{
\dontrun{
library(MASS)
normdata <- mvrnorm(1000, mu = rep(0,50), Sigma = diag(1,50))
eigen_values <- eigen(as.matrix(Matrix::nearPD(stats::cov(scale(normdata)))$mat))$val
pPPCA(lambda = eigen_values) # supply the sample eigenvalues
}
}
\keyword{PCA,}
\keyword{dimension}
\keyword{effective}
\keyword{log-likelihood,}
\keyword{parameter,}
\keyword{penalized}
\keyword{penalty}
\keyword{probabilistic}
\keyword{profile}
\keyword{tuning}
|
/man/pPPCA.Rd
|
no_license
|
WeiAkaneDeng/SPAC2
|
R
| false | true | 1,855 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pPPCA.R
\name{pPPCA}
\alias{pPPCA}
\title{Penalized Probabilistic PCA}
\usage{
pPPCA(
lambda,
Tvotes = 1000,
verbose = FALSE,
penalty = 1,
tau = 0.001,
beta = NULL
)
}
\arguments{
\item{lambda}{a numerical vector of sample eigenvalues}
\item{Tvotes}{the number of possible tuning parameter values to be searched}
\item{verbose}{a logical to indicate whether the details of the penalized voting results should be shown}
\item{penalty}{an integer indicating the type of penalty function to use. The default option is 1, which corresponds to the model in Deng and Craiu (2021).}
\item{tau}{a tolerance threshold for the smallest eigenvalue, the default value is 0.001.}
\item{beta}{a numeric between 0 and 1 indicating the weight towards penalty function 1 or 2.}
}
\value{
an integer \eqn{K} between 1 and \eqn{n}.
}
\description{
The function returns the results of penalized profile
log-likelihood given a matrix of data or a vector of sample
eigenvalues. The data matrix is assumed to follow the decomposition
\eqn{X = WL + \epsilon}, where rows of \eqn{X} are decomposed to a linear projection
in an orthogonal space plus error. The solution finds the
rank of \eqn{W}, which represents some hidden structure in
the data, such that \eqn{X-WL} has independent and
identically distributed components.
}
\examples{
\dontrun{
library(MASS)
normdata <- mvrnorm(1000, mu = rep(0,50), Sigma = diag(1,50))
eigen_values <- eigen(as.matrix(Matrix::nearPD(stats::cov(scale(normdata)))$mat))$val
pPPCA(lambda = eigen_values) # supply the sample eigenvalues
}
}
\keyword{PCA,}
\keyword{dimension}
\keyword{effective}
\keyword{log-likelihood,}
\keyword{parameter,}
\keyword{penalized}
\keyword{penalty}
\keyword{probabilistic}
\keyword{profile}
\keyword{tuning}
|
library(Seurat)
cellTypes = c('astro','microglia','n_ex','n_inh','oligo','opc')
#cellTypes = c('microglia','n_ex','n_inh')
tmp_pheno = NULL
tmp = lapply(cellTypes,function(cellType){
if (cellType %in% c('n_ex','n_inh')){
n = readRDS('/home/brasel/SingleCellProjects/dataObjects/neuron.rds')
n$clusters = Idents(n)
if(cellType == 'n_ex') { n = subset(n,subset=clusters %in% c(0:2,6))
} else{ n = subset(n,subset=clusters %in% c(3:5)) }
} else { n = readRDS(sprintf('/home/brasel/SingleCellProjects/dataObjects/%s.rds',cellType)) }
clusterCounts = table(n@meta.data$Sample_ID,Idents(n))
totalCellType = rowSums(clusterCounts)
clusterCounts = clusterCounts/totalCellType
pheno = data.frame(rbind(clusterCounts))
colnames(pheno) = paste0('prop_',0:(ncol(clusterCounts)-1)) #c('prop_0','prop_1','prop_2','prop_3','prop_4')
clusters = 0:(ncol(clusterCounts)-1)
phenoFile = read.csv('/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/Brain_pheno_jorge.csv',row.names=2)
phenoFile = phenoFile[rownames(pheno),]
### Add in Age of Death ###
AOD = unique(n@meta.data[,c('Sample_ID','AOD')])
pheno = merge(pheno,AOD,by.x='row.names',by.y='Sample_ID')
pheno$AOD <- as.numeric(pheno$AOD)
library(lme4)
### add in one other covariate at a time to see if it removes the significance
covarPos <- c()
#Sex (additive Model)
SEX = unique(n@meta.data[,c('Sample_ID','Gender')])
colnames(SEX)[2] = 'SEX'
pheno = merge(pheno,SEX,by.x='Row.names',by.y='Sample_ID')
rs1582763 = unique(n@meta.data[,c('Sample_ID','MS4')])
colnames(rs1582763)[2] = 'rs1582763'
pheno = merge(pheno,rs1582763,by.x='Row.names',by.y='Sample_ID')
pheno[grep('GG',pheno$rs1582763),'rs1582763'] <- 0
pheno[grep('AG',pheno$rs1582763),'rs1582763'] <- 1
pheno[grep('AA',pheno$rs1582763),'rs1582763'] <- 2
pheno$rs1582763 <- as.numeric(pheno$rs1582763)
#TREM2
nTREM2 = unique(n@meta.data[,c('Sample_ID','nTREM2')])
nTREM2$nTREM2[nTREM2$nTREM2 != 'TREM2'] = 0
nTREM2$nTREM2[nTREM2$nTREM2 == 'TREM2'] = 1
pheno = merge(pheno,nTREM2,by.x='Row.names',by.y='Sample_ID')
pheno$nTREM2 = as.numeric(pheno$nTREM2)
pheno$TREM2_reduced = as.numeric(as.factor((phenoFile$TREM2_type %in% c('R62H','H157Y','R47H') )))-1
pheno$TREM2_reduced[is.na(pheno$TREM2_reduced)] = 0
nAPOE = unique(n@meta.data[,c('Sample_ID','nAPOE')])
pheno = merge(pheno,nAPOE,by.x='Row.names',by.y='Sample_ID')
pheno[-grep('4',pheno$nAPOE),'nAPOE'] <- 0
pheno[grep('4',pheno$nAPOE),'nAPOE'] <- 1
pheno$nAPOE <- as.numeric(pheno$nAPOE)
Final_Status = unique(n@meta.data[,c('Sample_ID','Status')])
colnames(Final_Status)[2] = 'Final_Status'
Final_Status$Final_Status = factor(Final_Status$Final_Status, levels=c('Neuro_CO','Neuro_Presympt','Neuro_AD','Neuro_ADAD','Neuro_OT'))
pheno = merge(pheno,Final_Status,by.x='Row.names',by.y='Sample_ID')
pheno$count_clusterCellType = totalCellType
rownames(pheno) = pheno$Row.names
pheno = pheno[,-1]
minCells = 60
filt_pheno <<- pheno[-which(pheno[,'count_clusterCellType'] < minCells),] #remove subjects with less than 60 cells in the cluster
tmp = lapply(clusters,function(clust){
#ADAD
tmp_pheno <<- filt_pheno
TotalSamples = nrow(tmp_pheno)
Group1 = 'ADAD'
Group2 = 'nonADAD'
Group3 = ''
Group1Samples = sum(tmp_pheno$Final_Status == 'Neuro_ADAD')
Group2Samples = sum(tmp_pheno$Final_Status != 'Neuro_ADAD')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status == 'Neuro_ADAD')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status != 'Neuro_ADAD')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ Final_Status + SEX')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['Final_StatusNeuro_ADAD',],collapse=','),sep=','))
cat('\n')
#TREM2
tmp_pheno <<- filt_pheno#[filt_pheno$Final_Status == 'Neuro_AD',]
TotalSamples = nrow(tmp_pheno)
Group1 = 'TREM2'
Group2 = 'nonTREM2'
Group3 = ''
Group1Samples = sum(tmp_pheno$nTREM2 == '1')
Group2Samples = sum(tmp_pheno$nTREM2 != '1')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nTREM2 == '1')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nTREM2 != '1')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ nTREM2 + SEX + AOD')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['nTREM2',],collapse=','),sep=','))
cat('\n')
#TREM2_reduced
tmp_pheno <<- filt_pheno#[filt_pheno$Final_Status == 'Neuro_AD',]
TotalSamples = nrow(tmp_pheno)
Group1 = 'TREM2_reduced'
Group2 = 'other'
Group3 = ''
Group1Samples = sum(tmp_pheno$TREM2_reduced == '1')
Group2Samples = sum(tmp_pheno$TREM2_reduced != '1')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$TREM2_reduced == '1')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$TREM2_reduced != '1')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ TREM2_reduced + SEX + AOD')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['TREM2_reduced',],collapse=','),sep=','))
cat('\n')
#rs1582763
tmp_pheno <<- na.omit(filt_pheno)
TotalSamples = nrow(tmp_pheno)
Group1 = 'AA'
Group2 = 'AG'
Group3 = 'GG'
Group1Samples = sum(tmp_pheno$rs1582763 == '2')
Group2Samples = sum(tmp_pheno$rs1582763 == '1')
Group3Samples = sum(tmp_pheno$rs1582763 == '0')
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$rs1582763 == '2')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$rs1582763 == '1')
G3S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$rs1582763 == '0')
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ rs1582763 + SEX + Final_Status')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['rs1582763',],collapse=','),sep=','))
cat('\n')
#sAD
tmp_pheno <<- filt_pheno
TotalSamples = nrow(tmp_pheno)
Group1 = 'sAD'
Group2 = 'non_sAD'
Group3 = ''
Group1Samples = sum(tmp_pheno$Final_Status == 'Neuro_AD')
Group2Samples = sum(tmp_pheno$Final_Status != 'Neuro_AD')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status == 'Neuro_AD')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status != 'Neuro_AD')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
#tmp_pheno$ADstatus = tmp_pheno$Final_Status == 'Neuro_AD'
model <- paste0(paste0('prop_',clust), ' ~ Final_Status + SEX')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['Final_StatusNeuro_AD',],collapse=','),sep=','))
cat('\n')
#APOE
tmp_pheno <<- filt_pheno[filt_pheno$Final_Status == 'Neuro_AD',]
TotalSamples = nrow(tmp_pheno)
Group1 = 'APOEe4+'
Group2 = 'APOEe4-'
Group3 = ''
Group1Samples = sum(tmp_pheno$nAPOE == '1')
Group2Samples = sum(tmp_pheno$nAPOE != '1')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nAPOE == '1')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nAPOE != '1')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ nAPOE + SEX + AOD')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['nAPOE',],collapse=','),sep=','))
cat('\n')
})
})
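## Hypothetical refactor sketch (not part of the original analysis): every block
## above fits the same cube-root-transformed proportion model and only swaps the
## predictor of interest and covariates, so a small helper like this could replace
## the copy-pasted sections; the names below are placeholders.
# fit_prop_model <- function(pheno, clust, predictor, covars = c("SEX")) {
#   y <- paste0("prop_", clust)
#   pheno[[y]] <- pheno[[y]]^(1/3)
#   f <- reformulate(c(predictor, covars), response = y)
#   summary(glm(f, data = pheno))$coefficients
# }
# # e.g. fit_prop_model(filt_pheno, 0, "nTREM2", c("SEX", "AOD"))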
|
/AnalysisScripts/ProportionAnalyses/CellStateLevelProportionAnalysis.R
|
no_license
|
HarariLab/parietal-snRNAseq
|
R
| false | false | 10,797 |
r
|
library(Seurat)
cellTypes = c('astro','microglia','n_ex','n_inh','oligo','opc')
#cellTypes = c('microglia','n_ex','n_inh')
tmp_pheno = NULL
tmp = lapply(cellTypes,function(cellType){
if (cellType %in% c('n_ex','n_inh')){
n = readRDS('/home/brasel/SingleCellProjects/dataObjects/neuron.rds')
n$clusters = Idents(n)
if(cellType == 'n_ex') { n = subset(n,subset=clusters %in% c(0:2,6))
} else{ n = subset(n,subset=clusters %in% c(3:5)) }
} else { n = readRDS(sprintf('/home/brasel/SingleCellProjects/dataObjects/%s.rds',cellType)) }
clusterCounts = table(n@meta.data$Sample_ID,Idents(n))
totalCellType = rowSums(clusterCounts)
clusterCounts = clusterCounts/totalCellType
pheno = data.frame(rbind(clusterCounts))
colnames(pheno) = paste0('prop_',0:(ncol(clusterCounts)-1)) #c('prop_0','prop_1','prop_2','prop_3','prop_4')
clusters = 0:(ncol(clusterCounts)-1)
phenoFile = read.csv('/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/Brain_pheno_jorge.csv',row.names=2)
phenoFile = phenoFile[rownames(pheno),]
### Add in Age of Death ###
AOD = unique(n@meta.data[,c('Sample_ID','AOD')])
pheno = merge(pheno,AOD,by.x='row.names',by.y='Sample_ID')
pheno$AOD <- as.numeric(pheno$AOD)
library(lme4)
### add in one other covariate at a time to see if it removes the significance
covarPos <- c()
#Sex (additive Model)
SEX = unique(n@meta.data[,c('Sample_ID','Gender')])
colnames(SEX)[2] = 'SEX'
pheno = merge(pheno,SEX,by.x='Row.names',by.y='Sample_ID')
rs1582763 = unique(n@meta.data[,c('Sample_ID','MS4')])
colnames(rs1582763)[2] = 'rs1582763'
pheno = merge(pheno,rs1582763,by.x='Row.names',by.y='Sample_ID')
pheno[grep('GG',pheno$rs1582763),'rs1582763'] <- 0
pheno[grep('AG',pheno$rs1582763),'rs1582763'] <- 1
pheno[grep('AA',pheno$rs1582763),'rs1582763'] <- 2
pheno$rs1582763 <- as.numeric(pheno$rs1582763)
#TREM2
nTREM2 = unique(n@meta.data[,c('Sample_ID','nTREM2')])
nTREM2$nTREM2[nTREM2$nTREM2 != 'TREM2'] = 0
nTREM2$nTREM2[nTREM2$nTREM2 == 'TREM2'] = 1
pheno = merge(pheno,nTREM2,by.x='Row.names',by.y='Sample_ID')
pheno$nTREM2 = as.numeric(pheno$nTREM2)
pheno$TREM2_reduced = as.numeric(as.factor((phenoFile$TREM2_type %in% c('R62H','H157Y','R47H') )))-1
pheno$TREM2_reduced[is.na(pheno$TREM2_reduced)] = 0
nAPOE = unique(n@meta.data[,c('Sample_ID','nAPOE')])
pheno = merge(pheno,nAPOE,by.x='Row.names',by.y='Sample_ID')
pheno[-grep('4',pheno$nAPOE),'nAPOE'] <- 0
pheno[grep('4',pheno$nAPOE),'nAPOE'] <- 1
pheno$nAPOE <- as.numeric(pheno$nAPOE)
Final_Status = unique(n@meta.data[,c('Sample_ID','Status')])
colnames(Final_Status)[2] = 'Final_Status'
Final_Status$Final_Status = factor(Final_Status$Final_Status, levels=c('Neuro_CO','Neuro_Presympt','Neuro_AD','Neuro_ADAD','Neuro_OT'))
pheno = merge(pheno,Final_Status,by.x='Row.names',by.y='Sample_ID')
pheno$count_clusterCellType = totalCellType
rownames(pheno) = pheno$Row.names
pheno = pheno[,-1]
minCells = 60
filt_pheno <<- pheno[-which(pheno[,'count_clusterCellType'] < minCells),] #remove subjects with less than 60 cells in the cluster
tmp = lapply(clusters,function(clust){
#ADAD
tmp_pheno <<- filt_pheno
TotalSamples = nrow(tmp_pheno)
Group1 = 'ADAD'
Group2 = 'nonADAD'
Group3 = ''
Group1Samples = sum(tmp_pheno$Final_Status == 'Neuro_ADAD')
Group2Samples = sum(tmp_pheno$Final_Status != 'Neuro_ADAD')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status == 'Neuro_ADAD')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status != 'Neuro_ADAD')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ Final_Status + SEX')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['Final_StatusNeuro_ADAD',],collapse=','),sep=','))
cat('\n')
#TREM2
tmp_pheno <<- filt_pheno#[filt_pheno$Final_Status == 'Neuro_AD',]
TotalSamples = nrow(tmp_pheno)
Group1 = 'TREM2'
Group2 = 'nonTREM2'
Group3 = ''
Group1Samples = sum(tmp_pheno$nTREM2 == '1')
Group2Samples = sum(tmp_pheno$nTREM2 != '1')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nTREM2 == '1')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nTREM2 != '1')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ nTREM2 + SEX + AOD')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['nTREM2',],collapse=','),sep=','))
cat('\n')
#TREM2_reduced
tmp_pheno <<- filt_pheno#[filt_pheno$Final_Status == 'Neuro_AD',]
TotalSamples = nrow(tmp_pheno)
Group1 = 'TREM2_reduced'
Group2 = 'other'
Group3 = ''
Group1Samples = sum(tmp_pheno$TREM2_reduced == '1')
Group2Samples = sum(tmp_pheno$TREM2_reduced != '1')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$TREM2_reduced == '1')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$TREM2_reduced != '1')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ TREM2_reduced + SEX + AOD')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['TREM2_reduced',],collapse=','),sep=','))
cat('\n')
#rs1582763
tmp_pheno <<- na.omit(filt_pheno)
TotalSamples = nrow(tmp_pheno)
Group1 = 'AA'
Group2 = 'AG'
Group3 = 'GG'
Group1Samples = sum(tmp_pheno$rs1582763 == '2')
Group2Samples = sum(tmp_pheno$rs1582763 == '1')
Group3Samples = sum(tmp_pheno$rs1582763 == '0')
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$rs1582763 == '2')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$rs1582763 == '1')
G3S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$rs1582763 == '0')
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ rs1582763 + SEX + Final_Status')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['rs1582763',],collapse=','),sep=','))
cat('\n')
#sAD
tmp_pheno <<- filt_pheno
TotalSamples = nrow(tmp_pheno)
Group1 = 'sAD'
Group2 = 'non_sAD'
Group3 = ''
Group1Samples = sum(tmp_pheno$Final_Status == 'Neuro_AD')
Group2Samples = sum(tmp_pheno$Final_Status != 'Neuro_AD')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status == 'Neuro_AD')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$Final_Status != 'Neuro_AD')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
#tmp_pheno$ADstatus = tmp_pheno$Final_Status == 'Neuro_AD'
model <- paste0(paste0('prop_',clust), ' ~ Final_Status + SEX')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['Final_StatusNeuro_AD',],collapse=','),sep=','))
cat('\n')
#APOE
tmp_pheno <<- filt_pheno[filt_pheno$Final_Status == 'Neuro_AD',]
TotalSamples = nrow(tmp_pheno)
Group1 = 'APOEe4+'
Group2 = 'APOEe4-'
Group3 = ''
Group1Samples = sum(tmp_pheno$nAPOE == '1')
Group2Samples = sum(tmp_pheno$nAPOE != '1')
Group3Samples = ''
SamplesGT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01)
G1S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nAPOE == '1')
G2S_GT_0.01 = sum(tmp_pheno[,paste0('prop_',clust)] > 0.01 & tmp_pheno$nAPOE != '1')
G3S_GT_0.01 = ''
write(paste(cellType,clust,Group1,Group2,Group3,TotalSamples,Group1Samples,Group2Samples,Group3Samples,SamplesGT_0.01,G1S_GT_0.01,G2S_GT_0.01,G3S_GT_0.01,sep=','),file='/home/brasel/SingleCellProjects/MyProjects/67BrainsPaper/PropAnalyses/SampleNumbersForEachPropAnalysis/sampleNumbersForPropAnalyses_v2.csv',append=T)
tmp = lapply(colnames(tmp_pheno)[grep('prop',colnames(tmp_pheno))],function(col) tmp_pheno[,col] <<- tmp_pheno[,col]^(1/3) )
model <- paste0(paste0('prop_',clust), ' ~ nAPOE + SEX + AOD')
re <- glm(formula = model, data=tmp_pheno)
coef <- summary(re)$coefficient
cat(paste(cellType,clust,Group1,paste(coef['nAPOE',],collapse=','),sep=','))
cat('\n')
})
})
|
hpc<-read.table("household_power_consumption.txt", header=TRUE, sep = ";",na.strings="?")
##Read in household_power_consumption.txt with separator ";" and NA strings "?"
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")
##CreateDateTime column
hpc[,"Date"]<-as.Date(hpc[,"Date"],format="%d/%m/%Y")
##convert date column to date class
hpc_sub<-subset(hpc, Date > as.Date("2007-01-31") & Date < as.Date("2007-02-03"))
##subset by dates
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
##setup 2x2 layout
plot(hpc_sub$DateTime,hpc_sub$Global_active_power, type= "l",ylab="Global active power (kilowatts)",xlab= "")
##plot global_active_power vs datetime
plot(hpc_sub$DateTime,hpc_sub$Voltage, type= "l",ylab="Voltage",xlab= "datetime")
##plot voltage vs datetime
plot(hpc_sub$DateTime,hpc_sub$Sub_metering_1,col=c("black"),type= "l",ylab="Energy sub metering",xlab= "")
lines(hpc_sub$DateTime,hpc_sub$Sub_metering_2, col=c("red"),type= "l")
lines(hpc_sub$DateTime,hpc_sub$Sub_metering_3, col=c("blue"),type= "l")
legend("topright",bty="n", border=NULL, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lwd=1, col=c("black","red","blue"))
##plot submeter values vs datetime
plot(hpc_sub$DateTime,hpc_sub$Global_reactive_power, type= "l",ylab="Global_reactive_power",xlab= "datetime")
##plot global_reactive_power vs datetime
dev.copy(png, file = "plot4.png",width=480, height=480)
##copy the screen plot to plot4.png
dev.off()
##close png device
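## Optional alternative (not in the original): open the png device up front so the
## figure is written straight to file and no screen device is needed for dev.copy().
# png("plot4.png", width = 480, height = 480)
# ## ...same par() and plot calls as above...
# dev.off()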
|
/plot4.R
|
no_license
|
callumd92/CourseraEDA
|
R
| false | false | 1,513 |
r
|
hpc<-read.table("household_power_consumption.txt", header=TRUE, sep = ";",na.strings="?")
##Read in household_power_consumption.txt with separator ";" and NA strings "?"
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")
##CreateDateTime column
hpc[,"Date"]<-as.Date(hpc[,"Date"],format="%d/%m/%Y")
##convert date column to date class
hpc_sub<-subset(hpc, Date > as.Date("2007-01-31") & Date < as.Date("2007-02-03"))
##subset by dates
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
##setup 2x2 layout
plot(hpc_sub$DateTime,hpc_sub$Global_active_power, type= "l",ylab="Global active power (kilowatts)",xlab= "")
##plot global_active_power vs datetime
plot(hpc_sub$DateTime,hpc_sub$Voltage, type= "l",ylab="Voltage",xlab= "datetime")
##plot voltage vs datetime
plot(hpc_sub$DateTime,hpc_sub$Sub_metering_1,col=c("black"),type= "l",ylab="Energy sub metering",xlab= "")
lines(hpc_sub$DateTime,hpc_sub$Sub_metering_2, col=c("red"),type= "l")
lines(hpc_sub$DateTime,hpc_sub$Sub_metering_3, col=c("blue"),type= "l")
legend("topright",bty="n", border=NULL, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lwd=1, col=c("black","red","blue"))
##plot submeter values vs datetime
plot(hpc_sub$DateTime,hpc_sub$Global_reactive_power, type= "l",ylab="Global_reactive_power",xlab= "datetime")
##plot global_reactive_power vs datetime
dev.copy(png, file = "plot4.png",width=480, height=480)
##copy the screen plot to plot4.png
dev.off()
##close png device
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpSpace.R
\name{cpSpaceAddBody}
\alias{cpSpaceAddBody}
\title{Add a rigid body to the simulation.}
\usage{
cpSpaceAddBody(space, body)
}
\arguments{
\item{space}{[\code{cpSpace *}]}
\item{body}{[\code{cpBody *}]}
}
\description{
Add a rigid body to the simulation.
}
\details{
C function prototype: \code{CP_EXPORT void cpSpaceAddBody(cpSpace *space, cpBody *body);}
}
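\examples{
\dontrun{
## illustrative sketch only: assumes the companion constructors from the Chipmunk
## C API (cpSpaceNew, cpBodyNew) are wrapped with the same autogenerated names
space <- cpSpaceNew()
body  <- cpBodyNew(1, 10)  # hypothetical mass and moment arguments
cpSpaceAddBody(space, body)
}
}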
|
/man/cpSpaceAddBody.Rd
|
permissive
|
coolbutuseless/chipmunkcore
|
R
| false | true | 448 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpSpace.R
\name{cpSpaceAddBody}
\alias{cpSpaceAddBody}
\title{Add a rigid body to the simulation.}
\usage{
cpSpaceAddBody(space, body)
}
\arguments{
\item{space}{[\code{cpSpace *}]}
\item{body}{[\code{cpBody *}]}
}
\description{
Add a rigid body to the simulation.
}
\details{
C function prototype: \code{CP_EXPORT void cpSpaceAddBody(cpSpace *space, cpBody *body);}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{regionDiskTypes.list}
\alias{regionDiskTypes.list}
\title{Retrieves a list of regional disk types available to the specified project.}
\usage{
regionDiskTypes.list(project, region, filter = NULL, maxResults = NULL,
orderBy = NULL, pageToken = NULL)
}
\arguments{
\item{project}{Project ID for this request}
\item{region}{The name of the region for this request}
\item{filter}{Sets a filter expression for filtering listed resources, in the form filter={expression}}
\item{maxResults}{The maximum number of results per page that should be returned}
\item{orderBy}{Sorts list results by a certain order}
\item{pageToken}{Specifies a page token to use}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
}
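\examples{
\dontrun{
## illustrative only: the project and region values below are placeholders
options(googleAuthR.scopes.selected =
          "https://www.googleapis.com/auth/compute.readonly")
googleAuthR::gar_auth()
regionDiskTypes.list(project = "my-gcp-project", region = "us-central1")
}
}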
|
/googlecomputealpha.auto/man/regionDiskTypes.list.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false | true | 1,481 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_functions.R
\name{regionDiskTypes.list}
\alias{regionDiskTypes.list}
\title{Retrieves a list of regional disk types available to the specified project.}
\usage{
regionDiskTypes.list(project, region, filter = NULL, maxResults = NULL,
orderBy = NULL, pageToken = NULL)
}
\arguments{
\item{project}{Project ID for this request}
\item{region}{The name of the region for this request}
\item{filter}{Sets a filter expression for filtering listed resources, in the form filter={expression}}
\item{maxResults}{The maximum number of results per page that should be returned}
\item{orderBy}{Sorts list results by a certain order}
\item{pageToken}{Specifies a page token to use}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/compute
\item https://www.googleapis.com/auth/compute.readonly
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/compute/docs/reference/latest/}{Google Documentation}
}
|
################################################################################
# Aim: Download full text pdfs, given PMID and url
#
# Contact: Herm Lamberink, h.j.lamberink@umcutrecht.nl
# Date: 2018-03-19
#############################
.libPaths( c(.libPaths(), "/mnt/data/live02/stress/hlamberink/RLibrary" ) )
library( 'xml2' ) # used by rvest package
library( 'rvest' ) # web scraping package
library( "curl" )
library( "XML" )
library( "pbapply" ) # power bar during sapply
library( 'plyr' ); library( 'dplyr' )
library( 'tidyr' )
###################################
# FUNCTIONS
###################################
###
# Get pdf from given pmid
##
get.pdf <- function( pmid, url, outdr = outdir )
{
# prevent the function from shutting down due to an error
v <- tryCatch(
{
# output pdf
outpdf <- paste0( outdr, '/', pmid, '.pdf' )
if( ! file.exists( outpdf ) )
{
# set empty pdflink
pdflink <- NA
#######################
# pdflink per publisher
#######################
# url is from arvojournals
if( grepl( "arvojournals", url ) )
{
# url to pdf
pdflink <- get.pdflink.arvojournals( url )
}
# url is from JAMA
if( grepl( "jamanetwork.com", url ) )
{
# url to pdf
pdflink <- get.pdflink.jama( url )
}
# url is from PLOS
if( grepl( "dx.plos", url ) )
{
# url to pdf
pdflink <- get.pdflink.plos( url )
}
# url is from EHP
if( grepl( "/EHP", url ) )
{
pdflink <- get.pdflink.ehp( url )
}
# url is from doi/bjs
if( grepl( "/bjs", url ) )
{
pdflink <- get.pdflink.doibjs( url )
}
# url is from Wiley, via doi.org
#if( grepl( "dx.doi.org", url ) )
#{
# pdflink <- get.pdflink.doiwiley( url )
#}
# url is from wiley
if( grepl( "wiley.com", url ) )
{
pdflink <- get.pdflink.wileyreal( url )
}
# url is from bmj
if( grepl( "bmj.com", url ) )
{
pdflink <- get.pdflink.bmj( url )
}
# url is from cmaj
if( grepl( "cmaj.ca", url ) )
{
pdflink <- get.pdflink.cmaj( url )
}
# url is from nejm
if( grepl( "nejm.org", url ) )
{
pdflink <- get.pdflink.nejm( url )
}
# url is from scielo
if( grepl( "scielo.br", url ) )
{
pdflink <- get.pdflink.scielo( url )
}
# url is from academic.oup
if( grepl( "academic.oup", url ) )
{
pdflink <- get.pdflink.acoup( url )
}
# url is from annals
if( grepl( "annals", url ) )
{
pdflink <- get.pdflink.annals( url )
}
# url is from cambridge
if( grepl( "cambridge.org", url ) )
{
pdflink <- get.pdflink.cambridge( url )
}
# url is from OVID
if( grepl( "Insights.ovid", url ) )
{
# url to pdf
pdflink <- get.pdflink.ovid1( url )
if( length(pdflink) == 0 ) pdflink <- get.pdflink.ovid2( url )
}
# url is from iiar
if( grepl( "iiar", url ) )
{
pdflink <- get.pdflink.iiar( url )
}
# url is from ahajournals
if( grepl( "ahajournals", url ) )
{
pdflink <- get.pdflink.ahaj( url )
}
# url is from sciencedirect
if( grepl( "sciencedirect.com", url ) )
{
pdflink <- get.pdflink.sciencedirect( url )
}
# url is from asm
if( grepl( "asm", url ) )
{
pdflink <- get.pdflink.asm( url )
}
# url is from ajp
if( grepl( "ajp", url ) )
{
        pdflink <- get.pdflink.ajp( url )
}
# url is from apsjournals
if( grepl( "apsjournals", url ) )
{
pdflink <- get.pdflink.apsjournals( url )
}
# url is from arjournals
if( grepl( "arjournals", url ) )
{
pdflink <- get.pdflink.arjournals( url )
}
# url is from ascopubs
if( grepl( "ascopubs", url ) )
{
pdflink <- get.pdflink.ascopubs( url )
}
# url is from avmajournals
if( grepl( "avmajournals", url ) )
{
pdflink <- get.pdflink.avma( url )
}
# url is from bjgp
if( grepl( "bjgp", url ) )
{
pdflink <- get.pdflink.bjgp( url )
}
# url is from boneandjoint
if( grepl( "boneandjoint", url ) )
{
pdflink <- get.pdflink.boneandjoint( url )
}
# url is from aacrjournals
if( grepl( "aacrjournals", url ) )
{
pdflink <- get.pdflink.aacrjournals( url )
}
# url is from diabetesjournals
if( grepl( "diabetesjournals", url ) )
{
pdflink <- get.pdflink.diabetesjournals( url )
}
# url is from asnjournals
if( grepl( "asnjournals", url ) )
{
pdflink <- get.pdflink.asnjournals( url )
}
# url is from ersjournals
if( grepl( "ersjournals", url ) )
{
pdflink <- get.pdflink.ersjournals( url )
}
# url is from gacetamedicade
if( grepl( "gacetamedicade", url ) )
{
pdflink <- get.pdflink.gacetamedicade( url )
}
# url is from tums.ac.ir
if( grepl( "tums.ac.ir", url ) )
{
pdflink <- get.pdflink.tums( url )
}
# url is from nutrition.org
if( grepl( "nutrition.org", url ) )
{
pdflink <- get.pdflink.nutrition( url )
}
# url is from aota.org
if( grepl( "aota.org", url ) )
{
pdflink <- get.pdflink.aota( url )
}
# url is from physiology.org
if( grepl( "physiology.org", url ) )
{
pdflink <- get.pdflink.physiology( url )
}
# url is from asahq.org
if( grepl( "asahq.org", url ) )
{
pdflink <- get.pdflink.asahq( url )
}
# url is from upol.cz
if( grepl( "upol.cz", url ) )
{
pdflink <- get.pdflink.upol.cz( url )
}
# url is from rcpsych
if( grepl( "rcpsych.org", url ) )
{
pdflink <- get.pdflink.rcpsych( url )
}
# url is from sabinet.co.za
if( grepl( "sabinet.co.za", url ) )
{
pdflink <- get.pdflink.sabinet( url )
}
# url is from quintessenz
if( grepl( "quintessenz", url ) )
{
pdflink <- get.pdflink.quintessenz( url )
}
# url is from clinicalandtranslationalinvestigation
if( grepl( "clinicalandtranslationalinvestigation", url ) )
{
pdflink <- get.pdflink.clinicalandtranslationalinvestigation( url )
}
# url is from jaoa.org
if( grepl( "jaoa.org", url ) )
{
pdflink <- get.pdflink.jaoa( url )
}
# url is from snmjournals
if( grepl( "snmjournals", url ) )
{
pdflink <- get.pdflink.snmjournals( url )
}
# url is from umsha.ac.ir
if( grepl( "umsha" , url ) )
{
pdflink <- get.pdflink.umsha( url )
}
# url is from tokai
if( grepl( "tokai" , url ) )
{
pdflink <- get.pdflink.tokai( url )
}
# url is from pamw.pl
if( grepl( "pamw.pl", url ) )
{
pdflink <- get.pdflink.pamw( url )
}
# url is from aappublications
if( grepl( "aappublications", url ) )
{
pdflink <- get.pdflink.aappublications( url )
}
# url is from publisherspanel
if( grepl( "publisherspanel", url ) )
{
pdflink <- get.pdflink.publisherspanel( url )
}
# url is from rcseng
if( grepl( "rcseng", url ) )
{
pdflink <- get.pdflink.rcseng( url )
}
# url is from rsna
if( grepl( "rsna", url ) )
{
pdflink <- get.pdflink.rsna( url )
}
# url is from rcjournal
if( grepl( "rcjournal", url ) )
{
pdflink <- get.pdflink.rcjournal( url )
}
# url is from revistachirurgia
if( grepl( "revistachirurgia", url ) )
{
pdflink <- get.pdflink.revistachirurgia( url )
}
# url is from thejns
if( grepl( "thejns", url ) )
{
pdflink <- get.pdflink.thejns( url )
}
# url is from alphamedpress
if( grepl( "alphamedpress", url ) )
{
pdflink <- get.pdflink.alphamedpress( url )
}
# url is from aepress
if( grepl( "aepress", url ) )
{
pdflink <- get.pdflink.aepress( url )
}
# url is from ajronline
if( grepl( "ajronline", url ) )
{
pdflink <- get.pdflink.ajronline( url )
}
# url is from ajcn
if( grepl( "ajcn", url ) )
{
pdflink <- get.pdflink.ajcn( url )
}
# url is from ams.ac.ir
if( grepl( "ams.ac.ir", url ) )
{
pdflink <- get.pdflink.ams.ac.ir( url )
}
# url is from annfammed
if( grepl( "annfammed", url ) )
{
pdflink <- get.pdflink.annfammed( url )
}
# url is from annsaudimed
if( grepl( "annsaudimed", url ) )
{
pdflink <- get.pdflink.annsaudimed( url )
}
# url is from atsjournals
if( grepl( "atsjournals", url ) )
{
pdflink <- get.pdflink.atsjournals( url )
}
# url is from birpublications
if( grepl( "birpublications", url ) )
{
pdflink <- get.pdflink.birpublications( url )
}
# url is from bloodjournal
if( grepl( "bloodjournal", url ) )
{
pdflink <- get.pdflink.bloodjournal( url )
}
# url is from cfp
if( grepl( "cfp.org", url ) )
{
pdflink <- get.pdflink.cfp( url )
}
# url is from cmj.hr
if( grepl( "cmj.hr", url ) )
{
pdflink <- get.pdflink.cmj.hr( url )
}
# url is from cmj.org
if( grepl( "cmj.org", url ) )
{
pdflink <- get.pdflink.cmj.org( url )
}
# url is from danmedj
if( grepl( "danmedj", url ) )
{
pdflink <- get.pdflink.danmedj( url )
}
# url is from dirjournal
if( grepl( "dirjournal", url ) )
{
pdflink <- get.pdflink.dirjournal( url )
}
# url is from e-cmh
if( grepl( "e-cmh", url ) )
{
pdflink <- get.pdflink.ecmh( url )
}
# url is from ectrx
if( grepl( "ectrx", url ) )
{
pdflink <- get.pdflink.ectrx( url )
}
# url is from educationforhealth
if( grepl( "educationforhealth", url ) )
{
pdflink <- get.pdflink.educationforhealth( url )
}
# url is from eje-online
if( grepl( "eje-online", url ) )
{
pdflink <- get.pdflink.ejeonline( url )
}
# url is from europeanreview
if( grepl( "europeanreview", url ) )
{
pdflink <- get.pdflink.europeanreview( url )
}
# url is from haematologica
if( grepl( "haematologica", url ) )
{
pdflink <- get.pdflink.haematologica( url )
}
# url is from hdbp
if( grepl( "hdbp", url ) )
{
pdflink <- get.pdflink.hdbp( url )
}
# url is from healio
if( grepl( "healio", url ) )
{
pdflink <- get.pdflink.healio( url )
}
# url is from ijkd
if( grepl( "ijkd", url ) )
{
pdflink <- get.pdflink.ijkd( url )
}
# url is from ijo.in
if( grepl( "ijo.in", url ) )
{
pdflink <- get.pdflink.ijo.in( url )
}
# url is from impactjournals
if( grepl( "impactjournals", url ) )
{
pdflink <- get.pdflink.impactjournals( url )
}
# url is from inaactamedica
if( grepl( "inaactamedica", url ) )
{
pdflink <- get.pdflink.inaactamedica( url )
}
# url is from indianjcancer
if( grepl( "indianjcancer", url ) )
{
pdflink <- get.pdflink.indianjcancer( url )
}
# url is from intbrazjurol
if( grepl( "intbrazjurol", url ) )
{
pdflink <- url
}
# url is from jiaci
if( grepl( "jiaci", url ) )
{
pdflink <- get.pdflink.jiaci( url )
}
# url is from jmir
if( grepl( "jmir", url ) )
{
pdflink <- get.pdflink.jmir( url )
}
# url is from jneurosci
if( grepl( "jneurosci", url ) )
{
pdflink <- get.pdflink.jneurosci( url )
}
# url is from jospt
if( grepl( "jospt", url ) )
{
pdflink <- get.pdflink.jospt( url )
}
# url is from mdpi.com
if( grepl( "mdpi.com", url ) )
{
pdflink <- get.pdflink.mdpi.com( url )
}
# url is from painphysicianjournal
if( grepl( "painphysicianjournal", url ) )
{
pdflink <- get.pdflink.painphysicianjournal( url )
}
# url is from sjweh
if( grepl( "sjweh", url ) )
{
pdflink <- get.pdflink.sjweh( url )
}
# url is from tandfonline
if( grepl( "tandfonline", url ) )
{
pdflink <- get.pdflink.tandfonline( url )
}
# url is from thieme-connect
if( grepl( "thieme-connect", url ) )
{
pdflink <- get.pdflink.thieme( url )
}
# url is from wjgnet
if( grepl( "wjgnet", url ) )
{
pdflink <- get.pdflink.wjgnet( url )
}
# url is from degruyter
if( grepl( "degruyter", url ) )
{
pdflink <- get.pdflink.degruyter( url )
}
# url is from biomedcentral
if( grepl( "biomedcentral", url ) )
{
pdflink <- get.pdflink.biomedcentral( url )
}
# url is from karger
if( grepl( "karger", url ) )
{
pdflink <- get.pdflink.karger( url )
}
# url is from jkan.or.kr
if( grepl( "jkan.or.kr", url ) )
{
pdflink <- get.pdflink.jkan.or.kr( url )
}
# url is from medicaljournals.se
if( grepl( "medicaljournals.se", url ) )
{
pdflink <- get.pdflink.medicaljournals.se( url )
}
# url is from anesthesiology
if( grepl( "anesthesiology", url ) )
{
pdflink <- get.pdflink.anesthesiology( url )
}
# url is from linkinghub
if( grepl( "linkinghub", url ) )
{
pdflink <- get.pdflink.linkinghub( url )
}
# url contains 10.1038 (nature publishers)
if( grepl( "doi.org/10.1038", url ) )
{
pdflink <- get.pdflink.nature( url )
}
# url conains 10.1089 (acm journal)
if( grepl( "doi.org/10.1089", url ) )
{
pdflink <- get.pdflink.acm( url )
}
# url conains 10.1111 (acm journal)
if( grepl( "doi.org/10.1111", url ) )
{
pdflink <- get.pdflink.wiley( url )
}
# url conains 10.1002 (acm journal)
if( grepl( "doi.org/10.1002", url ) )
{
pdflink <- get.pdflink.wiley( url )
}
# url contains 10.1038 (springerlink)
if( grepl( "doi.org/10.1007", url ) )
{
pdflink <- get.pdflink.springerlink( url )
}
# psychiatryonline
if( grepl( "psychiatryonline", url ) )
{
pdflink <- get.pdflink.psychiatryonline( url )
}
#######################
      # download pdf
#######################
# write pdf to output if link is available
if( ! is.na( pdflink ) )
{
          # download pdf (only if it has not been downloaded yet)
download.file( url = pdflink, destfile = outpdf,
mode = "wb", quiet = TRUE )
}
}
return( NA )
},
error=function(err) {
#message(paste("URL does not seem to exist:", url))
#message("Here's the original error message:")
message(paste( pmid, err, "\n" ) )
# Choose a return value in case of error
return( paste( pmid, "URL does not seem to exist" ) )
},
warning=function(war) {
#message(paste("URL caused a warning:", url))
#message("Here's the original warning message: ")
message(paste( pmid, war, "\n" ) )
# Choose a return value in case of warning
return( paste( pmid, "warning, test if downloaded" ) )
}
#finally={
# NOTE:
# Here goes everything that should be executed at the end,
# regardless of success or error.
# If you want more than one expression to be executed, then you
# need to wrap them in curly brackets ({...}); otherwise you could
# just have written 'finally=<expression>'
#message(paste("Processed URL:", url))
#message("Some other message at the end")
#}
)
}
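###
# Illustrative call (not in the original script): 'outdir' must exist before
# get.pdf() is used because the destination path is built from it; the PMID and
# URL below are placeholders.
##
# outdir <- "fulltext_pdfs"
# dir.create(outdir, showWarnings = FALSE)
# get.pdf(pmid = "12345678", url = "https://doi.org/10.1000/xyz123", outdr = outdir)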
###
# Get full text pdf link from psychiatryonline.org full text website.
##
get.pdflink.psychiatryonline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".show-pdf"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from springerlink full text website.
##
get.pdflink.springerlink <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from nature full text website.
##
get.pdflink.nature <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- 'meta[name="citation_pdf_url"]'
# save pdflink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
if( identical( pdflink, character(0) ) )
{
css <- 'a[class="inline-block block-link pa10 pl0"]'
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
if( !identical( intermed1, character(0)))
{
pdflink <- paste0( "https://www.nature.com", intermed1[1] )
return( pdflink )
}
  }
  # also return the link when the citation_pdf_url meta tag matched directly
  return( pdflink )
}
###
# Get full text pdf link from acm full text website.
##
get.pdflink.acm <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- '.pdfprint a'
# save pdflink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
if( !identical( intermed, character(0) ) )
{
pdflink <- paste0( "http://online.liebertpub.com", intermed )
return( pdflink )
}
}
###
# Get full text pdf link from wiley full text website.
##
get.pdflink.wiley <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- 'meta[name="citation_pdf_url"]'
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from wiley full text website.
##
get.pdflink.wileyreal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- 'meta[name="citation_pdf_url"]'
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
get.pdflink.sciencedirect <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css <- 'input[name="redirectURL"]'
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "value" )
intermed2 <- URLdecode(intermed1)
page <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# xpath of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css = 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed3 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
pdflink <- paste0( "https://www.sciencedirect.com", intermed3 )
return( pdflink )
}
###
# Get full text pdf link from medicaljournals.se full text website.
##
get.pdflink.medicaljournals.se <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'li:nth-child(2) .btn-success2'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.medicaljournals.se", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from jkan.or.kr full text website.
##
get.pdflink.jkan.or.kr <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '#portlet_content_Format li:nth-child(4) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.jkan.or.kr", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from karger full text website.
##
get.pdflink.karger <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.btn-karger'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.karger.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from degruyter full text website.
##
get.pdflink.degruyter <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf-link'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.degruyter.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from biomedcentral full text website.
##
get.pdflink.biomedcentral <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from wjgnet full text website.
##
get.pdflink.wjgnet <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.left-articlenav li:nth-child(3) a'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from thieme-connect full text website.
##
get.pdflink.thieme <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '#articleTabs :nth-child(2) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- paste0( "http://www.thieme-connect.com", intermed1 )
page2 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- '#pdfLink'
intermed3 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.thieme-connect.com", intermed3 )
return( pdflink )
}
###
# Get full text pdf link from tandfonline full text website.
##
get.pdflink.tandfonline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.show-pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.tandfonline.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from sjweh full text website.
##
get.pdflink.sjweh <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf-download'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.sjweh.fi/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from painphysicianjournal full text website.
##
get.pdflink.painphysicianjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.row .float-right'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.painphysicianjournal.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from mdpi.com full text website.
##
get.pdflink.mdpi.com <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from jospt full text website.
##
get.pdflink.jospt <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href^="/doi/pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.jospt.org", intermed1[1] )
return( pdflink )
}
###
# Get full text pdf link from jneurosci full text website.
##
get.pdflink.jneurosci <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from jmir.org full text website.
##
get.pdflink.jmir.org <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_abstract_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href^="http://www.jmir.org/article/download"]'
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from jiaci full text website.
##
get.pdflink.jiaci <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'li:nth-child(1) a:nth-child(2)'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.jiaci.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from indianjcancer full text website.
##
get.pdflink.indianjcancer <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href$=".pdf"]'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.indianjcancer.com/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from inaactamedica full text website.
##
get.pdflink.inaactamedica <- function( url )
{
# get href to pdfLink
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from impactjournals full text website.
##
get.pdflink.impactjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from ijo.in full text website.
##
get.pdflink.ijo.in <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href$=".pdf"]'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href")
pdflink <- paste0( "http://www.ijo.in/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from ijkd full text website.
##
get.pdflink.ijkd <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'frame'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "src" )
page2 <- xml2::read_html( curl( intermed1[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href^="http://www.ijkd"]'
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href")
return( pdflink )
}
###
# Get full text pdf link from healio full text website.
##
get.pdflink.healio <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from hdbp full text website.
##
get.pdflink.hdbp <- function( url )
{
# get href to pdfLink
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from haematologica full text website.
##
get.pdflink.haematologica <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from europeanreview full text website.
##
get.pdflink.europeanreview <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.right'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- sub( " http", "http", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from eje-online full text website.
##
get.pdflink.ejeonline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from educationforhealth full text website.
##
get.pdflink.educationforhealth <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href$=".pdf"]'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.educationforhealth.net/", intermed2)
return( pdflink )
}
###
# Get full text pdf link from ectrx full text website.
##
get.pdflink.ectrx <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'b a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.ectrx.org/forms/", intermed1)
return( pdflink )
}
###
# Get full text pdf link from e-cmh full text website.
##
get.pdflink.ecmh <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="fulltext_pdf"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from dirjournal full text website.
##
get.pdflink.dirjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href$=".pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.dirjournal.org", intermed1[2] )
return( pdflink )
}
###
# Get full text pdf link from danmedj full text website.
##
get.pdflink.danmedj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href$=".pdf"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from cmj.org full text website.
##
get.pdflink.cmj.org <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'p a:nth-child(1)'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.cmj.org/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from cmj.hr full text website.
##
get.pdflink.cmj.hr <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'frame[src^="http"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "src" )
return( pdflink )
}
###
# Get full text pdf link from cfp full text website.
##
get.pdflink.cfp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from canjsurg full text website.
##
get.pdflink.canjsurg <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'p:nth-child(2) a:nth-child(2)'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from bloodjournal full text website.
##
get.pdflink.bloodjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from birpublications full text website.
##
get.pdflink.birpublications <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.show-pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.birpublications.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from atsjournals full text website.
##
get.pdflink.atsjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.show-pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.atsjournals.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from annsaudimed full text website.
##
get.pdflink.annsaudimed <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.desc'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from annfammed.org full text website.
##
get.pdflink.annfammed <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.full-text-pdf-view-link a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( "\\+html", "", intermed1 )
pdflink <- paste0( "http://www.annfammed.org", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from ams.ac.ir full text website.
##
get.pdflink.ams.ac.ir <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from ajronline full text website.
##
get.pdflink.ajronline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '#refLinkList+ li .nowrap'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.ajronline.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from ajcn full text website.
##
get.pdflink.ajcn <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.full-text-pdf-view-link a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( "\\+html", "", intermed1 )
pdflink <- paste0( "http://www.ajcn.org", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from aepress.sk full text website.
##
get.pdflink.aepress <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from alphamedpress full text website.
##
get.pdflink.alphamedpress <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from thejns full text website.
##
get.pdflink.thejns <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.article-tools li:nth-child(2)'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://thejns.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from revistachirurgia full text website.
##
get.pdflink.revistachirurgia <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from rcjournal full text website.
##
get.pdflink.rcjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from rsna full text website.
##
get.pdflink.rsna <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.tab-nav li:nth-child(6) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://pubs.rsna.org", intermed1)
return( pdflink )
}
###
# Get full text pdf link from rcseng.ac.uk full text website.
##
get.pdflink.rcseng <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.tab-nav li:nth-child(4) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://publishing.rcseng.ac.uk", intermed1)
return( pdflink )
}
###
# Get full text pdf link from publisherspanel full text website.
##
get.pdflink.publisherspanel <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from aappublications full text website.
##
get.pdflink.aappublications <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from pamw.pl full text website.
##
get.pdflink.pamw <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'div[class="field-item even"] a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- intermed1[1]
return( pdflink )
}
###
# Get full text pdf link from tokai.com full text website.
##
get.pdflink.tokai <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from umsha.ac.ir full text website.
##
get.pdflink.umsha <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from aspet full text website.
##
get.pdflink.aspet <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from waocp full text website.
##
get.pdflink.waocp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( "^\\./", "", intermed1 ) # strip only a leading "./" so other slashes in the path are preserved
pdflink <- paste0( "http://journal.waocp.org/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from snmjournals full text website.
##
get.pdflink.snmjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from jaoa.org full text website.
##
get.pdflink.jaoa <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from clinicalandtranslationalinvestigation full text website.
##
get.pdflink.clinicalandtranslationalinvestigation <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href^="files/"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://clinicalandtranslationalinvestigation.com/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from quintessenz full text website.
##
get.pdflink.quintessenz <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[class="tocbut"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".de" )
pdflink <- paste0( link1[[1]][1], ".de/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from sabinet.co.za full text website.
##
get.pdflink.sabinet <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from rcpsych full text website.
##
get.pdflink.rcpsych <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'link[type="application/pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from upol.cz full text website.
##
get.pdflink.upol.cz <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from asahq.org full text website.
##
get.pdflink.asahq <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#pdfLink"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from physiology full text website.
##
get.pdflink.physiology <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'link[type="application/pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from aota.org full text website.
##
get.pdflink.aota <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from nutrition.org full text website.
##
get.pdflink.nutrition <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".full-text-pdf-view-link a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
pdflink <- sub( "\\+html", "", intermed2)
return( pdflink )
}
###
# Get full text pdf link from tums.ac.ir full text website.
##
get.pdflink.tums <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#sidebarRTArticleTools .file"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from arvojournals full text website.
##
get.pdflink.arvojournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#pdfLink"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
pdflink <- paste0( "http://iovs.arvojournals.org/", pdflink )
return( pdflink )
}
###
# Get full text pdf link from JAMA full text website.
##
get.pdflink.jama <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#full-text-tab #pdf-link"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
link1 <- strsplit( url, ".com" )
pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from plos full text website.
##
get.pdflink.plos <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#downloadPdf"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://journals.plos.org", pdflink )
return( pdflink )
}
###
# Get full text pdf link from bmj full text website.
##
get.pdflink.bmj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.bmj.com", intermed )
return( pdflink )
}
###
# Get full text pdf link from nejm full text website.
##
get.pdflink.nejm <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "li a[href^='/doi/pdf']"
# get href to pdfLink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.nejm.org", intermed )
return( pdflink )
}
###
# Get full text pdf link from academic.oup full text website.
##
get.pdflink.acoup <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".al-link"
# get href to pdfLink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://academic.oup.com", intermed )
return( pdflink )
}
###
# Get full text pdf link from annals full text website.
##
get.pdflink.annals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#tagmasterPDF"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
pdflink <- paste0( "https://www.annals.org", pdflink )
return( pdflink )
}
###
# Get full text pdf link from cambridge full text website.
##
get.pdflink.cambridge <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".download-types li:nth-child(1) a"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.cambridge.org", pdflink[1] )
return( pdflink )
}
###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid1 <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
# p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
# p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
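# the meta-tag route above is commented out; instead the code below assumes the
# OVID landing page embeds the full-text location in an inline script variable
# ('ovidFullTextUrlForButtons = "..."'), extracts that value from the second
# script node and re-appends the "PubMed" suffix that the gsub() strips off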
p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
} else {
# variable not found in the page: return empty so the caller can fall back to get.pdflink.ovid2()
return( character(0) )
}
page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
#intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
#page3 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
#pdflink <- page3 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
return( pdflink )
}
###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid2 <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
if(identical(p1, character(0))){
p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
}
page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
}else{
p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
p3 <- p2 %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
}
page3 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
intermed1 <- page3 %>% html_nodes( css = "#pdf" ) %>% html_attr( "href" )
intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
page4 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
pdflink <- page4 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
}
return( pdflink )
}
###
# Get full text pdf link from EHP full text website.
##
get.pdflink.ehp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf_icon'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://ehp.niehs.nih.gov", pdflink )
return( pdflink )
}
###
# Get full text pdf link from Science Direct full text website.
##
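# note: this redefines get.pdflink.sciencedirect from earlier in this script;
# since R keeps the most recent definition, this version is the one in effect
# when the download loop runs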
get.pdflink.sciencedirect <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# xpath of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css = ".pdf-download-btn-link"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- paste0( "http://www.sciencedirect.com", intermed1 )
page2 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 = 'meta[content^="0;URL"]'
intermed3 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "content" )
pdflink <- strsplit(intermed3, "URL=")[[1]][2]
return( pdflink )
}
# for linkinghub urls, resolve the intermediate redirect page and retrieve the pdf url
get.pdflink.linkinghub <- function( url )
{
# parse url further and get the specific node with the URL
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
parsedfull <- htmlParse( page )
rootnode <- xmlRoot( parsedfull )
o <- getNodeSet( rootnode, "//input[@name='redirectURL']" )[[1]]
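# the matched node is rendered to text and split apart to recover the value of
# the hidden 'redirectURL' input, which is url-decoded before it is followed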
# convert to character
o2 <- capture.output(o)
# extract URL from character string
o3 <- data.frame( col = strsplit( o2, split = " " )[[1]] )
o4 <- separate( o3, col = "col", into = c("a", "b"), sep = "=", fill = "right" )
http <- o4[ o4$a == "value", "b" ]
http <- gsub( "\"", "", http )
outurl <- URLdecode(http)
# parse page
page <- xml2::read_html( curl( outurl, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
# xpath of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css = 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed3 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
pdflink1 <- sub( "amp;", "", intermed3 )
page2 <- xml2::read_html( pdflink1 )
css2 = 'div a'
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from scielo full text website.
##
get.pdflink.scielo <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "li:nth-child(2) a:nth-child(1)"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.scielo.br", pdflink[1] )
return( pdflink )
}
###
# Get full text pdf link from hyper.ahajournals full text website.
##
get.pdflink.ahaj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name=citation_pdf_url]'
# alternative selector (unused): ".aha-icon-download"
# get pdf url directly from the citation meta tag
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
# page1 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css <- ".input-text-url input"
# intermed2 <- page1 %>% html_nodes( css = css ) %>% html_attr( "value" )
# pdflink <- paste0( intermed2, ".full.pdf" )
return( pdflink )
}
###
# Get full text pdf link from cmaj full text website.
##
get.pdflink.cmaj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".full-text-pdf-view-link a"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.cmaj.ca", pdflink )
pdflink <- sub( "\\+html", "", pdflink )
return( pdflink )
}
###
# Get full text pdf link from doi.org (Wiley) full text website.
##
get.pdflink.doiwiley <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- "#pdfDocument"
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "src" )
return( pdflink )
}
###
# Get full text pdf link from doi.org (bjs) full text website.
##
get.pdflink.doibjs <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".js-infopane-epdf"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- sub( "epdf", "pdf", intermed1)
return( pdflink )
}
###
# Get full text pdf link from asm.org full text website.
##
get.pdflink.asm <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# get href to pdfLink
pdflink <- sub( "long", "full.pdf", url)
return( pdflink )
}
###
# Get full text pdf link from ajp... full text website.
##
get.pdflink.ajp <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from apsjournals full text website.
##
get.pdflink.apsjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "li:nth-child(2) .nowrap"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://apsjournals.apsnet.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from arjournals full text website.
##
get.pdflink.arjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "a[href^='/doi/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://arjournals.annualreviews.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from ascopubs full text website.
##
get.pdflink.ascopubs <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".show-pdf"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- paste0( "http://ascopubs.org", intermed1 )
pdflink <- sub( "/pdf", "/pdfdirect", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from avmajournals full text website.
##
get.pdflink.avma <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".article_link td:nth-child(2) .header4"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://avmajournals.avma.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from bjgp full text website.
##
get.pdflink.bjgp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://bjgp.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from boneandjoint full text website.
##
get.pdflink.boneandjoint <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://bjj.boneandjoint.org.uk", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from aacrjournals full text website.
##
get.pdflink.aacrjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".last .highwire-article-nav-jumplink"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit(url, ".org")
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from diabetesjournals full text website.
##
get.pdflink.diabetesjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit(url, ".org")
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from asnjournals full text website.
##
get.pdflink.asnjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".primary a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( ".pdf\\+html", ".pdf", intermed1 )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from ersjournals full text website.
##
get.pdflink.ersjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".com" )
pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from gacetamedicade full text website.
##
get.pdflink.gacetamedicade <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".col-sm-2 li:nth-child(1) a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://gacetamedicademexico.com/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from iiar full text website.
##
get.pdflink.iiar <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".full-text-pdf-view-link a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
pdflink <- sub( "\\+html", "", intermed2)
return( pdflink )
}
###
# Get full text pdf link from anesthesiology full text website.
##
get.pdflink.anesthesiology <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#pdfLink"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###################################
# END FUNCTIONS
###################################
# output directory to store full text pdf
outdir <- 'pdfNEW/pdfs2'
# read data of missing pdfs
missings <- read.csv2( "missingsWithURL.csv", stringsAsFactors = F )
head(missings)
names(missings) <- c( "pmid", "url" )
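###
# Note: 'missingsWithURL.csv' is read with read.csv2, i.e. a semicolon-separated
# file; whatever its original header is, the two columns are renamed to pmid and
# url above. A row would look roughly like (placeholder values, not real data):
# 12345678;https://doi.org/10.1000/xyz123
##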
# row range of 'missings' to process in this run ('min' and 'max' shadow the base functions but serve only as loop bounds here)
min <- 50000
max <- 60000
# set progress bar
progbar <- txtProgressBar( min = min, max = max, style = 3 )
# for each row in the selected range, look up its pmid/url pair and try to download the pdf
for( i in min:max )
{
setTxtProgressBar( progbar, i )
# collect the pmid/url pair for this row
pp <- data.frame( pmid = missings$pmid[ i ],
url = missings$url[ i ],
stringsAsFactors = FALSE )
get.pdf( pmid = pp$pmid, url = pp$url )
}
# quit R session
q( save = "no" )
|
/scripts/obtainingPDFS/7_url.to.pdfdownloadRE6.R
|
permissive
|
wmotte/frrp
|
R
| false | false | 83,839 |
r
|
################################################################################
# Aim: Download full text pdfs, given PMID and url
#
# Contact: Herm Lamberink, h.j.lamberink@umcutrecht.nl
# Date: 2018-03-19
#############################
.libPaths( c(.libPaths(), "/mnt/data/live02/stress/hlamberink/RLibrary" ) )
library( 'xml2' ) # used by rvest package
library( 'rvest' ) # web scraping package
library( "curl" )
library( "XML" )
library( "pbapply" ) # progress bar during sapply
library( 'plyr' ); library( 'dplyr' )
library( 'tidyr' )
###################################
# FUNCTIONS
###################################
###
# Get pdf from given pmid
##
get.pdf <- function( pmid, url, outdr = outdir )
{
# prevent the function from shutting down due to an error
v <- tryCatch(
{
# output pdf
outpdf <- paste0( outdr, '/', pmid, '.pdf' )
if( ! file.exists( outpdf ) )
{
# set empty pdflink
pdflink <- NA
#######################
# pdflink per publisher
#######################
# url is from arvojournals
if( grepl( "arvojournals", url ) )
{
# url to pdf
pdflink <- get.pdflink.arvojournals( url )
}
# url is from JAMA
if( grepl( "jamanetwork.com", url ) )
{
# url to pdf
pdflink <- get.pdflink.jama( url )
}
# url is from PLOS
if( grepl( "dx.plos", url ) )
{
# url to pdf
pdflink <- get.pdflink.plos( url )
}
# url is from EHP
if( grepl( "/EHP", url ) )
{
pdflink <- get.pdflink.ehp( url )
}
# url is from doi/bjs
if( grepl( "/bjs", url ) )
{
pdflink <- get.pdflink.doibjs( url )
}
# url is from Wiley, via doi.org
#if( grepl( "dx.doi.org", url ) )
#{
# pdflink <- get.pdflink.doiwiley( url )
#}
# url is from wiley
if( grepl( "wiley.com", url ) )
{
pdflink <- get.pdflink.wileyreal( url )
}
# url is from bmj
if( grepl( "bmj.com", url ) )
{
pdflink <- get.pdflink.bmj( url )
}
# url is from cmaj
if( grepl( "cmaj.ca", url ) )
{
pdflink <- get.pdflink.cmaj( url )
}
# url is from nejm
if( grepl( "nejm.org", url ) )
{
pdflink <- get.pdflink.nejm( url )
}
# url is from scielo
if( grepl( "scielo.br", url ) )
{
pdflink <- get.pdflink.scielo( url )
}
# url is from academic.oup
if( grepl( "academic.oup", url ) )
{
pdflink <- get.pdflink.acoup( url )
}
# url is from annals
if( grepl( "annals", url ) )
{
pdflink <- get.pdflink.annals( url )
}
# url is from cambridge
if( grepl( "cambridge.org", url ) )
{
pdflink <- get.pdflink.cambridge( url )
}
# url is from OVID
if( grepl( "Insights.ovid", url ) )
{
# url to pdf
pdflink <- get.pdflink.ovid1( url )
if( length(pdflink) == 0 ) pdflink <- get.pdflink.ovid2( url )
}
# url is from iiar
if( grepl( "iiar", url ) )
{
pdflink <- get.pdflink.iiar( url )
}
# url is from ahajournals
if( grepl( "ahajournals", url ) )
{
pdflink <- get.pdflink.ahaj( url )
}
# url is from sciencedirect
if( grepl( "sciencedirect.com", url ) )
{
pdflink <- get.pdflink.sciencedirect( url )
}
# url is from asm
if( grepl( "asm", url ) )
{
pdflink <- get.pdflink.asm( url )
}
# url is from ajp
if( grepl( "ajp", url ) )
{
pdflink <- get.pdflink.ajp( url )
}
# url is from apsjournals
if( grepl( "apsjournals", url ) )
{
pdflink <- get.pdflink.apsjournals( url )
}
# url is from arjournals
if( grepl( "arjournals", url ) )
{
pdflink <- get.pdflink.arjournals( url )
}
# url is from ascopubs
if( grepl( "ascopubs", url ) )
{
pdflink <- get.pdflink.ascopubs( url )
}
# url is from avmajournals
if( grepl( "avmajournals", url ) )
{
pdflink <- get.pdflink.avma( url )
}
# url is from bjgp
if( grepl( "bjgp", url ) )
{
pdflink <- get.pdflink.bjgp( url )
}
# url is from boneandjoint
if( grepl( "boneandjoint", url ) )
{
pdflink <- get.pdflink.boneandjoint( url )
}
# url is from aacrjournals
if( grepl( "aacrjournals", url ) )
{
pdflink <- get.pdflink.aacrjournals( url )
}
# url is from diabetesjournals
if( grepl( "diabetesjournals", url ) )
{
pdflink <- get.pdflink.diabetesjournals( url )
}
# url is from asnjournals
if( grepl( "asnjournals", url ) )
{
pdflink <- get.pdflink.asnjournals( url )
}
# url is from ersjournals
if( grepl( "ersjournals", url ) )
{
pdflink <- get.pdflink.ersjournals( url )
}
# url is from gacetamedicade
if( grepl( "gacetamedicade", url ) )
{
pdflink <- get.pdflink.gacetamedicade( url )
}
# url is from tums.ac.ir
if( grepl( "tums.ac.ir", url ) )
{
pdflink <- get.pdflink.tums( url )
}
# url is from nutrition.org
if( grepl( "nutrition.org", url ) )
{
pdflink <- get.pdflink.nutrition( url )
}
# url is from aota.org
if( grepl( "aota.org", url ) )
{
pdflink <- get.pdflink.aota( url )
}
# url is from physiology.org
if( grepl( "physiology.org", url ) )
{
pdflink <- get.pdflink.physiology( url )
}
# url is from asahq.org
if( grepl( "asahq.org", url ) )
{
pdflink <- get.pdflink.asahq( url )
}
# url is from upol.cz
if( grepl( "upol.cz", url ) )
{
pdflink <- get.pdflink.upol.cz( url )
}
# url is from rcpsych
if( grepl( "rcpsych.org", url ) )
{
pdflink <- get.pdflink.rcpsych( url )
}
# url is from sabinet.co.za
if( grepl( "sabinet.co.za", url ) )
{
pdflink <- get.pdflink.sabinet( url )
}
# url is from quintessenz
if( grepl( "quintessenz", url ) )
{
pdflink <- get.pdflink.quintessenz( url )
}
# url is from clinicalandtranslationalinvestigation
if( grepl( "clinicalandtranslationalinvestigation", url ) )
{
pdflink <- get.pdflink.clinicalandtranslationalinvestigation( url )
}
# url is from jaoa.org
if( grepl( "jaoa.org", url ) )
{
pdflink <- get.pdflink.jaoa( url )
}
# url is from snmjournals
if( grepl( "snmjournals", url ) )
{
pdflink <- get.pdflink.snmjournals( url )
}
# url is from umsha.ac.ir
if( grepl( "umsha" , url ) )
{
pdflink <- get.pdflink.umsha( url )
}
# url is from tokai
if( grepl( "tokai" , url ) )
{
pdflink <- get.pdflink.tokai( url )
}
# url is from pamw.pl
if( grepl( "pamw.pl", url ) )
{
pdflink <- get.pdflink.pamw( url )
}
# url is from aappublications
if( grepl( "aappublications", url ) )
{
pdflink <- get.pdflink.aappublications( url )
}
# url is from publisherspanel
if( grepl( "publisherspanel", url ) )
{
pdflink <- get.pdflink.publisherspanel( url )
}
# url is from rcseng
if( grepl( "rcseng", url ) )
{
pdflink <- get.pdflink.rcseng( url )
}
# url is from rsna
if( grepl( "rsna", url ) )
{
pdflink <- get.pdflink.rsna( url )
}
# url is from rcjournal
if( grepl( "rcjournal", url ) )
{
pdflink <- get.pdflink.rcjournal( url )
}
# url is from revistachirurgia
if( grepl( "revistachirurgia", url ) )
{
pdflink <- get.pdflink.revistachirurgia( url )
}
# url is from thejns
if( grepl( "thejns", url ) )
{
pdflink <- get.pdflink.thejns( url )
}
# url is from alphamedpress
if( grepl( "alphamedpress", url ) )
{
pdflink <- get.pdflink.alphamedpress( url )
}
# url is from aepress
if( grepl( "aepress", url ) )
{
pdflink <- get.pdflink.aepress( url )
}
# url is from ajronline
if( grepl( "ajronline", url ) )
{
pdflink <- get.pdflink.ajronline( url )
}
# url is from ajcn
if( grepl( "ajcn", url ) )
{
pdflink <- get.pdflink.ajcn( url )
}
# url is from ams.ac.ir
if( grepl( "ams.ac.ir", url ) )
{
pdflink <- get.pdflink.ams.ac.ir( url )
}
# url is from annfammed
if( grepl( "annfammed", url ) )
{
pdflink <- get.pdflink.annfammed( url )
}
# url is from annsaudimed
if( grepl( "annsaudimed", url ) )
{
pdflink <- get.pdflink.annsaudimed( url )
}
# url is from atsjournals
if( grepl( "atsjournals", url ) )
{
pdflink <- get.pdflink.atsjournals( url )
}
# url is from birpublications
if( grepl( "birpublications", url ) )
{
pdflink <- get.pdflink.birpublications( url )
}
# url is from bloodjournal
if( grepl( "bloodjournal", url ) )
{
pdflink <- get.pdflink.bloodjournal( url )
}
# url is from cfp
if( grepl( "cfp.org", url ) )
{
pdflink <- get.pdflink.cfp( url )
}
# url is from cmj.hr
if( grepl( "cmj.hr", url ) )
{
pdflink <- get.pdflink.cmj.hr( url )
}
# url is from cmj.org
if( grepl( "cmj.org", url ) )
{
pdflink <- get.pdflink.cmj.org( url )
}
# url is from danmedj
if( grepl( "danmedj", url ) )
{
pdflink <- get.pdflink.danmedj( url )
}
# url is from dirjournal
if( grepl( "dirjournal", url ) )
{
pdflink <- get.pdflink.dirjournal( url )
}
# url is from e-cmh
if( grepl( "e-cmh", url ) )
{
pdflink <- get.pdflink.ecmh( url )
}
# url is from ectrx
if( grepl( "ectrx", url ) )
{
pdflink <- get.pdflink.ectrx( url )
}
# url is from educationforhealth
if( grepl( "educationforhealth", url ) )
{
pdflink <- get.pdflink.educationforhealth( url )
}
# url is from eje-online
if( grepl( "eje-online", url ) )
{
pdflink <- get.pdflink.ejeonline( url )
}
# url is from europeanreview
if( grepl( "europeanreview", url ) )
{
pdflink <- get.pdflink.europeanreview( url )
}
# url is from haematologica
if( grepl( "haematologica", url ) )
{
pdflink <- get.pdflink.haematologica( url )
}
# url is from hdbp
if( grepl( "hdbp", url ) )
{
pdflink <- get.pdflink.hdbp( url )
}
# url is from healio
if( grepl( "healio", url ) )
{
pdflink <- get.pdflink.healio( url )
}
# url is from ijkd
if( grepl( "ijkd", url ) )
{
pdflink <- get.pdflink.ijkd( url )
}
# url is from ijo.in
if( grepl( "ijo.in", url ) )
{
pdflink <- get.pdflink.ijo.in( url )
}
# url is from impactjournals
if( grepl( "impactjournals", url ) )
{
pdflink <- get.pdflink.impactjournals( url )
}
# url is from inaactamedica
if( grepl( "inaactamedica", url ) )
{
pdflink <- get.pdflink.inaactamedica( url )
}
# url is from indianjcancer
if( grepl( "indianjcancer", url ) )
{
pdflink <- get.pdflink.indianjcancer( url )
}
# url is from intbrazjurol
if( grepl( "intbrazjurol", url ) )
{
pdflink <- url
}
# url is from jiaci
if( grepl( "jiaci", url ) )
{
pdflink <- get.pdflink.jiaci( url )
}
# url is from jmir
if( grepl( "jmir", url ) )
{
pdflink <- get.pdflink.jmir.org( url )
}
# url is from jneurosci
if( grepl( "jneurosci", url ) )
{
pdflink <- get.pdflink.jneurosci( url )
}
# url is from jospt
if( grepl( "jospt", url ) )
{
pdflink <- get.pdflink.jospt( url )
}
# url is from mdpi.com
if( grepl( "mdpi.com", url ) )
{
pdflink <- get.pdflink.mdpi.com( url )
}
# url is from painphysicianjournal
if( grepl( "painphysicianjournal", url ) )
{
pdflink <- get.pdflink.painphysicianjournal( url )
}
# url is from sjweh
if( grepl( "sjweh", url ) )
{
pdflink <- get.pdflink.sjweh( url )
}
# url is from tandfonline
if( grepl( "tandfonline", url ) )
{
pdflink <- get.pdflink.tandfonline( url )
}
# url is from thieme-connect
if( grepl( "thieme-connect", url ) )
{
pdflink <- get.pdflink.thieme( url )
}
# url is from wjgnet
if( grepl( "wjgnet", url ) )
{
pdflink <- get.pdflink.wjgnet( url )
}
# url is from degruyter
if( grepl( "degruyter", url ) )
{
pdflink <- get.pdflink.degruyter( url )
}
# url is from biomedcentral
if( grepl( "biomedcentral", url ) )
{
pdflink <- get.pdflink.biomedcentral( url )
}
# url is from karger
if( grepl( "karger", url ) )
{
pdflink <- get.pdflink.karger( url )
}
# url is from jkan.or.kr
if( grepl( "jkan.or.kr", url ) )
{
pdflink <- get.pdflink.jkan.or.kr( url )
}
# url is from medicaljournals.se
if( grepl( "medicaljournals.se", url ) )
{
pdflink <- get.pdflink.medicaljournals.se( url )
}
# url is from anesthesiology
if( grepl( "anesthesiology", url ) )
{
pdflink <- get.pdflink.anesthesiology( url )
}
# url is from linkinghub
if( grepl( "linkinghub", url ) )
{
pdflink <- get.pdflink.linkinghub( url )
}
# url contains 10.1038 (nature publishers)
if( grepl( "doi.org/10.1038", url ) )
{
pdflink <- get.pdflink.nature( url )
}
# url contains 10.1089 (acm journal)
if( grepl( "doi.org/10.1089", url ) )
{
pdflink <- get.pdflink.acm( url )
}
# url contains 10.1111 (wiley)
if( grepl( "doi.org/10.1111", url ) )
{
pdflink <- get.pdflink.wiley( url )
}
# url contains 10.1002 (wiley)
if( grepl( "doi.org/10.1002", url ) )
{
pdflink <- get.pdflink.wiley( url )
}
# url contains 10.1007 (springerlink)
if( grepl( "doi.org/10.1007", url ) )
{
pdflink <- get.pdflink.springerlink( url )
}
# psychiatryonline
if( grepl( "psychiatryonline", url ) )
{
pdflink <- get.pdflink.psychiatryonline( url )
}
#######################
# download pdf
#######################
# write pdf to output if link is available
if( ! is.na( pdflink ) )
{
# download pdf (only if output is yet downloaded)
download.file( url = pdflink, destfile = outpdf,
mode = "wb", quiet = TRUE )
}
}
return( NA )
},
error=function(err) {
#message(paste("URL does not seem to exist:", url))
#message("Here's the original error message:")
message(paste( pmid, err, "\n" ) )
# Choose a return value in case of error
return( paste( pmid, "URL does not seem to exist" ) )
},
warning=function(war) {
#message(paste("URL caused a warning:", url))
#message("Here's the original warning message: ")
message(paste( pmid, war, "\n" ) )
# Choose a return value in case of warning
return( paste( pmid, "warning, test if downloaded" ) )
}
#finally={
# NOTE:
# Here goes everything that should be executed at the end,
# regardless of success or error.
# If you want more than one expression to be executed, then you
# need to wrap them in curly brackets ({...}); otherwise you could
# just have written 'finally=<expression>'
#message(paste("Processed URL:", url))
#message("Some other message at the end")
#}
)
}
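###
# Generic fallback (illustrative sketch, not wired into get.pdf above): many of the
# publisher-specific handlers below only read the <meta name="citation_pdf_url"> tag,
# so a shared helper along these lines could cover those cases. The function name is
# hypothetical and not used elsewhere in this script.
##
get.pdflink.citationmeta <- function( url )
{
# parse page with the same user agent used throughout this script
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# pdf location advertised by the page itself
pdflink <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
return( pdflink )
}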
###
# Get full text pdf link from psychiatryonline.org full text website.
##
get.pdflink.psychiatryonline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".show-pdf"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from springerlink full text website.
##
get.pdflink.springerlink <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from nature full text website.
##
get.pdflink.nature <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- 'meta[name="citation_pdf_url"]'
# save pdflink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
if( identical( pdflink, character(0) ) )
{
css <- 'a[class="inline-block block-link pa10 pl0"]'
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
if( !identical( intermed1, character(0) ) )
{
pdflink <- paste0( "https://www.nature.com", intermed1[1] )
}
}
return( pdflink )
}
###
# Get full text pdf link from acm full text website.
##
get.pdflink.acm <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- '.pdfprint a'
# save pdflink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
if( !identical( intermed, character(0) ) )
{
pdflink <- paste0( "http://online.liebertpub.com", intermed )
return( pdflink )
}
}
###
# Get full text pdf link from wiley full text website.
##
get.pdflink.wiley <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- 'meta[name="citation_pdf_url"]'
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from wiley full text website.
##
get.pdflink.wileyreal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf
css <- 'meta[name="citation_pdf_url"]'
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
get.pdflink.sciencedirect <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css <- 'input[name="redirectURL"]'
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "value" )
intermed2 <- URLdecode(intermed1)
page <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# xpath of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css = 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed3 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
pdflink <- paste0( "https://www.sciencedirect.com", intermed3 )
return( pdflink )
}
###
# Get full text pdf link from medicaljournals.se full text website.
##
get.pdflink.medicaljournals.se <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'li:nth-child(2) .btn-success2'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.medicaljournals.se", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from jkan.or.kr full text website.
##
get.pdflink.jkan.or.kr <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '#portlet_content_Format li:nth-child(4) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.jkan.or.kr", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from karger full text website.
##
get.pdflink.karger <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.btn-karger'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.karger.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from degruyter full text website.
##
get.pdflink.degruyter <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf-link'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.degruyter.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from biomedcentral full text website.
##
get.pdflink.biomedcentral <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from wjgnet full text website.
##
get.pdflink.wjgnet <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.left-articlenav li:nth-child(3) a'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from thieme-connect full text website.
##
get.pdflink.thieme <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '#articleTabs :nth-child(2) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- paste0( "http://www.thieme-connect.com", intermed1 )
page2 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- '#pdfLink'
intermed3 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.thieme-connect.com", intermed3 )
return( pdflink )
}
###
# Get full text pdf link from tandfonline full text website.
##
get.pdflink.tandfonline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.show-pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.tandfonline.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from sjweh full text website.
##
get.pdflink.sjweh <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf-download'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.sjweh.fi/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from painphysicianjournal full text website.
##
get.pdflink.painphysicianjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.row .float-right'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.painphysicianjournal.com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from mdpi.com full text website.
##
get.pdflink.mdpi.com <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from jospt full text website.
##
get.pdflink.jospt <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href^="/doi/pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.jospt.org", intermed1[1] )
return( pdflink )
}
###
# Get full text pdf link from jneurosci full text website.
##
get.pdflink.jneurosci <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from jmir.org full text website.
##
get.pdflink.jmir.org <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_abstract_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href^="http://www.jmir.org/article/download"]'
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from jiaci full text website.
##
get.pdflink.jiaci <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'li:nth-child(1) a:nth-child(2)'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.jiaci.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from indianjcancer full text website.
##
get.pdflink.indianjcancer <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href$=".pdf"]'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.indianjcancer.com/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from inaactamedica full text website.
##
get.pdflink.inaactamedica <- function( url )
{
# get href to pdfLink
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from impactjournals full text website.
##
get.pdflink.impactjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from ijo.in full text website.
##
get.pdflink.ijo.in <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href$=".pdf"]'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href")
pdflink <- paste0( "http://www.ijo.in/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from ijkd full text website.
##
get.pdflink.ijkd <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'frame'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "src" )
page2 <- xml2::read_html( curl( intermed1[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href^="http://www.ijkd"]'
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href")
return( pdflink )
}
###
# Get full text pdf link from healio full text website.
##
get.pdflink.healio <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from hdbp full text website.
##
get.pdflink.hdbp <- function( url )
{
# get href to pdfLink
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from haematologica full text website.
##
get.pdflink.haematologica <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from europeanreview full text website.
##
get.pdflink.europeanreview <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.right'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- sub( " http", "http", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from eje-online full text website.
##
get.pdflink.ejeonline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from educationforhealth full text website.
##
get.pdflink.educationforhealth <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'a[href$=".pdf"]'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.educationforhealth.net/", intermed2)
return( pdflink )
}
###
# Get full text pdf link from ectrx full text website.
##
get.pdflink.ectrx <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'b a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.ectrx.org/forms/", intermed1)
return( pdflink )
}
###
# Get full text pdf link from e-cmh full text website.
##
get.pdflink.ecmh <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="fulltext_pdf"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from dirjournal full text website.
##
get.pdflink.dirjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href$=".pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.dirjournal.org", intermed1[2] )
return( pdflink )
}
###
# Get full text pdf link from danmedj full text website.
##
get.pdflink.danmedj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href$=".pdf"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from cmj.org full text website.
##
get.pdflink.cmj.org <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- 'p a:nth-child(1)'
intermed2 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.cmj.org/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from cmj.hr full text website.
##
get.pdflink.cmj.hr <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'frame[src^="http"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "src" )
return( pdflink )
}
###
# Get full text pdf link from cfp full text website.
##
get.pdflink.cfp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from canjsurg full text website.
##
get.pdflink.canjsurg <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'p:nth-child(2) a:nth-child(2)'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from bloodjournal full text website.
##
get.pdflink.bloodjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from birpublications full text website.
##
get.pdflink.birpublications <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.show-pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.birpublications.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from atsjournals full text website.
##
get.pdflink.atsjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.show-pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.atsjournals.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from annsaudimed full text website.
##
get.pdflink.annsaudimed <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.desc'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from annfammed.org full text website.
##
get.pdflink.annfammed <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.full-text-pdf-view-link a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( "\\+html", "", intermed1 )
pdflink <- paste0( "http://www.annfammed.org", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from ams.ac.ir full text website.
##
get.pdflink.ams.ac.ir <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from ajronline full text website.
##
get.pdflink.ajronline <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '#refLinkList+ li .nowrap'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.ajronline.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from ajcn full text website.
##
get.pdflink.ajcn <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.full-text-pdf-view-link a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( "\\+html", "", intermed1 )
pdflink <- paste0( "http://www.ajcn.org", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from aepress.sk full text website.
##
get.pdflink.aepress <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from alphamedpress full text website.
##
get.pdflink.alphamedpress <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from thejns full text website.
##
get.pdflink.thejns <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.article-tools li:nth-child(2)'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://thejns.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from revistachirurgia full text website.
##
get.pdflink.revistachirurgia <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from rcjournal full text website.
##
get.pdflink.rcjournal <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from rsna full text website.
##
get.pdflink.rsna <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.tab-nav li:nth-child(6) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://pubs.rsna.org", intermed1)
return( pdflink )
}
###
# Get full text pdf link from rcseng.ac.uk full text website.
##
get.pdflink.rcseng <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.tab-nav li:nth-child(4) a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://publishing.rcseng.ac.uk", intermed1)
return( pdflink )
}
###
# Get full text pdf link from publisherspanel full text website.
##
get.pdflink.publisherspanel <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from aappublications full text website.
##
get.pdflink.aappublications <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from pamw.pl full text website.
##
get.pdflink.pamw <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'div[class="field-item even"] a'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- intermed1[1]
return( pdflink )
}
###
# Get full text pdf link from tokai.com full text website.
##
get.pdflink.tokai <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from umsha.ac.ir full text website.
##
get.pdflink.umsha <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from aspet full text website.
##
get.pdflink.aspet <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from waocp full text website.
##
get.pdflink.waocp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( "./", "", intermed1 )
pdflink <- paste0( "http://journal.waocp.org/", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from snmjournals full text website.
##
get.pdflink.snmjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from jaoa.org full text website.
##
get.pdflink.jaoa <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from clinicalandtranslationalinvestigation full text website.
##
get.pdflink.clinicalandtranslationalinvestigation <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[href^="files/"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://clinicalandtranslationalinvestigation.com/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from quintessenz full text website.
##
get.pdflink.quintessenz <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'a[class="tocbut"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".de" )
pdflink <- paste0( link1[[1]][1], ".de/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from sabinet.co.za full text website.
##
get.pdflink.sabinet <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from rcpsych full text website.
##
get.pdflink.rcpsych <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'link[type="application/pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from upol.cz full text website.
##
get.pdflink.upol.cz <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from asahq.org full text website.
##
get.pdflink.asahq <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#pdfLink"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from physiology full text website.
##
get.pdflink.physiology <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'link[type="application/pdf"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from aota.org full text website.
##
get.pdflink.aota <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
return( pdflink )
}
###
# Get full text pdf link from nutrition.org full text website.
##
get.pdflink.nutrition <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".full-text-pdf-view-link a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
pdflink <- sub( "\\+html", "", intermed2)
return( pdflink )
}
###
# Get full text pdf link from tums.ac.ir full text website.
##
get.pdflink.tums <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#sidebarRTArticleTools .file"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from arvojournals full text website.
##
get.pdflink.arvojournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#pdfLink"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
pdflink <- paste0( "http://iovs.arvojournals.org/", pdflink )
return( pdflink )
}
###
# Get full text pdf link from JAMA full text website.
##
get.pdflink.jama <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#full-text-tab #pdf-link"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
link1 <- strsplit( url, ".com" )
pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from plos full text website.
##
get.pdflink.plos <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#downloadPdf"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://journals.plos.org", pdflink )
return( pdflink )
}
###
# Get full text pdf link from bmj full text website.
##
get.pdflink.bmj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.bmj.com", intermed )
return( pdflink )
}
###
# Get full text pdf link from nejm full text website.
##
get.pdflink.nejm <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "li a[href^='/doi/pdf']"
# get href to pdfLink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.nejm.org", intermed )
return( pdflink )
}
###
# Get full text pdf link from academic.oup full text website.
##
get.pdflink.acoup <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".al-link"
# get href to pdfLink
intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://academic.oup.com", intermed )
return( pdflink )
}
###
# Get full text pdf link from annals full text website.
##
get.pdflink.annals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#tagmasterPDF"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
pdflink <- paste0( "https://www.annals.org", pdflink )
return( pdflink )
}
###
# Get full text pdf link from cambridge full text website.
##
get.pdflink.cambridge <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".download-types li:nth-child(1) a"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://www.cambridge.org", pdflink[1] )
return( pdflink )
}
###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid1 <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
# p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
# p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
}
page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
#intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
#page3 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
#pdflink <- page3 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
return( pdflink )
}
###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid2 <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
if(identical(p1, character(0))){
p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
}
page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
}else{
p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
p3 <- p2 %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
}
page3 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
intermed1 <- page3 %>% html_nodes( css = "#pdf" ) %>% html_attr( "href" )
intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
page4 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
pdflink <- page4 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
}
return( pdflink )
}
###
# Get full text pdf link from EHP full text website.
##
get.pdflink.ehp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- '.pdf_icon'
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "https://ehp.niehs.nih.gov", pdflink )
return( pdflink )
}
###
# Get full text pdf link from Science Direct full text website.
##
get.pdflink.sciencedirect <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# xpath of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css = ".pdf-download-btn-link"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- paste0( "http://www.sciencedirect.com", intermed1 )
page2 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 = 'meta[content^="0;URL"]'
intermed3 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "content" )
pdflink <- strsplit(intermed3, "URL=")[[1]][2]
return( pdflink )
}
# for springerlink, retrieve the correct url
get.pdflink.linkinghub <- function( url )
{
# parse url further and get the specific node with the URL
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
parsedfull <- htmlParse( page )
rootnode <- xmlRoot( parsedfull )
o <- getNodeSet( rootnode, "//input[@name='redirectURL']" )[[1]]
# convert to character
o2 <- capture.output(o)
# extract URL from character string
o3 <- data.frame( col = strsplit( o2, split = " " )[[1]] )
o4 <- separate( o3, col = "col", into = c("a", "b"), sep = "=", fill = "right" )
http <- o4[ o4$a == "value", "b" ]
http <- gsub( "\"", "", http )
outurl <- URLdecode(http)
# parse page
page <- xml2::read_html( curl( outurl, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
# xpath of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css = 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed3 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
pdflink1 <- sub( "amp;", "", intermed3 )
page2 <- xml2::read_html( pdflink1 )
css2 = 'div a'
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from scielo full text website.
##
get.pdflink.scielo <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "li:nth-child(2) a:nth-child(1)"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.scielo.br", pdflink[1] )
return( pdflink )
}
###
# Get full text pdf link from hyper.ahajournals full text website.
##
get.pdflink.ahaj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name=citation_pdf_url]' # alternative selector (unused): ".aha-icon-download"
# get href to following page, then repeat the above steps
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
# page1 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css <- ".input-text-url input"
# intermed2 <- page1 %>% html_nodes( css = css ) %>% html_attr( "value" )
# pdflink <- paste0( intermed2, ".full.pdf" )
return( pdflink )
}
###
# Get full text pdf link from cmaj full text website.
##
get.pdflink.cmaj <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".full-text-pdf-view-link a"
# get href to pdfLink
pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://www.cmaj.ca", pdflink )
pdflink <- sub( "+html", "", pdflink)
return( pdflink )
}
###
# Get full text pdf link from doi.org (Wiley) full text website.
##
get.pdflink.doiwiley <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
css2 <- "#pdfDocument"
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "src" )
return( pdflink )
}
###
# Get full text pdf link from doi.org (bjs) full text website.
##
get.pdflink.doibjs <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".js-infopane-epdf"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- sub( "epdf", "pdf", intermed1)
return( pdflink )
}
###
# Get full text pdf link from asm.org full text website.
##
get.pdflink.asm <- function( url )
{
# no page parsing needed here: the pdf link is derived directly from the article URL
pdflink <- sub( "long", "full.pdf", url)
return( pdflink )
}
###
# Get full text pdf link from ajp... full text website.
##
get.pdflink.ajp <- function( url )
{
pdflink <- url
return( pdflink )
}
###
# Get full text pdf link from apsjournals full text website.
##
get.pdflink.apsjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "li:nth-child(2) .nowrap"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://apsjournals.apsnet.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from arjournals full text website.
##
get.pdflink.arjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "a[href^='/doi/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://arjournals.annualreviews.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from ascopubs full text website.
##
get.pdflink.ascopubs <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".show-pdf"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- paste0( "http://ascopubs.org", intermed1 )
pdflink <- sub( "/pdf", "/pdfdirect", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from avmajournals full text website.
##
get.pdflink.avma <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".article_link td:nth-child(2) .header4"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://avmajournals.avma.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from bjgp full text website.
##
get.pdflink.bjgp <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://bjgp.org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from boneandjoint full text website.
##
get.pdflink.boneandjoint <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://bjj.boneandjoint.org.uk", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from aacrjournals full text website.
##
get.pdflink.aacrjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".last .highwire-article-nav-jumplink"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit(url, ".org")
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from diabetesjournals full text website.
##
get.pdflink.diabetesjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit(url, ".org")
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from asnjournals full text website.
##
get.pdflink.asnjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".primary a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
intermed2 <- sub( ".pdf\\+html", ".pdf", intermed1 )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed2 )
return( pdflink )
}
###
# Get full text pdf link from ersjournals full text website.
##
get.pdflink.ersjournals <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- "link[type='application/pdf']"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".com" )
pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from gacetamedicade full text website.
##
get.pdflink.gacetamedicade <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
# only modification is "\" before the double quotes.
css <- ".col-sm-2 li:nth-child(1) a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
pdflink <- paste0( "http://gacetamedicademexico.com/", intermed1 )
return( pdflink )
}
###
# Get full text pdf link from iiar full text website.
##
get.pdflink.iiar <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- ".full-text-pdf-view-link a"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
link1 <- strsplit( url, ".org" )
intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
pdflink <- sub( "\\+html", "", intermed2)
return( pdflink )
}
###
# Get full text pdf link from anesthesiology full text website.
##
get.pdflink.anesthesiology <- function( url )
{
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- "#pdfLink"
# get href to pdfLink
intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
link1 <- strsplit( url, ".org" )
pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
return( pdflink )
}
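###
# Hedged usage sketch (added for illustration; not part of the original pipeline).
# Each get.pdflink.* helper above turns a publisher landing page into a direct PDF URL,
# which could then be downloaded, e.g. (the DOI below is a made-up placeholder):
# link <- get.pdflink.ehp( "https://ehp.niehs.nih.gov/doi/full/10.1289/EHP0000" )
# download.file( link, destfile = "paper.pdf", mode = "wb" )
##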
###################################
# END FUNCTIONS
###################################
# output directory to store full text pdf
outdir <- 'pdfNEW/pdfs2'
# read data of missing pdfs
missings <- read.csv2( "missingsWithURL.csv", stringsAsFactors = F )
head(missings)
names(missings) <- c( "pmid", "url" )
min <- 50000
max <- 60000
# set progress bar
progbar <- txtProgressBar( min = min, max = max, style = 3 )
# for every pmid, add url
for( i in min:max )
{
setTxtProgressBar( progbar, i )
# add url
pp <- data.frame( pmid = missings$pmid[ i ],
url = missings$url[ i ],
stringsAsFactors = FALSE )
get.pdf( pmid = pp$pmid, url = pp$url )
}
# quit R session
q( save = "no" )
|
### Jinliang Yang
### 4/7/2015
### transform GBS format to BED+ format with haplotype call
seeds <- read.delim("data/seeds_09.02.2015_22.38.10.txt")
#[1] 22022 51
idtab <- read.csv("data/SeeD_SID_to_GID.csv")
length(unique(idtab$GID)) #4020
length(unique(idtab$SampleID)) #4710
# Note: 690 accessions were genotyped multiple times
subseed <- subset(seeds, general_identifier %in% idtab$GID)
### 3493 unique accessions with collection information!
out <- merge(idtab, seeds, by.x="GID", by.y="general_identifier")
##### transform GBS to BED+ format
source("lib/gbs2bed.R")
for(i in 5:9){
gbs2bed(gbsfile= paste0("/group/jrigrp4/SeeData/All_SeeD_2.7_chr", i, "_no_filter.unimputed.hmp.txt"),
outfile= paste0("/group/jrigrp4/SeeData/chr", i, "_filetered_unimputed.hmp"))
}
##### run the following snpfrq (python) commands to get the SNP frq and missing rate
### run in terminal (shell commands; kept commented so this R script still parses):
# snpfrq -p /group/jrigrp4/SeeData/ -i chr10_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr10_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr9_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr9_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr8_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr8_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr7_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr7_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr6_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr6_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr5_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr5_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr4_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr4_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr3_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr3_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr2_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr2_filetered_unimputed.frq
# snpfrq -p /group/jrigrp4/SeeData/ -i chr1_filetered_unimputed.hmp -s 6 -m "0N" -a 0 -b 1 -c 2 -o chr1_filetered_unimputed.frq
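### Hedged alternative (illustration only; assumes the snpfrq tool is on the PATH):
### the ten commands above could equally be generated and launched from within R, e.g.
# for(chr in 10:1){
#     cmd <- sprintf("snpfrq -p /group/jrigrp4/SeeData/ -i chr%d_filetered_unimputed.hmp -s 6 -m \"0N\" -a 0 -b 1 -c 2 -o chr%d_filetered_unimputed.frq", chr, chr)
#     system(cmd)
# }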
|
/profiling/1.SeeD_GBS/1.A.1_GBS_bed.R
|
no_license
|
yangjl/SeeDs
|
R
| false | false | 2,160 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input_tools.R
\name{input_tools_buildsimplist}
\alias{input_tools_buildsimplist}
\title{Construct a list of SimP for given runs}
\usage{
input_tools_buildsimplist(runs, randomseed = 0)
}
\arguments{
\item{runs}{first part of runid}
\item{randomseed}{second part of runid}
}
\value{
list of SimPs
}
\description{
Construct a list of SimP for given runs
}
\author{
Sascha Holzhauer
}
|
/man/input_tools_buildsimplist.Rd
|
no_license
|
CRAFTY-ABM/craftyr
|
R
| false | true | 461 |
rd
|
### -----------------------------
### simon munzert
### scraping dynamic webpages
### -----------------------------
## preparations -------------------
library(rvest)
library(RSelenium)
## setup R + RSelenium -------------------------
# install current version of Java SE Development Kit
browseURL("http://www.oracle.com/technetwork/java/javase/downloads/jdk9-downloads-3848520.html")
# set up connection via RSelenium package
# documentation: http://cran.r-project.org/web/packages/RSelenium/RSelenium.pdf
# check currently installed version of Java
system("java -version")
## example --------------------------
# initiate Selenium driver
rD <- rsDriver()
remDr <- rD[["client"]]
# start browser, navigate to page
url <- "http://www.iea.org/policiesandmeasures/renewableenergy/"
remDr$navigate(url)
# open regions menu
xpath <- '//*[@id="main"]/div/form/div[1]/ul/li[1]/span'
regionsElem <- remDr$findElement(using = 'xpath', value = xpath)
openRegions <- regionsElem$clickElement() # click on button
# selection "European Union"
xpath <- '//*[@id="main"]/div/form/div[1]/ul/li[1]/ul/li[5]/label/input'
euElem <- remDr$findElement(using = 'xpath', value = xpath)
selectEU <- euElem$clickElement() # click on button
# set time frame
xpath <- '//*[@id="main"]/div/form/div[5]/select[1]'
fromDrop <- remDr$findElement(using = 'xpath', value = xpath)
clickFrom <- fromDrop$clickElement() # click on drop-down menu
writeFrom <- fromDrop$sendKeysToElement(list("2000")) # enter start year
xpath <- '//*[@id="main"]/div/form/div[5]/select[2]'
toDrop <- remDr$findElement(using = 'xpath', value = xpath)
clickTo <- toDrop$clickElement() # click on drop-down menu
writeTo <- toDrop$sendKeysToElement(list("2010")) # enter end year
# click on search button
xpath <- '//*[@id="main"]/div/form/button[2]'
searchElem <- remDr$findElement(using = 'xpath', value = xpath)
resultsPage <- searchElem$clickElement() # click on button
# store index page
output <- remDr$getPageSource(header = TRUE)
write(output[[1]], file = "iea-renewables.html")
# close connection
remDr$closeServer()
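# note (hedged): closeServer() is deprecated in recent RSelenium releases; there the
# usual shutdown is remDr$close() followed by rD[["server"]]$stop()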
# parse index table
content <- read_html("iea-renewables.html", encoding = "utf8")
tabs <- html_table(content, fill = TRUE)
tab <- tabs[[1]]
# add names
names(tab) <- c("title", "country", "year", "status", "type", "target")
head(tab)
|
/web scraping/03a-scraping-dynamic-pages.R
|
no_license
|
anel-li/MDM-coding
|
R
| false | false | 2,328 |
r
|
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# How have emissions from motor vehicle sources changed
# from 1999–2008 in Baltimore City?
require(data.table)
require(grDevices)
require(ggplot2)
dt<-data.table(NEI)[fips == "24510" & type=="ON-ROAD",sum(Emissions),by=c("year")]
setnames(dt,c("Year","Emissions"))
dt$Year<-factor(dt$Year)
plot<-ggplot(data=dt, aes(x=Year, y=Emissions,fill=Year)) +
geom_bar(stat="identity") +
  labs(
    title = "PM2.5 Motor Vehicle Sources Emissions in\nBaltimore City, Maryland",
    x = "Year",
    y = "Emissions, Tons"
  )
ggsave("plot5.png",plot=plot,width=5.25,height=5.25,units="in",dpi=120)
|
/plot5.R
|
no_license
|
DSCourse001/ExData_Plotting2
|
R
| false | false | 743 |
r
|
pkgname <- "knnpackage"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
options(pager = "console")
base::assign(".ExTimings", "knnpackage-Ex.timings", pos = 'CheckExEnv')
base::cat("name\tuser\tsystem\telapsed\n", file=base::get(".ExTimings", pos = 'CheckExEnv'))
base::assign(".format_ptime",
function(x) {
if(!is.na(x[4L])) x[1L] <- x[1L] + x[4L]
if(!is.na(x[5L])) x[2L] <- x[2L] + x[5L]
options(OutDec = '.')
format(x[1L:3L], digits = 7L)
},
pos = 'CheckExEnv')
### * </HEADER>
library('knnpackage')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
cleanEx()
nameEx("knnpackage-package")
### * knnpackage-package
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: knnpackage-package
### Title: A short title line describing what the package does
### Aliases: knnpackage-package knnpackage
### Keywords: package
### ** Examples
## Not run:
##D ## Optional simple examples of the most important functions
##D ## These can be in \dontrun{} and \donttest{} blocks.
##D
## End(Not run)
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("knnpackage-package", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
cleanEx()
nameEx("rcpp_hello_world")
### * rcpp_hello_world
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: rcpp_hello_world
### Title: Simple function using Rcpp
### Aliases: rcpp_hello_world
### ** Examples
## Not run:
##D rcpp_hello_world()
## End(Not run)
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("rcpp_hello_world", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
### * <FOOTER>
###
cleanEx()
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
|
/Proyecto 1/knnpackage.Rcheck/knnpackage-Ex.R
|
no_license
|
apt345/Advanced-Programming
|
R
| false | false | 2,429 |
r
|
context("load data")
# the full iris dataset
base64_iris <- "
rde1QlpoOTFBWSZTWTZfsaQABq9/7/////+AAQgAwARIwC/33YBAAAEwACAAJgggSABtbdAEuQgFUAbD
JQkRIphTyYpp6j1DZE0AANqepoeU9R6T00EMnqEU/Ko0BoAANAGgAAAAA0AAkypSoNAABoAAADEBoADQ
yAcAwjCaYhgEAyAGEaZMmEYCGhwDCMJpiGAQDIAYRpkyYRgIafr2w7TkRcpkTPeHfvPD5jioioVC+O7i
eTdOSnJboeX0RlfO6s8TpujlhLfmZiankmjZVQ8qVn8XkAr5bY4hALu4AQBineQCrdEt+YxCWevVinay
1jD75dVsASVppAtjAy7b9vPUCQGRoEkWGg7gV/aLjtSr9YlYEmhGITSEmLWkgDnQkKgUECzxBlGmV45V
CclKszMik1VEDUtBKwrK4ZWFklkamptoFJFmKSRIasKxVhqCSiRGlFFViJaF0ooo5ihBYhWKBkVRHLFN
RTQtVRTZSSGIEtDC2mVJssPdhnYN4LiM7Ox3VsHg8xYcCmyJhTYEAcb6qAUMiAjQ5gItMgQMGkIliAFp
C5zW0kIDtbfz28f7lepDvjBV+RE6ySp1kR9henIzMzZqSoKU+dckpjbY5I5JPOgxNetkNDaFy5yZQVU3
roo1KqLhQZYnkRwcSAzClMKKTodCClEPhJPJTioc4rDxOcrnDcyUQjtxwKNTlF2FDjJwQhJKKQWRWKZF
zQWpZwT0eDh4LYGhc1JoERCRQkloJGiVJNMriSZaldytxqEdINLhV0o5liRQaAWZBRhYFJUcrRKTtMud
KEIAhJIUCFAQFDoTE+AftEY3VGaAZAq9Uncultg+PLBz2unWbCKFWwv3L6Qkv0xISWjsSwtLwuGEIgIc
brr5HEwpmBzCmIczJMEPUaCjsTppJZa5qoZq1OdykKSkNw4hQOw55WoriODHjMgPe6Dh6fBy8AUw9XFR
XOKTJuXcbvVHDaMljXUYLMFd2zdKHkwDu8PR7/qen2cffjGDfF2zuPcco0E7NIqC7+/9RJP2Q4nV4p4E
6ggmX+CF+q7MpngcKM8ksmYzzdo4iKUh9bqbqhRotpIA7BAdCSARt2pNgsKhNto6jrJilKSx6YOOIexC
3A4Sdh6/W4PXpV5yFvm5z5no9J5YcXkJCvCADdGl4uMn/i7kinChIGy/Y0g=
"
# the first three rows of iris, but with the first element changed
# a <- iris # nolint
# a$Sepal.Length[1] <- 5000 # nolint
base64_modified_iris_3 <- "
rde1QlpoOTFBWSZTWdsanVkABrD/7/////+AAQgAwARIwC/33YBAQAEwCCAAJgggSABtbdAEuR7Ai0AW
nrpcJCSmqeTUaPSNBoemoDIADT0htE2UaNqBoeocaMmRhGIBhNBgE0GgZMmjJkMIDCU8pSqGmQAaDTI0
NAYQbQhiDQM1DQ0DVTR6QyZqGgDQ000NAAaADQaAGQwIkpJGJtRtJoYCYCGRgAAjCemgmCHe/fxhyuIi
4piJjzDznfS3DVRFQqF9ZzU59xspstwezwjK+50J1OM4cWEmOqqVeeSrNl6xuJYOPEgFsGFj2KpALr0C
ANqtlAK52Sx1NoSzXaNqtTNbbDi3tGFAJLWaQLlYG9qxYc15CQG20CSL7QbIXcE6D1qWK4SvxYiY4wFY
N1UA7UUbQWhB5ao3RTFdcVQnEpVmZkUmqogaloJWFZXBlYWSWRqamzIFJFmKSRIasFYqwagkokRpRRVY
iWhclFFHGKHCIVigZFURxYpqKaFqqKbFJIYgSyGFsmpIVKZKJ6gYoNuJQxYpfUoW7EpIhILCCSChACI8
dUBIDCAQiSygrBhQhAiiWRAB/QW+v2KiBy5n8+PDyYr5sOfSCr9SJ2UlTsoj1F42MrMuW1KFrb91FJjb
Y6J76N1B2GvKyNDaFxccTFBVTPhRRqVUXBQZYnYjRwkBmFKYUUnIchBSiH0EnZTVQ3VYdTdrdM3EohHM
1wFGpxRcwUNZNEISSikFkVimRcaCylnAnh0cHRbAaFxqTIERCRQkloJGiVJD3nXVcJJlqVxcmoRyQaXB
VyUcZYkUGgFmQUYWBSVHFaJScyZccmRjZJI0yDjaluA3uk3io8Ofc0956W93lO5yfbz/StLco7MHuul4
q0X14uW7giv7gIry+i/GLlyoERAjnRd2+5UVVB1FUjqqKhHnMxZ364rUYGfW9GZ87rnWiotG45FB33Wh
qXSZMeM2wcvyFfSfOqASb7WJARWhBSIWoZINCp4Ui9DQkdBEMpxQQWEg9ejHhwZPzwozoQgXq/ZzPmab
HIEaTkY2wXP88iJJyIanQ1Tz5yggmXlIX7rvRszQcUzUUyqmatezklrR/x3m70U4cKSAOT+iA0pIBGrU
k2C6yibbR+DmKlrWpj44clPbhbQ0k7b2uTR7VKu7C3c3e48PE2Ukq4ijhiAHuIujuln/F3JFOFCQ2xqd
WQ==
"
# The first three rows from iris, but with Sepal.Length doubled
# a <- head(iris, 3) # nolint
# a$Sepal.Length <- a$Sepal.Length * 2 # nolint
base64_scaled_iris_3 <- "
rde1QlpoOTFBWSZTWTV4+F0AAKT/5P//SAAcAQAAwARIwC/n3YBAAAAwACYFAbAA7ICUQSnim9DSNT0I
Bo9QNoNMjUMaGhoAMhoAAAAAJFFNGjQAAAAAAA4wnkeSFSiwlSkbJUEW1CJvxwWLc1ON0BEpUlVDV+sy
15EILrSlYpAncITOjFVJ6FKJMEvSPhFEVxGNqYYEWkEzA1MAe+AQaiwHBcA0ZVj5hVFYxlx6blXc08N9
uNa4quzoR5Yefiyy5h0ny5GAxw/AjCKcFEzMLdWosBZsS3KqwGw663Jo1tNPdCtaXlk5plveRmYSUTUD
jbEWhpt75vb8REb2Treh2S8TPNw5Lyf/F3JFOFCQNXj4XQ==
"
test_that("cached data loaded as expected", {
b <- load_rde_var(TRUE, iris, base64_iris)
expect_equal(length(b), 5)
expect_true(all.equal(b, iris))
})
test_that("new data loaded as expected", {
b <- load_rde_var(FALSE, iris, base64_iris)
expect_equal(length(b), 5)
expect_true(all.equal(b, iris))
})
test_that("new data with multiple lines", {
b <- load_rde_var(
FALSE, {
a <- head(iris, 3)
a$Sepal.Length <- a$Sepal.Length * 2
a
},
base64_scaled_iris_3
)
expect_equal(length(b), 5)
expect_true(all.equal(b$Sepal.Length, head(iris, 3)$Sepal.Length * 2))
expect_true(all.equal(b$Species, head(iris, 3)$Species))
})
test_that("difference between new data and cahced data causes warning", {
expect_warning(
load_rde_var(FALSE, iris, base64_modified_iris_3)
)
})
test_that("when new/cahce data differ, the new data is returned", {
suppressWarnings({
b <- load_rde_var(FALSE, iris, base64_modified_iris_3)
})
expect_true(all.equal(b, iris))
})
test_that("when new data produces error, cached data is returned", {
b <- load_rde_var(FALSE, stop("some error"), base64_iris)
expect_equal(length(b), 5)
expect_true(all.equal(b, iris))
})
test_that("when new data produces error, message is raised", {
expect_message(
load_rde_var(FALSE, stop("some error"), base64_iris),
"Error raised when loading new data"
)
})
test_that("data load code can access variables from the calling environment", {
mult <- 2
b <- load_rde_var(
FALSE, {
a <- head(iris, 3)
a$Sepal.Length <- a$Sepal.Length * mult
a
},
base64_scaled_iris_3
)
expect_equal(length(b), 5)
expect_true(all.equal(b$Sepal.Length, head(iris, 3)$Sepal.Length * 2))
expect_true(all.equal(b$Species, head(iris, 3)$Species))
})
test_that("expressions in load code don't affect enclosing environment", {
mult <- 1
b <- load_rde_var(
FALSE, {
mult <- mult * 2
a <- head(iris, 3)
a$Sepal.Length <- a$Sepal.Length * mult
expect_equal(mult, 2)
a
},
base64_scaled_iris_3
)
expect_equal(mult, 1)
expect_equal(length(b), 5)
expect_true(all.equal(b$Sepal.Length, head(iris, 3)$Sepal.Length * 2))
expect_true(all.equal(b$Species, head(iris, 3)$Species))
})
|
/data/genthat_extracted_code/rde/tests/test_load.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 5,470 |
r
|
# ra_prospect_stan_w_reparam.R (hierarchical version; adapted from ra_prospect_stan_singleSubj.R)
# Programmed by Woo-Young Ahn (wahn55@snu.ac.kr), Apr 2018
rm(list=ls()) # remove all variables
library(rstan)
# source HDIofMCMC.R to calculate HDI
source("HDIofMCMC.R")
# read the data file
dat = read.table("ra_exampleData.txt", header=T, sep="\t")
allSubjs = unique(dat$subjID) # all subject IDs
N = length(allSubjs) # number of subjects
T = table(dat$subjID)[1] # number of trials per subject (=140)
numIter = 100 # number of iterations to find global minimum values
numPars = 3 # number of parameters
dataList <- list(
T = T,
N = N,
Tsubj = table(dat$subjID),
gain = matrix(dat$gain, nrow=N, ncol=T, byrow=T), #matrix[N,T]
loss = matrix(abs(dat$loss), nrow=N, ncol=T, byrow=T), # absolute value
cert = matrix(dat$cert, nrow=N, ncol=T, byrow=T),
gamble = matrix(dat$gamble, nrow=N, ncol=T, byrow=T)
)
# run!
output = stan("ra_prospect_w_reparam.stan", data = dataList,
iter = 1000, warmup=500, chains=2, cores=2)
### load existing output
library(ggplot2)
library(reshape2)
library(dplyr)
load("ra_prospect_w_reparam.RData")
traceplot(output)
# print summary
print(output)
# extract Stan fit object (parameters)
parameters <- rstan::extract(output)
# arrange dataframe
ls(parameters)
names <- paste("sbj", allSubjs)
colnames(parameters$rho) <- names
colnames(parameters$lambda) <- names
colnames(parameters$tau) <- names
names <- c("rho","lambda","tau")
colnames(parameters$sigma) <- names
colnames(parameters$mu_p) <- names
# 2.2.1 plot posteriors for group parameters
#mu_p, sigma
group <- data.frame(rbind(parameters$sigma, parameters$mu_p),
index=rep(c("sigma","mu_p"), each=nrow(parameters$sigma)))
group <- melt(group, id="index")
group_HDI <- group %>% group_by(index, variable) %>%
summarise(mean=mean(value),HDI1=HDIofMCMC(value)[1], HDI2=HDIofMCMC(value)[2])
g1 <- ggplot(group, aes(value, fill=variable)) + geom_histogram(bins = 50) +
facet_wrap(~index+variable, scale="free_x") +
geom_vline(data=group_HDI, aes(xintercept=mean),
linetype="dashed", size=1) +
geom_errorbarh(data=group_HDI, aes(y=0, x=mean, xmin=HDI1, xmax=HDI2),
height=20, size=1) +
ylab(label="")
# 2.2.2 plot posteriors for individual parameters
# rho lambda tau
individual <- data.frame(rbind(parameters$rho, parameters$lambda, parameters$tau),
index=rep(c("rho","lambda","tau"), each=nrow(parameters$rho)))
individual <- melt(individual, id="index")
individual_HDI <- individual %>% group_by(index, variable) %>%
summarise(mean=mean(value),HDI1=HDIofMCMC(value)[1], HDI2=HDIofMCMC(value)[2])
i1 <- ggplot(individual, aes(value, fill=variable)) + geom_histogram(bins = 50) +
facet_wrap(~index+variable, scale="free_x", nrow=3) +
geom_vline(data=individual_HDI, aes(xintercept=mean),
linetype="dashed", size=1) +
geom_errorbarh(data=individual_HDI, aes(y=0, x=mean, xmin=HDI1, xmax=HDI2),
height=20, size=1) +
ylab(label="")
|
/HW4/q2/ra_prospect_stan_w_reparam.R
|
no_license
|
mindy2801/Computational_Modeling
|
R
| false | false | 3,174 |
r
|
B <- c(22, 27, 26, 24, 23)
barplot(B)
# barchart with added parameters
barplot(B,
main = "Company B Stock Prices",
xlab = "Week End 9/26",
ylab = "Price",
ylim = c(0,30),
names.arg = c('Mon', 'Tue', 'Wed', 'Thu', 'Fri'),
col = colors()[12],
horiz = FALSE)
# Create data
set.seed(112)
Z = matrix(
c(15, 13, 18, 55, 60, 35, 35, 38, 41),
nrow = 3,
ncol = 3,
byrow = TRUE)
dimnames(Z) = list(
c('Sell', 'Hold', "Buy"),
c('A', 'B', "C")
)
# Get the stacked barplot
barplot (Z)
barplot(Z,
col = colors() [c(35, 77, 89)],
border = "white",
space = 0.04,
font.axis = 2,
xlab = "group")
|
/Bar_Chart.R
|
no_license
|
jamisonbrogdon/r-practice
|
R
| false | false | 690 |
r
|
num<-as.numeric(Sys.getenv("Sim"))
#This is a modified version for re-submission
#The major features include:
#1. Add average distance within the buffer.
#2. Calculate two other angle ranges: 30 and 60.
#3. Calculate the downwind number for negative control.
library(raster)
library(rgeos)
library(dplyr)
library(splines)
library(lubridate)
library(here)
prjstring<-"+proj=aea +lat_1=20 +lat_2=60 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=WGS84 +units=m +no_defs "
geoprjstring<-"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
#Load function files-----------------------------------------------------
source(here::here("code","Data_Downloading_Functions.R"))
load(here::here("data","Beta_Measurements_2001.RData"))
load(here::here("data","RadNet.RData"))
load(here::here("data","Wells_3rd.RData"))
coordinates(wells)<-~lon+lat
proj4string(wells)<-geoprjstring
wells<-spTransform(wells,prjstring)
radnet_sp<-spTransform(radnet,prjstring)
#load(here::here("data","Rad_NARR_2001.RData"))
load(here::here("data","NARR_2001.RData"))
i=num
city<-radnet@data[i,"city_state"]
overwrite=file.exists(here::here("data","Resub_city_daily_prod",
paste0(city,"_Daily_Prod_E2001.RData")))
if(!overwrite){
rad_well_link<-create_link_buffer(point = radnet_sp[i,],point_ID = "city_state",points = wells,
points_ID = "ApiNo",si_col =c("Pred_DrillType","DrillType","ProdType","SpudDate","CompDate","FirstProdDate","LastProdDate","GasCum","LiqCum","Status"),
width=50000)
if(!is.null(rad_well_link)){
rad_well_link$ApiNo<-as.character(rad_well_link$ApiNo)
city_narr=narr_data%>%filter(city_state==city)
table=expand.grid(city=city,date=city_narr$Date,radius=0:6,angle=c(30,45,60))
table$u_h=0
table$u_v=0
    #Formalize the production type, converting the diverse oil production activities to oil
rad_well_link<-rad_well_link%>%mutate(ProdType=case_when(
ProdType=="OIL" ~ "OIL",
ProdType=="Gas" ~ "Gas",
ProdType=="O&G" ~ "O&G",
ProdType=="OIL (CYCLIC STEAM)" ~ "OIL"
))
    #According to EIA, most wells produce both gas and liquid at some point, so I add another two
#columns indicating whether liq/gas was produced
rad_well_link<-rad_well_link%>%mutate(Oil=case_when(
is.na(LiqCum)~ FALSE,
LiqCum==0 ~ FALSE,
ProdType=="OIL" ~ TRUE,
ProdType=="O&G" ~ TRUE,
LiqCum>0 ~ TRUE))
rad_well_link<-rad_well_link%>%mutate(Gas=case_when(
is.na(GasCum)~ FALSE,
GasCum==0 ~ FALSE,
ProdType=="Gas" ~ TRUE,
ProdType=="O&G" ~ TRUE,
GasCum>0 ~ TRUE))
rad_well_link<-rad_well_link%>%filter(Status!="PERMITTED")
rad_well_link<-rad_well_link%>%filter(Status!="CANCELLED")
rad_well_link<-rad_well_link%>%filter(Oil|Gas)
rad_well_link<-rad_well_link%>%mutate(Active_Peroid=case_when(
!is.na(SpudDate) & !is.na(CompDate) & !is.na(LastProdDate) ~ interval(start=SpudDate,end=LastProdDate),
!is.na(SpudDate) & is.na(CompDate) & !is.na(LastProdDate) ~ interval(start= SpudDate, end=LastProdDate),
is.na(SpudDate) & !is.na(CompDate) & !is.na(LastProdDate) ~ interval(start=CompDate,end=LastProdDate),
is.na(SpudDate) & is.na(CompDate) & !is.na(LastProdDate) ~ interval(start=FirstProdDate,end=LastProdDate),
!is.na(SpudDate) & !is.na(CompDate) ~ interval( start = SpudDate, end=CompDate)
))
rad_well_link<-rad_well_link%>%mutate(LastProdDate=case_when(
is.na(Active_Peroid) ~ as.Date("1990-01-01"),
!is.na(Active_Peroid) ~ LastProdDate
))
for(row in 1:nrow(table)){
paras=table[row,]
#bottom=0+paras$radius*5
up=20+paras$radius*5
metes=city_narr%>%filter(Date==paras$date)
well_ext=rad_well_link%>%
filter(ymd(paras$date)>int_start(Active_Peroid),dist<up)%>%
mutate(dir=pi*ifelse(dir>0,dir,360+dir)/180)
wind_dir=pi*metes$dir/180
angle=paras$angle
well_ext=well_ext%>%
mutate(angle_dif=abs(180*atan2(sin(dir-wind_dir),
cos(dir-wind_dir))/pi))
wells_upwind=well_ext%>%filter(
angle_dif<angle
)
wells_downwind=well_ext%>%filter(
angle_dif>(180-angle)
)
table[row,c("d_h")]=wells_downwind%>%filter(Pred_DrillType=="H")%>%count()
table[row,c("d_v")]=wells_downwind%>%filter(Pred_DrillType=="V")%>%count()
table[row,c("d_hd")]=wells_downwind%>%filter(Pred_DrillType=="H")%>%summarise(hd=mean(dist))
table[row,c("d_vd")]=wells_downwind%>%filter(Pred_DrillType=="V")%>%summarise(hd=mean(dist))
table[row,c("u_h")]=wells_upwind%>%filter(Pred_DrillType=="H")%>%count()
table[row,c("u_v")]=wells_upwind%>%filter(Pred_DrillType=="V")%>%count()
table[row,c("u_hd")]=wells_upwind%>%filter(Pred_DrillType=="H")%>%summarise(hd=mean(dist))
table[row,c("u_vd")]=wells_upwind%>%filter(Pred_DrillType=="V")%>%summarise(hd=mean(dist))
table[row,"radius"]=20+5*table[row,"radius"]
if(row%%1000==0){
print(paste0(Sys.time(),"_",row," % ",nrow(table)))
}
}
save(file=here::here("data","Resub_city_daily_prod",paste0(city,"_Daily_Prod_E2001.RData")),table)
}
}else{
print(paste0(num," Alreadt Exist!"))
}
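## Hedged usage note (illustration; assumes one monitor/city is processed per job):
## the index is taken from the "Sim" environment variable at the top of the script,
## so a single city can be run interactively with e.g.
# Sys.setenv(Sim = "12"); source("code/Re_37_Batch_City_Prod.R")
## or submitted as a cluster array job where each task exports its own Sim value.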
|
/code/Re_37_Batch_City_Prod.R
|
no_license
|
longxiang1025/Fracking_Radiation
|
R
| false | false | 5,353 |
r
|
num<-as.numeric(Sys.getenv("Sim"))
#This is a modification version for re-submission
#The major features include:
#1. Add average distance within the buffer.
#2. Calculate two other angle range: 30 and 60.
#3. Calculat the downwind number for negative control.
library(raster)
library(rgeos)
library(dplyr)
library(splines)
library(rgeos)
library(lubridate)
library(here)
prjstring<-"+proj=aea +lat_1=20 +lat_2=60 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=WGS84 +units=m +no_defs "
geoprjstring<-"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
#Load function files-----------------------------------------------------
source(here::here("code","Data_Downloading_Functions.R"))
load(here::here("data","Beta_Measurements_2001.RData"))
load(here::here("data","RadNet.RData"))
load(here::here("data","Wells_3rd.RData"))
coordinates(wells)<-~lon+lat
proj4string(wells)<-geoprjstring
wells<-spTransform(wells,prjstring)
radnet_sp<-spTransform(radnet,prjstring)
#load(here::here("data","Rad_NARR_2001.RData"))
load(here::here("data","NARR_2001.RData"))
i=num
city<-radnet@data[i,"city_state"]
overwrite=file.exists(here::here("data","Resub_city_daily_prod",
paste0(city,"_Daily_Prod_E2001.RData")))
if(!overwrite){
rad_well_link<-create_link_buffer(point = radnet_sp[i,],point_ID = "city_state",points = wells,
points_ID = "ApiNo",si_col =c("Pred_DrillType","DrillType","ProdType","SpudDate","CompDate","FirstProdDate","LastProdDate","GasCum","LiqCum","Status"),
width=50000)
if(!is.null(rad_well_link)){
rad_well_link$ApiNo<-as.character(rad_well_link$ApiNo)
city_narr=narr_data%>%filter(city_state==city)
table=expand.grid(city=city,date=city_narr$Date,radius=0:6,angle=c(30,45,60))
table$u_h=0
table$u_v=0
#Formalize the production type, converting the diverse oil prodcution activity to oil
rad_well_link<-rad_well_link%>%mutate(ProdType=case_when(
ProdType=="OIL" ~ "OIL",
ProdType=="Gas" ~ "Gas",
ProdType=="O&G" ~ "O&G",
ProdType=="OIL (CYCLIC STEAM)" ~ "OIL"
))
#According to EIA, most wells produce both gas and liquid some time, so I add another two
#columns indicating whether liq/gas was produced
rad_well_link<-rad_well_link%>%mutate(Oil=case_when(
is.na(LiqCum)~ FALSE,
LiqCum==0 ~ FALSE,
ProdType=="OIL" ~ TRUE,
ProdType=="O&G" ~ TRUE,
LiqCum>0 ~ TRUE))
rad_well_link<-rad_well_link%>%mutate(Gas=case_when(
is.na(GasCum)~ FALSE,
GasCum==0 ~ FALSE,
ProdType=="Gas" ~ TRUE,
ProdType=="O&G" ~ TRUE,
GasCum>0 ~ TRUE))
rad_well_link<-rad_well_link%>%filter(Status!="PERMITTED")
rad_well_link<-rad_well_link%>%filter(Status!="CANCELLED")
rad_well_link<-rad_well_link%>%filter(Oil|Gas)
rad_well_link<-rad_well_link%>%mutate(Active_Peroid=case_when(
!is.na(SpudDate) & !is.na(CompDate) & !is.na(LastProdDate) ~ interval(start=SpudDate,end=LastProdDate),
!is.na(SpudDate) & is.na(CompDate) & !is.na(LastProdDate) ~ interval(start= SpudDate, end=LastProdDate),
is.na(SpudDate) & !is.na(CompDate) & !is.na(LastProdDate) ~ interval(start=CompDate,end=LastProdDate),
is.na(SpudDate) & is.na(CompDate) & !is.na(LastProdDate) ~ interval(start=FirstProdDate,end=LastProdDate),
!is.na(SpudDate) & !is.na(CompDate) ~ interval( start = SpudDate, end=CompDate)
))
rad_well_link<-rad_well_link%>%mutate(LastProdDate=case_when(
is.na(Active_Peroid) ~ as.Date("1990-01-01"),
!is.na(Active_Peroid) ~ LastProdDate
))
for(row in 1:nrow(table)){
paras=table[row,]
#bottom=0+paras$radius*5
up=20+paras$radius*5
metes=city_narr%>%filter(Date==paras$date)
well_ext=rad_well_link%>%
filter(ymd(paras$date)>int_start(Active_Peroid),dist<up)%>%
mutate(dir=pi*ifelse(dir>0,dir,360+dir)/180)
wind_dir=pi*metes$dir/180
angle=paras$angle
well_ext=well_ext%>%
mutate(angle_dif=abs(180*atan2(sin(dir-wind_dir),
cos(dir-wind_dir))/pi))
wells_upwind=well_ext%>%filter(
angle_dif<angle
)
wells_downwind=well_ext%>%filter(
angle_dif>(180-angle)
)
table[row,c("d_h")]=wells_downwind%>%filter(Pred_DrillType=="H")%>%count()
table[row,c("d_v")]=wells_downwind%>%filter(Pred_DrillType=="V")%>%count()
table[row,c("d_hd")]=wells_downwind%>%filter(Pred_DrillType=="H")%>%summarise(hd=mean(dist))
table[row,c("d_vd")]=wells_downwind%>%filter(Pred_DrillType=="V")%>%summarise(hd=mean(dist))
table[row,c("u_h")]=wells_upwind%>%filter(Pred_DrillType=="H")%>%count()
table[row,c("u_v")]=wells_upwind%>%filter(Pred_DrillType=="V")%>%count()
table[row,c("u_hd")]=wells_upwind%>%filter(Pred_DrillType=="H")%>%summarise(hd=mean(dist))
table[row,c("u_vd")]=wells_upwind%>%filter(Pred_DrillType=="V")%>%summarise(hd=mean(dist))
table[row,"radius"]=20+5*table[row,"radius"]
if(row%%1000==0){
        print(paste0(Sys.time(),"_",row," of ",nrow(table)))
}
}
save(file=here::here("data","Resub_city_daily_prod",paste0(city,"_Daily_Prod_E2001.RData")),table)
}
}else{
  print(paste0(num," Already exists!"))
}
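# Side note (illustrative only, not part of the processing above): the upwind/downwind split uses
# the wrap-around-safe angular difference abs(atan2(sin(dir - wind_dir), cos(dir - wind_dir))),
# which always falls in [0, 180] degrees even when the two bearings straddle the 0/360 boundary.
# A minimal self-contained check of that identity:
if (interactive()) {
  ang_diff_deg <- function(a_deg, b_deg) {
    d <- pi * (a_deg - b_deg) / 180
    abs(180 * atan2(sin(d), cos(d)) / pi)
  }
  ang_diff_deg(350, 10)  # 20, not 340
  ang_diff_deg(90, 270)  # 180
}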
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_local_dockerfile.R
\name{build_local_dockerfile}
\alias{build_local_dockerfile}
\title{Docker build local image
Assumes your built image is named after your dockerhub username}
\usage{
build_local_dockerfile(dockerhub_username, project_name)
}
\arguments{
\item{dockerhub_username}{username for dockerhub}
\item{project_name}{built image name}
}
\description{
Docker build local image
Assumes your built image is named after your dockerhub username
}
\examples{
build_local_dockerfile('my_username', 'my_project_name')
}
|
/man/build_local_dockerfile.Rd
|
permissive
|
smwindecker/dockertools
|
R
| false | true | 606 |
rd
|
#install.packages("devtools")
library(devtools)
library(MASS)
#Global variables
PSA_switch <- 1
PSA_numb <- 750
pat_numb <- 25000
days_to_discharge <- 30
days_in_year <- 365.25
time_horizon <- 100
discount_rate_QALYs <- 0.035
discount_rate_costs <- 0.035
Param_export <- 1
Proportion_RR_MTC_ISS_o8_u16_hosp <- 0
Proportion_RR_MTC_ISS_o8_u16_1yr <- 0
TARN_mort_eq <- "Old" # options are new or old. Default is old
MTCs_in_mort_risk <- "No" #options are Yes or No. Indicates whether the mortality equation is a composite risk
#score for a population that includes both MTC and non-MTC patients, or a risk score for a population that has
#not been to an MTC. Default is No, as the default mortality equation is the old TARN equation.
percent_TARN_cases_reported_ISS_o16 <- 1
percent_TARN_cases_reported_ISS_o9_u16 <- 1
population_source <- "Dutch" # Options are UK and Dutch. Dutch is the default
population_ISS_over16_only <- "No" # Options are yes or no. Default is no.
efficent_life_expectancy <- "Yes" #Options are Yes or No. Default is yes
test_pat_chars <- "No" #Change this to Yes if you only want to run the base case analysis with patient level results
PSA_strat <- "S100" #Option to make sure that each instance only runs one set of PSAs, as it is computationally intensive
#Options are: S100, S95, S90, S88, S75, S70, S64, S57, S28, MTC, nMTC, S100_S1, S95_S1, S90_S1, S88_S1, S75_S1, S70_S1, S64_S1, S57_S1, S28_S1
PSA_rand_no <- 330413 #random number to determine PSA parameters. #if -99 this will not change the seed after randomly determining the number of patients to run through the model.
#settings for MATTS phase 1 where first 500 runs 26090100 (after generating pat chars), next 1000 runs (ten diagnostic strategies only) 1346
date <- "_3" #name to append to saved files
#read in files from the X drive (note not on Git due to confidentiality reasons)
file_location <- "\\\\uosfstore.shef.ac.uk\\shared\\ScHARR\\PR_MATTS\\General\\Health Economics\\Model\\"
param_data <- read.csv("parameters.csv", row.names=1)
life_tabs <- read.csv("ONSlifetables.csv")
future_costs <- read.csv("lifetime-healthcare-costs.csv")
if(population_source=="UK"){
means <- as.matrix(read.csv(paste(file_location,"means.csv", sep=""),row.names=1))
covariance <- as.matrix(read.csv(paste(file_location,"covariance.csv", sep=""), row.names=1))
age_tab <- read.csv(paste(file_location,"age_tab.csv", sep=""),row.names=1)
gen_tab <- read.csv(paste(file_location,"gen_tab.csv", sep=""),row.names=1)
ISS_tab <- read.csv(paste(file_location,"ISS_tab.csv", sep=""),row.names=1)
GCS_tab <- read.csv(paste(file_location,"GCS_tab.csv", sep=""),row.names=1)
}else{
means <- as.matrix(read.csv(paste(file_location,"means_dutch_v2.csv", sep=""),row.names=1))
covariance <- as.matrix(read.csv(paste(file_location,"covariance_dutch_v2.csv", sep=""), row.names=1))
age_tab <- read.csv(paste(file_location,"age_tab_dutch_v2.csv", sep=""),row.names=1)
gen_tab <- read.csv(paste(file_location,"male_tab_dutch_v2.csv", sep=""),row.names=1)
ISS_tab <- read.csv(paste(file_location,"ISS_tab_dutch_v2.csv", sep=""),row.names=1)
GCS_tab <- read.csv(paste(file_location,"GCS_tab_dutch_v2.csv", sep=""),row.names=1)
blunt_tab <- read.csv(paste(file_location,"blunt_tab_dutch_v2.csv", sep=""),row.names=1)
}
#Call in all functions
source("Functions.R")
#Do you want to use the pre-simulated population and PSA?
predefined_pop_PSA <- "No" # Option to use the pre-simulated population and PSA parameters
#Set to "Yes" if using the publicly shared version of the model
# In the predefined population we have merged some ISS and age categories for potential
#identifiability reasons
#Analysis###################
param_data_bc <- param_data
##########################################################
#with 20,000 patients the results are stable in the base case
if(PSA_switch==0){
sens_100_spec_3 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.998, 0.025,1)
sens_95_spec_19 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.948, 0.187,1)
sens_90_spec_58 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.904, 0.584,1)
sens_88_spec_63 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.875, 0.628,1)
sens_75_spec_66 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.746, 0.657,1)
sens_70_spec_70 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.698, 0.701,1)
sens_64_spec_76 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.642, 0.761,1)
sens_57_spec_80 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.57, 0.8,1)
sens_28_spec_89 <- run_simulation(param_data_bc, 0, 1, pat_numb, "manual", 0.284, 0.886,1)
#create a matrix to store all runs
det_analyses <- matrix (nrow = 9, ncol =12)
#name the columns to make analysis easier
colnames(det_analyses) <- c("Sens_DR","Spec_DR", "Number_recieving_MTC_care","proportion_died_before_discharge","proportion_died_between_discharge_and_1_year", "Years_lived",
"undiscounted_QALYs", "discounted_QALYs", "undiscounted_Costs", "discounted_Costs", "proportion_ISS_over_16", "proportion_ISS_over_8_under_16")
  #name the rows with the appropriate strategy
rownames(det_analyses) <- c("sens_100_spec_3", "sens_95_spec_19", "sens_90_spec_58", "sens_88_spec_63", "sens_75_spec_66",
"sens_70_spec_70", "sens_64_spec_76", "sens_57_spec_80", "sens_28_spec_89")
det_analyses["sens_100_spec_3", ]<- sens_100_spec_3
det_analyses["sens_95_spec_19", ]<- sens_95_spec_19
det_analyses["sens_90_spec_58", ]<- sens_90_spec_58
det_analyses["sens_88_spec_63", ]<- sens_88_spec_63
det_analyses["sens_75_spec_66", ]<- sens_75_spec_66
det_analyses["sens_70_spec_70", ]<- sens_70_spec_70
det_analyses["sens_64_spec_76", ]<- sens_64_spec_76
det_analyses["sens_57_spec_80", ]<- sens_57_spec_80
det_analyses["sens_28_spec_89", ]<- sens_28_spec_89
write.csv(det_analyses,"base case.csv")
}
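# Illustrative helper (hypothetical; not called anywhere in this script): every PSA_strat branch
# below repeats the same run/write pattern, which is how some copy-pasted output filenames drifted.
# A single wrapper keeps the strategy label, sensitivity and specificity together so the results
# file and the parameter file always share the same name.
run_strategy_psa <- function(label, sens, spec, params = param_data_bc) {
  res <- run_simulation(params, 1, PSA_numb, pat_numb, "manual", sens, spec, 1)
  write.csv(res, paste(file_location, "PSA results\\", label, "_PSA", date, ".csv", sep = ""))
  used <- read.csv("parameter_outputs.csv")
  write.csv(used, paste("PSA results\\", label, "_PSA_params.csv", sep = ""))
  invisible(res)
}
# e.g. run_strategy_psa("sens_100_spec_3", 0.998, 0.025)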
if(PSA_switch==1){
if(PSA_strat == "S100"){
sens_100_spec_3_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.998, 0.025,1)
write.csv(sens_100_spec_3_PSA, paste(file_location,"PSA results\\sens_100_spec_3_PSA",date,".csv", sep=""))
use_params_sens_100_spec_3_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_100_spec_3_PSA, "PSA results\\sens_100_spec_3_PSA_params.csv")
}
if(PSA_strat == "S95"){
sens_95_spec_19_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.948, 0.187,1)
write.csv(sens_95_spec_19_PSA, paste(file_location,"PSA results\\sens_95_spec_19_PSA",date,".csv", sep=""))
use_params_sens_95_spec_19_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_95_spec_19_PSA, "PSA results\\sens_95_spec_19_PSA_params.csv")
}
if(PSA_strat == "S90"){
sens_90_spec_58_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.904, 0.584,1)
write.csv(sens_90_spec_58_PSA, paste(file_location,"PSA results\\sens_90_spec_58_PSA",date,".csv", sep=""))
use_params_sens_90_spec_58_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_90_spec_58_PSA, "PSA results\\sens_90_spec_58_PSA_params.csv")
}
if(PSA_strat == "S88"){
sens_88_spec_63_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.875, 0.628,1)
write.csv(sens_88_spec_63_PSA, paste(file_location,"PSA results\\sens_88_spec_63_PSA",date,".csv", sep=""))
use_params_sens_88_spec_63_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_88_spec_63_PSA, "PSA results\\sens_88_spec_63_PSA_params.csv")
}
if(PSA_strat == "S75"){
sens_75_spec_66_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.746, 0.657,1)
write.csv(sens_75_spec_66_PSA, paste(file_location,"PSA results\\sens_75_spec_66",date,".csv", sep=""))
use_params_sens_75_spec_66_PSA <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_75_spec_66_PSA, "PSA results\\sens_75_spec_66_PSA_params.csv")
}
if(PSA_strat == "S70"){
sens_70_spec_70_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.698, 0.701,1)
write.csv(sens_70_spec_70_PSA, paste(file_location,"PSA results\\sens_70_spec_70",date,".csv", sep=""))
use_params_sens_70_spec_70_PSA <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_70_spec_70_PSA, "PSA results\\sens_70_spec_70_PSA_params.csv")
}
if(PSA_strat == "S64"){
sens_64_spec_76_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.642, 0.761,1)
write.csv(sens_64_spec_76_PSA, paste(file_location,"PSA results\\sens_64_spec_76",date,".csv", sep=""))
use_params_sens_64_spec_76 <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_64_spec_76, "PSA results\\sens_64_spec_76_PSA_params.csv")
}
if(PSA_strat == "S57"){
sens_57_spec_80_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.57, 0.8,1)
write.csv(sens_57_spec_80_PSA, paste(file_location,"PSA results\\sens_57_spec_80",date,".csv", sep=""))
use_params_sens_57_spec_80_PSA <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_57_spec_80_PSA, "PSA results\\sens_57_spec_80_PSA_params.csv")
}
if(PSA_strat == "S28"){
sens_28_spec_89_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.284, 0.886,1)
write.csv(sens_28_spec_89_PSA, paste(file_location,"PSA results\\sens_28_spec_89",date,".csv", sep=""))
use_params_sens_28_spec_89_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_28_spec_89_PSA, "PSA results\\sens_28_spec_89_PSA_params.csv")
  }
} # close if(PSA_switch==1) for the base-strategy PSA runs
#Use the newer TARN mortality equation
TARN_mort_eq <- "New"
MTCs_in_mort_risk <- "Yes"
if(PSA_switch==1){
if(PSA_strat == "S100_S1"){
sens_100_spec_3_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.998, 0.025,1)
write.csv(sens_100_spec_3_PSA, paste(file_location,"PSA results\\sens_100_spec_3_PSA",date,".csv", sep=""))
use_params_sens_100_spec_3_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_100_spec_3_PSA, "PSA results\\sens_100_spec_3_PSA_params.csv")
}
if(PSA_strat == "S95_S1"){
sens_95_spec_19_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.948, 0.187,1)
write.csv(sens_95_spec_19_PSA, paste(file_location,"PSA results\\sens_95_spec_19_PSA",date,".csv", sep=""))
use_params_sens_95_spec_19_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_95_spec_19_PSA, "PSA results\\sens_95_spec_19_PSA_params.csv")
}
if(PSA_strat == "S90_S1"){
sens_90_spec_58_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.904, 0.584,1)
write.csv(sens_90_spec_58_PSA, paste(file_location,"PSA results\\sens_90_spec_58_PSA",date,".csv", sep=""))
use_params_sens_90_spec_58_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_90_spec_58_PSA, "PSA results\\sens_90_spec_58_PSA_params.csv")
}
if(PSA_strat == "S88_S1"){
sens_88_spec_63_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.875, 0.628,1)
write.csv(sens_88_spec_63_PSA, paste(file_location,"PSA results\\sens_88_spec_63_PSA",date,".csv", sep=""))
use_params_sens_88_spec_63_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_88_spec_63_PSA, "PSA results\\sens_88_spec_63_PSA_params.csv")
}
if(PSA_strat == "S75_S1"){
sens_75_spec_66_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.746, 0.657,1)
write.csv(sens_75_spec_66_PSA, paste(file_location,"PSA results\\sens_75_spec_66",date,".csv", sep=""))
use_params_sens_75_spec_66_PSA <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_75_spec_66_PSA, "PSA results\\sens_75_spec_66_PSA_params.csv")
}
if(PSA_strat == "S70_S1"){
sens_70_spec_70_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.698, 0.701,1)
write.csv(sens_70_spec_70_PSA, paste(file_location,"PSA results\\sens_70_spec_70",date,".csv", sep=""))
use_params_sens_70_spec_70_PSA <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_70_spec_70_PSA, "PSA results\\sens_70_spec_70_PSA_params.csv")
}
if(PSA_strat == "S64_S1"){
sens_64_spec_76_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.642, 0.761,1)
write.csv(sens_64_spec_76_PSA, paste(file_location,"PSA results\\sens_64_spec_76",date,".csv", sep=""))
use_params_sens_64_spec_76 <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_64_spec_76, "PSA results\\sens_64_spec_76_PSA_params.csv")
}
if(PSA_strat == "S57_S1"){
sens_57_spec_80_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.57, 0.8,1)
write.csv(sens_57_spec_80_PSA, paste(file_location,"PSA results\\sens_57_spec_80",date,".csv", sep=""))
use_params_sens_57_spec_80_PSA <- read.csv("parameter_outputs.csv")
    write.csv(use_params_sens_57_spec_80_PSA, "PSA results\\sens_57_spec_80_PSA_params.csv")
}
if(PSA_strat == "S28_S1"){
sens_28_spec_89_PSA <- run_simulation(param_data_bc, 1, PSA_numb, pat_numb, "manual", 0.284, 0.886,1)
write.csv(sens_28_spec_89_PSA, paste(file_location,"PSA results\\sens_28_spec_89",date,".csv", sep=""))
use_params_sens_28_spec_89_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_28_spec_89_PSA, "PSA results\\sens_28_spec_89_PSA_params.csv")
}
}
param_data_MTCs <- param_data
#Change the variables so everyone with a positive rule goes to the MTC
#everyone with a negative rule goes to an nMTC
#set the costs of MTCs to 0
param_data_MTCs["P_MTC_Tri_pos_ISS_o15",1] <- 1
param_data_MTCs["P_MTC_Tri_pos_ISS_o15",3] <- "Fixed"
param_data_MTCs["P_MTC_Tri_neg_ISS_o15",1] <- 0
param_data_MTCs["P_MTC_Tri_neg_ISS_o15",3] <- "Fixed"
param_data_MTCs["Transfer_nMTC_to_MTC_ISSo15_TN",1] <- 0
param_data_MTCs["Transfer_nMTC_to_MTC_ISSo15_TN",3] <- "Fixed"
param_data_MTCs["C_MTC_ISS_o15",1] <- 0
param_data_MTCs["C_MTC_ISS_o15",3] <- "Fixed"
#Change the population matrix to only include people with an ISS of 16 or more
#reset other options to their defaults
TARN_mort_eq <- "Old"
MTCs_in_mort_risk <- "No"
population_ISS_over16_only <- "Yes"
if(PSA_switch ==0) {
sens_100_spec_10 <- run_simulation(param_data_MTCs, 0, 1, pat_numb, "manual", 1, 0.1,1)
sens_0_spec_90 <- run_simulation(param_data_MTCs, 0, 1, pat_numb, "manual", 0, 0.9,1)
#create a matrix to store all runs
det_analyses <- matrix (nrow = 2, ncol =12)
#name the columns to make analysis easier
colnames(det_analyses) <- c("Sens_DR","Spec_DR", "Number_recieving_MTC_care","proportion_died_before_discharge","proportion_died_between_discharge_and_1_year", "Years_lived",
"undiscounted_QALYs", "discounted_QALYs", "undiscounted_Costs", "discounted_Costs", "proportion_ISS_over_16", "proportion_ISS_over_8_under_16")
  #name the rows with the appropriate strategy
rownames(det_analyses) <- c("All_MTC", "No_MTC")
det_analyses["All_MTC", ]<- sens_100_spec_10
det_analyses["No_MTC", ]<- sens_0_spec_90
write.csv(det_analyses, "MTC v no MTC.csv")
}
if(PSA_switch==1){
if(PSA_strat == "MTC"){
sens_100_spec_10_PSA <- run_simulation(param_data_MTCs, 1, PSA_numb, pat_numb, "manual", 1, 0.1,1)
write.csv(sens_100_spec_10_PSA, paste(file_location,"PSA results\\sens_100_spec_10_PSA",date,".csv", sep=""))
use_params_sens_100_spec_10_PSA <- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_100_spec_10_PSA, "PSA results\\sens_100_spec_10_PSA_params.csv")
}
if(PSA_strat == "nMTC"){
sens_0_spec_90_PSA <- run_simulation(param_data_MTCs, 1, PSA_numb, pat_numb, "manual", 0, 0.9,1)
write.csv(sens_0_spec_90_PSA, paste(file_location,"PSA results\\sens_0_spec_90_PSA_",date,".csv", sep=""))
use_params_sens_0_spec_90_PSA<- read.csv("parameter_outputs.csv")
write.csv(use_params_sens_0_spec_90_PSA, "PSA results\\sens_0_spec_90_PSA_params.csv")
}
}
|
/Base Case/BaseCasePSA_S100_3.R
|
permissive
|
DanPollardSheff/ideal-winner
|
R
| false | false | 15,828 |
r
|
library(ngspatial) # For adjacency.matrix
library(plot.matrix)
simulate_ising <- function(n_pixels, adjacency, beta, n_iter=1000) {
stopifnot(dim(adjacency) == c(n_pixels, n_pixels))
values <- c(-1, 1)
z <- sample(values, size=n_pixels, replace=TRUE)
## Following http://statweb.stanford.edu/~jtaylo/courses/stats352/notes/ising.pdf
for(iter in seq_len(n_iter)) {
for(index in seq_len(n_pixels)) {
neighbors <- which(adjacency[index, ] > 0)
neighbor_sum <- sum(z[neighbors])
odds <- exp(2 * beta * neighbor_sum)
p <- odds / (1 + odds)
z[index] <- sample(values, size=1, prob=c(1-p, p))
}
}
return(z)
}
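# Usage sketch (assumed workflow, not part of the original file): simulate a small Ising field on a
# 10 x 10 lattice and display it with the plot.matrix method. adjacency.matrix(10) from ngspatial
# builds the 4-neighbour adjacency for a 10 x 10 grid; beta and n_iter are arbitrary demo values.
if (interactive()) {
  n_side <- 10
  A <- adjacency.matrix(n_side)
  z <- simulate_ising(n_pixels = n_side^2, adjacency = A, beta = 0.4, n_iter = 100)
  plot(matrix(z, nrow = n_side), main = "Simulated Ising field")
}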
|
/ising.R
|
no_license
|
atorch/hidden_markov_model
|
R
| false | false | 711 |
r
|
#' Align two curves
#'
#' This function aligns two SRVF functions using Dynamic Programming
#'
#' @param beta1 array defining curve 1
#' @param beta2 array defining curve 2
#' @param lambda controls amount of warping (default = 0)
#' @param method controls which optimization method (default="DP") options are
#' Dynamic Programming ("DP"), Coordinate Descent ("DP2"), Riemannian BFGS
#' ("RBFGS")
#' @param w controls LRBFGS (default = 0.01)
#' @param rotated boolean if rotation is desired
#' @param isclosed boolean if curve is closed
#' @param mode Open ("O") or Closed ("C") curves
#' @return a list containing \item{gam}{warping function}
#' \item{R}{rotation matrix}
#' \item{tau}{seed point}
#' @keywords srvf alignment
#' @references Srivastava, A., Klassen, E., Joshi, S., Jermyn, I., (2011). Shape analysis of elastic curves in euclidean spaces. Pattern Analysis and Machine Intelligence, IEEE Transactions on 33 (7), 1415-1428.
#' @export
#' @examples
#' data("mpeg7")
#' gam = reparam_curve(beta[,,1,1],beta[,,1,5])$gam
reparam_curve <- function(beta1,beta2,lambda=0,method="DP",w=0.01,rotated=T,
isclosed=F, mode="O"){
n1 = nrow(beta2)
M = ncol(beta2)
timet = seq(0,1,length.out=M)
skipm = 4
auto = 2
tau = 0
if (method=="DPo"){
# Optimize over SO(n) x Gamma
q1 = curve_to_q(beta1)
# Optimize over SO(n)
if (rotated){
out = find_rotation_seed_coord(beta1, beta2, mode)
beta2 = out$beta2
R = out$O_hat
tau = out$tau
} else{
R = diag(n1)
tau = 0
}
q2 = curve_to_q(beta2)
# Optimize over Gamma
q1i = q1
dim(q1i) = c(M*n1)
q2i = q2
dim(q2i) = c(M*n1)
G = rep(0,M)
T1 = rep(0,M)
size = 0
ret = .Call('DPQ2', PACKAGE = 'fdasrvf', q1i, timet, q2i, timet, n1, M, M, timet, timet, M, M, G, T1, size, lambda);
G = ret$G[1:ret$size]
Tf = ret$T[1:ret$size]
gam0 = approx(Tf,G,xout=timet)$y
} else if (method=="DP") {
# Optimize over SO(n) x Gamma
q1 = curve_to_q(beta1)
# Optimize over SO(n)
if (rotated){
out = find_rotation_seed_coord(beta1, beta2);
beta2 = out$beta2
R = out$O_hat
tau = out$tau
} else{
R = diag(n1)
tau = 0
}
q2 = curve_to_q(beta2)
# Optimize over Gamma
q1 = q1/sqrt(innerprod_q2(q1, q1))
q2 = q2/sqrt(innerprod_q2(q2, q2))
q1i = q1
dim(q1i) = c(M*n1)
q2i = q2
dim(q2i) = c(M*n1)
gam0 = .Call('DPQ', PACKAGE = 'fdasrvf', q1i, q2i, n1, M, lambda, 0, rep(0,M))
} else if (method=="DP2") {
stop("Not implemented in CRAN version: please download and install from Github (https://github.com/jdtuck/fdasrvf_R)")
c1 = t(beta1)
dim(c1) = c(M*n1)
c2 = t(beta2)
dim(c2) = c(M*n1)
opt = rep(0,M+n1*n1+1)
swap = FALSE
fopts = rep(0,5)
comtime = rep(0,5)
out = .Call('opt_reparam', PACKAGE = 'fdasrvf', c1,c2,M,n1,0.0,TRUE,
rotated,isclosed,skipm,auto,opt,swap,fopts,comtime)
tmp = length(out$opt)
gam0 = out$opt[1:(tmp-5)]
R = matrix(out$opt[(tmp-4):(tmp-1)],nrow=2)
if (out$swap){
gam0 = invertGamma(gam0)
R = t(R)
}
} else if (method=="RBFGS") {
stop("Not implemented in CRAN version: please download and install from Github (https://github.com/jdtuck/fdasrvf_R)")
c1 = t(beta1)
dim(c1) = c(M*n1)
c2 = t(beta2)
dim(c2) = c(M*n1)
opt = rep(0,M+n1*n1+1)
swap = FALSE
fopts = rep(0,5)
comtime = rep(0,5)
out = .Call('opt_reparam', PACKAGE = 'fdasrvf', c1,c2,M,n1,w,FALSE,
rotated,isclosed,skipm,auto,opt,swap,fopts,comtime)
if (out$fopts[1] == 1000){
out = .Call('opt_reparam', PACKAGE = 'fdasrvf', c1,c2,M,n1,0.0,TRUE,
rotated,isclosed,skipm,auto,opt,swap,fopts,comtime)
}
tmp = length(out$opt)
gam0 = out$opt[1:(tmp-5)]
R = matrix(out$opt[(tmp-4):(tmp-1)],nrow=2)
if (out$swap){
gam0 = invertGamma(gam0);
R = t(R)
}
} else {
stop("Invalid method chosen")
}
gam = (gam0-gam0[1])/(gam0[length(gam0)]-gam0[1]) # slight change on scale
return(list(gam=gam,R=R,tau=tau))
}
|
/fuzzedpackages/fdasrvf/R/reparam_curve.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 4,660 |
r
|
## ############################################################################
##
## DISCLAIMER:
## This script has been developed for research purposes only.
## The script is provided without any warranty of any kind, either express or
## implied. The entire risk arising out of the use or performance of the sample
## script and documentation remains with you.
## In no event shall its author, or anyone else involved in the
## creation, production, or delivery of the script be liable for any damages
## whatsoever (including, without limitation, damages for loss of business
## profits, business interruption, loss of business information, or other
## pecuniary loss) arising out of the use of or inability to use the sample
## scripts or documentation, even if the author has been advised of the
## possibility of such damages.
##
## ############################################################################
##
## DESCRIPTION
## Simulates outbreaks and analyses them using EARS-C3
##
##
## Written by: Angela Noufaily and Felipe J Colón-González
## For any problems with this code, please contact f.colon@uea.ac.uk
##
## ############################################################################
rm(list=ls(all=TRUE))
# FUNCTIONS THAT PRODUCE THE DATA
# DEFINING FUNCTION h
require(data.table)
require(dplyr)
require(tidyr)
require(surveillance)
require(lubridate)
require(zoo)
#==============
# 5-day systems
#==============
h1=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2){
t=1:N
if(k==0 & k2==0){h1=alpha+beta*t}
else{
if(k==0)
{
l=1:k2
h1=rep(0,N)
for(i in 1:N){
h1[i]=alpha+beta*(t[i]+shift)+sum(gama3*cos((2*pi*l*(t[i]+shift))/5)+gama4*sin((2*pi*l*(t[i]+shift))/5))
}
}
else{
j=1:k
l=1:k2
h1=rep(0,N)
for(i in 1:N){
h1[i]=alpha+beta*(t[i]+shift)+sum(gama1*cos((2*pi*j*(t[i]+shift))/(52*5))+gama2*sin((2*pi*j*(t[i]+shift2))/(52*5)))+sum(gama3*cos((2*pi*l*(t[i]+shift))/5)+gama4*sin((2*pi*l*(t[i]+shift))/5))
}
}
}
h1
}
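# For reference, h1 returns the log of the daily mean, i.e.
#   log mu_t = alpha + beta*(t + shift)
#            + sum_j [ gama1*cos(2*pi*j*(t + shift)/(52*5)) + gama2*sin(2*pi*j*(t + shift2)/(52*5)) ]
#            + sum_l [ gama3*cos(2*pi*l*(t + shift)/5)      + gama4*sin(2*pi*l*(t + shift)/5) ]
# a linear trend plus annual (period 52*5 working days) and day-of-week (period 5) Fourier terms.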
negbinNoise1=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,phi,shift,shift2){
mu <- exp(h1(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2))
if(phi==1){yi <- rpois(N,mu)}
else{
prob <- 1/phi
size <- mu/(phi-1)
yi <- rnbinom(N,size=size,prob=prob)
}
yi
}
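# Quick visual check (illustrative only): simulate one 5-day-week baseline series with the same
# parameter values as the sigid6 block further down (before that block's /10 rescaling), to see
# the weekly and annual pattern implied by h1.
if (interactive()) {
  set.seed(1)
  demo_y <- negbinNoise1(N = 52*5, k = 1, k2 = 1, alpha = 6, beta = 0, gama1 = 0.3, gama2 = 2,
                         gama3 = 0.3, gama4 = 0.5, phi = 1.5, shift = -50, shift2 = -50)
  plot(demo_y, type = 'l', xlab = "working day", ylab = "simulated count")
}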
outbreak5=function(currentday,weeklength,wtime,yi,interval,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2,phi,numoutbk,peakoutbk,meanlog,sdlog){
  # alpha, beta and the gama terms are the parameters of the equation for mu in Section 3.1
N=length(yi)
t=1:N
mu <- exp(h1(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2))
s=sqrt(mu*phi)
#wtime = (currentday-49*5+1):currentday # current outbreaks
# GENERATING OUTBREAKS
# STARTING TIMES OF OUTBREAKS
startoutbk <- sample(wtime, numoutbk, replace = FALSE)
# OUTBREAK SIZE OF CASES
sizeoutbk=rep(0,numoutbk)
for(i in 1:numoutbk){
set.seed(i)
soutbk=1
sou=1
while(soutbk<2){
set.seed(sou)
soutbk=rpois(1,s[startoutbk[i]]*peakoutbk)
sou=sou+1
}
sizeoutbk[i]=soutbk
}
# DISTRIBUTE THESE CASES OVER TIME USING LOGNORMAL
outbreak=rep(0,2*N)
for( j in 1:numoutbk){
set.seed(j)
outbk <-rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
#outbk <-rnorm(sizeoutbk[j], mean = meanlog2, sd = sdlog)
#h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),1),plot=FALSE)
h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),interval),plot=FALSE)
cases <- h$counts
weight=rep(0,length(cases))
    duration<-startoutbk[j]:(startoutbk[j]+length(cases)-1)
dayofweek<-duration%%5 # 0 is friday; 1 is monday; 2 is tuesday etc.
for(i in 1:length(cases)){
if(dayofweek[i]==0){weight[i]=1.1}
if(dayofweek[i]==1){weight[i]=1.5}
if(dayofweek[i]==2){weight[i]=1.1}
if(dayofweek[i]==3){weight[i]=1}
if(dayofweek[i]==4){weight[i]=1}
}
cases2 <- cases*weight
for (l in 1:(length(cases2))){
outbreak[startoutbk[j]+(l-1)]= cases2[l]+outbreak[startoutbk[j]+(l-1)]
}# l loop
}# j loop
#for(v in 1:(currentday-49*5)){if(outbreak[v]>0){outbreak[v]=0}}
for(v in currentday:(currentday+100)){if(outbreak[v]>0){outbreak[v]=0}}
outbreak=outbreak[1:N]
# ADD NOISE AND OUTBREAKS
yitot=yi+outbreak
result=list(yitot=yitot,outbreak=outbreak,startoutbk=startoutbk,sizeoutbk=sizeoutbk,sd=s,mean=mu)
#return(result)
}
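# Mini illustration (not used below): how outbreak5/outbreak7 spread an outbreak over days.
# Cases are drawn from a log-normal curve and binned into fixed-width intervals, so most cases
# fall shortly after the start day; the day-of-week weights applied above are omitted here.
if (interactive()) {
  set.seed(1)
  lag_days <- rlnorm(200, meanlog = 0, sdlog = 0.5)
  daily_cases <- hist(lag_days, breaks = seq(0, ceiling(max(lag_days)), 0.25), plot = FALSE)$counts
  barplot(daily_cases, main = "Outbreak cases per 0.25-day interval")
}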
#==============
# 7-day systems
#==============
h2=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift){
t=1:N
if(k==0 & k2==0){h2=alpha+beta*t}
else{
if(k==0)
{
l=1:k2
h2=rep(0,N)
for(i in 1:N){
h2[i]=alpha+beta*(t[i]+shift)+sum(gama3*cos((2*pi*l*(t[i]+shift))/7)+gama4*sin((2*pi*l*(t[i]+shift))/7))
}
}
else{
j=1:k
l=1:k2
h2=rep(0,N)
for(i in 1:N){
h2[i]=alpha+beta*(t[i]+shift)+sum(gama1*cos((2*pi*j*(t[i]+shift))/(52*7))+gama2*sin((2*pi*j*(t[i]+shift))/(52*7)))+sum(gama3*cos((2*pi*l*(t[i]+shift))/7)+gama4*sin((2*pi*l*(t[i]+shift))/7))
}
}
}
h2
}
negbinNoise2=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,phi,shift){
mu <- exp(h2(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift))
if(phi==1){yi <- rpois(N,mu)}
else{
prob <- 1/phi
size <- mu/(phi-1)
yi <- rnbinom(N,size=size,prob=prob)
}
yi
}
outbreak7=function(currentday,weeklength,wtime,yi,interval,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,phi,numoutbk,peakoutbk,meanlog,sdlog){
  # alpha, beta and the gama terms are the parameters of the equation for mu in Section 3.1
N=length(yi)
t=1:N
mu <- exp(h2(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift))
s=sqrt(mu*phi)
#wtime = (currentday-49*7+1):currentday # current outbreaks
# wtime = 350*1:7 # current outbreaks
# GENERATING OUTBREAKS
# STARTING TIMES OF OUTBREAKS
startoutbk <- sample(wtime, numoutbk, replace = FALSE)
# OUTBREAK SIZE OF CASES
sizeoutbk=rep(0,numoutbk)
for(i in 1:numoutbk){
set.seed(i)
soutbk=1
sou=1
while(soutbk<2){
set.seed(sou)
soutbk=rpois(1,s[startoutbk[i]]*peakoutbk)
sou=sou+1
}
sizeoutbk[i]=soutbk
}
# DISTRIBUTE THESE CASES OVER TIME USING LOGNORMAL
outbreak=rep(0,2*N)
for( j in 1:numoutbk){
set.seed(j)
outbk <-rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
#outbk <-rnorm(sizeoutbk[j], mean = meanlog2, sd = sdlog)
#h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),1),plot=FALSE)
h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),interval),plot=FALSE)
cases <- h$counts
weight=rep(0,length(cases))
    duration<-startoutbk[j]:(startoutbk[j]+length(cases)-1)
dayofweek<-duration%%7 # 0 is sunday; 1 is monday; 2 is tuesday etc.
for(i in 1:length(cases)){
if(dayofweek[i]==0){weight[i]=2}
if(dayofweek[i]==1){weight[i]=1}
if(dayofweek[i]==2){weight[i]=1}
if(dayofweek[i]==3){weight[i]=1}
if(dayofweek[i]==4){weight[i]=1}
if(dayofweek[i]==5){weight[i]=1}
if(dayofweek[i]==6){weight[i]=2}
}
cases2 <- cases*weight
for (l in 1:(length(cases2))){
outbreak[startoutbk[j]+(l-1)]= cases2[l]+outbreak[startoutbk[j]+(l-1)]
}# l loop
}# j loop
#for(v in (currentday-49*7):currentday){if(outbreak[v]>0){outbreak[v]=0}}
for(v in currentday:(currentday+100)){if(outbreak[v]>0){outbreak[v]=0}}
outbreak=outbreak[1:N]
# ADD NOISE AND OUTBREAKS
yitot=yi+outbreak
result=list(yitot=yitot,outbreak=outbreak,startoutbk=startoutbk,sizeoutbk=sizeoutbk,sd=s,mean=mu)
#return(result)
}
#==========================
# Specify the bank holidays
#==========================
myDir <- "/local/zck07apu/Documents/GitLab/rammie_comparison/scripts/C3/10x"
years=7
bankholidays=read.csv(file.path(myDir, "Bankholidays.csv"))
#fix(bankholidays)
bankhols7=bankholidays$bankhol
bankhols7=as.numeric(bankhols7)
length(bankhols7)
#fix(bankhols7)
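# The next two lines drop the two weekend entries from each 7-day week (position 6, then the
# 6th of each remaining 6-day block) so the bank holiday indicator lines up with the 5-day-week series.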
bankhols5=bankhols7[-seq(6,length(bankhols7),7)]
bankhols5=bankhols5[-seq(6,length(bankhols5),6)]
bankhols5=as.numeric(bankhols5)
length(bankhols5)
#fix(bankhols5)
#=======================
# Define the data frames
#=======================
nsim=100
simulateddata1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
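# Aside (hypothetical refactor, not used below): the repeated empty frames above could also be
# built in one call and kept in a named list, e.g.
# sim_frames <- setNames(replicate(17, data.frame(array(0, dim=c(52*7*years, nsim))), simplify=FALSE),
#                        paste0("simulateddata", 1:17))
# The explicit definitions are kept so the rest of the script can keep using the original names.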
#################################
#SIMULATE SYNDROMES AND OUTBREAKS
#################################
#=====================
# 5-day week syndromes
#=====================
days5=5
N=52*days5*years
#sigid6
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50)/10
#mu=exp(h1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,shift=-50,shift2=-50))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,
k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*80,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=6,beta=0,gama1=0.3,
gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak +out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata6[,i]=round(zt)
simulatedtotals6[,i]=round(zitot)
simulatedoutbreak6[,i]=round(zoutbreak)
simulatedzseasoutbreak6[,i]=round(zseasoutbreak)
}
#----------------------------------------------------
# Plot the datasets and outbreaks using the following
#----------------------------------------------------
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid7
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=1,beta=0,gama1=0.1,gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50)
#mu=exp(h1(N=N,k=1,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=2,gama3=0.1,gama4=0.1,shift=-50,shift2=-50))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*50,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata7[,i]=round(zt)
simulatedtotals7[,i]=round(zitot)
simulatedoutbreak7[,i]=round(zoutbreak)
simulatedzseasoutbreak7[,i]=round(zseasoutbreak)
}
plot(1:(52*years*7),simulatedtotals7[,7],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak7[,7],col='green')
lines(1:(52*years*7),simulatedoutbreak7[,7],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak7[,4],col='green',typ='l')
#sigid8
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=0,k2=1,alpha=6,beta=0.0001,gama1=0,gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0)/10
#mu=exp(h1(N=N,k=0,k2=1,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.6,gama4=0.9,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=0,k2=1,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata8[,i]=round(zt)
simulatedtotals8[,i]=round(zitot)
simulatedoutbreak8[,i]=round(zoutbreak)
}
plot(1:(52*years*7),simulateddata8[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata8[,1]+simulatedoutbreak8[,1])
#sigid9
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150)
mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.6,gama4=0.8,shift=-150,shift2=-150))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=3,beta=0,gama1=1.5,
gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata9[,i]=round(zt)
simulatedtotals9[,i]=round(zitot)
simulatedoutbreak9[,i]=round(zoutbreak)
}
plot(1:(52*years*7),simulateddata9[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata9[,1]+simulatedoutbreak9[,1])
#sigid10
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200)
#mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,shift=-200,shift2=-200))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=3,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata10[,i]=round(zt)
simulatedtotals10[,i]=round(zitot)
simulatedoutbreak10[,i]=round(zoutbreak)
}
#sigid11
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0)
mu=exp(h1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=5,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata11[,i]=round(zt)
simulatedtotals11[,i]=round(zitot)
simulatedoutbreak11[,i]=round(zoutbreak)
}
#sigid12
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0)
#mu=exp(h1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,
gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata12[,i]=round(zt)
simulatedtotals12[,i]=round(zitot)
simulatedoutbreak12[,i]=round(zoutbreak)
}
#sigid13
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0)/100
#mu=exp(h1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=9,beta=0,gama1=0.5,
gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata13[,i]=round(zt)
simulatedtotals13[,i]=round(zitot)
simulatedoutbreak13[,i]=round(zoutbreak)
}
plot(1:length(simulatedtotals13[,1]),simulatedtotals13[,1],typ='l')
plot(1:(52*years*7),simulatedtotals13[,1],typ='l',xlim=c(2206,2548),col='green')
lines(1:(52*years*7),simulateddata13[,1],typ='l')
#=====================
# 7-day week syndromes
#=====================
years=7
days7=7
N=52*days7*years
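#---------------------------------------------------------------------------
# For each 7-day-week syndrome (sigid1, 3-5 and 14-17), nsim series are
# simulated with a baseline from negbinNoise2()/h2() (day-of-week period 7)
# and one 'current' outbreak in the final 49 weeks from outbreak7(); sigid16
# also receives one seasonal outbreak per year. No weekend padding is needed
# here; instead, counts on bank holidays are doubled.
#---------------------------------------------------------------------------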
#sigid1
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,phi=2,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,shift=29))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata1[,i]=round(zt)
simulatedtotals1[,i]=round(zitot)
simulatedoutbreak1[,i]=round(zoutbreak)
}
#sigid3
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167)
#mu=exp(h2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,shift=-167))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,
gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata3[,i]=round(zt)
simulatedtotals3[,i]=round(zitot)
simulatedoutbreak3[,i]=round(zoutbreak)
}
#sigid4
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=5.5,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata4[,i]=round(zt)
simulatedtotals4[,i]=round(zitot)
simulatedoutbreak4[,i]=round(zoutbreak)
}
#sigid5
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=2,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata5[,i]=round(zt)
simulatedtotals5[,i]=round(zitot)
simulatedoutbreak5[,i]=round(zoutbreak)
}
#sigid14
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=2,beta=0.0005,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57)
#mu=exp(h2(N=N,k=1,k2=2,alpha=2,beta=0,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,shift=57))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata14[,i]=round(zt)
simulatedtotals14[,i]=round(zitot)
simulatedoutbreak14[,i]=round(zoutbreak)
}
#sigid15
for(i in 1:nsim){
set.seed(i)
#yt=0.1*(negbinNoise2(N=N,k=4,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=0.1,gama3=1.8,gama4=0.1,phi=1,shift=-85)+2)
yt=1*(negbinNoise2(N=N,k=4,k2=1,alpha=0.05,beta=0,gama1=0.01,gama2=0.01,gama3=1.8,gama4=0.1,phi=1,shift=-85)+0)
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=2,beta=0,gama1=0.8,
gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata15[,i]=round(zt)
simulatedtotals15[,i]=round(zitot)
simulatedoutbreak15[,i]=round(zoutbreak)
}
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid16
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,shift=29))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days7*52*years,weeklength=52*days7*years,wtime=((210+(j-1)*days7*52):(230+(j-1)*days7*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days7*150,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=3,beta=0,gama1=0.8,
gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata16[,i]=round(zt)
simulatedtotals16[,i]=round(zitot)
simulatedoutbreak16[,i]=round(zoutbreak)
simulatedzseasoutbreak16[,i]=round(zseasoutbreak)
}
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedtotals16[,1],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,1],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,1],col='red')
plot(1:(52*years*7),simulatedtotals16[,2],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,2],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,2],col='red')
#sigid17
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata17[,i]=round(zt)
simulatedtotals17[,i]=round(zitot)
simulatedoutbreak17[,i]=round(zoutbreak)
}
#=============================
# Define the alarm data frames
#=============================
days=7
nsim=100
alarmall1=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall2=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall3=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall4=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall5=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall6=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall7=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall8=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall9=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall10=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall11=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall12=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall13=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall14=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall15=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall16=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall17=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
#########################################
#========================================
#Apply the algorithm to the data day by day and record the alarms in the data frames above
#========================================
#########################################
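# The detection exercise is run twice: first on the totals (baseline plus
# outbreaks) to record alarms for the performance measures, and then on the
# data without the injected current outbreaks as a control run; plots of each
# simulation are written to plots/totals and plots/control respectively.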
myDates <- seq(ymd('2010-01-01'), ymd('2016-12-30'), by = '1 day')
dropDays <- as.Date(c('2010-12-31', '2011-12-31', '2012-12-31',
                      '2013-12-31', '2014-12-31', '2015-12-31',
                      '2016-02-29', '2012-02-29'))
"%ni%" <- Negate("%in%")
myDates <- myDates[myDates %ni% dropDays]
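# myDates now holds 7*364 = 2548 daily dates (every 31 December and 29
# February removed so that each year is exactly 52 weeks long), matching the
# length of the simulated series.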
# Convert to 7-day running totals
rolling <- function(x){
rollapplyr(x, width=7, FUN=sum, na.rm=T, fill=NA)
}
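# Right-aligned 7-day running totals; the first six values of each series are
# NA (fill=NA), e.g. rolling(1:8) gives NA,NA,NA,NA,NA,NA,28,35.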
simdata1 <- apply(simulateddata1, 2, rolling)
# simdata2 <- apply(simulateddata2, 2, rolling)
simdata3 <- apply(simulateddata3, 2, rolling)
simdata4 <- apply(simulateddata4, 2, rolling)
simdata5 <- apply(simulateddata5, 2, rolling)
simdata6 <- apply(simulateddata6, 2, rolling)
simdata7 <- apply(simulateddata7, 2, rolling)
simdata8 <- apply(simulateddata8, 2, rolling)
simdata9 <- apply(simulateddata9, 2, rolling)
simdata10 <- apply(simulateddata10, 2, rolling)
simdata11 <- apply(simulateddata11, 2, rolling)
simdata12 <- apply(simulateddata12, 2, rolling)
simdata13 <- apply(simulateddata13, 2, rolling)
simdata14 <- apply(simulateddata14, 2, rolling)
simdata15 <- apply(simulateddata15, 2, rolling)
simdata16 <- apply(simulateddata16, 2, rolling)
simdata17 <- apply(simulateddata17, 2, rolling)
simtot1 <- apply(simulatedtotals1, 2, rolling)
# simtot2 <- apply(simulatedtotals2, 2, rolling)
simtot3 <- apply(simulatedtotals3, 2, rolling)
simtot4 <- apply(simulatedtotals4, 2, rolling)
simtot5 <- apply(simulatedtotals5, 2, rolling)
simtot6 <- apply(simulatedtotals6, 2, rolling)
simtot7 <- apply(simulatedtotals7, 2, rolling)
simtot8 <- apply(simulatedtotals8, 2, rolling)
simtot9 <- apply(simulatedtotals9, 2, rolling)
simtot10 <- apply(simulatedtotals10, 2, rolling)
simtot11 <- apply(simulatedtotals11, 2, rolling)
simtot12 <- apply(simulatedtotals12, 2, rolling)
simtot13 <- apply(simulatedtotals13, 2, rolling)
simtot14 <- apply(simulatedtotals14, 2, rolling)
simtot15 <- apply(simulatedtotals15, 2, rolling)
simtot16 <- apply(simulatedtotals16, 2, rolling)
simtot17 <- apply(simulatedtotals17, 2, rolling)
# Convert data to sts
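# One sts object per syndrome, holding the nsim simulated series as columns,
# with 364 observations per year and epochs set to the calendar dates above.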
simSts1 <- sts(simdata1, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
# simSts2 <- sts(simdata2, start=c(2010, 1), frequency=364,
# epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts3 <- sts(simdata3, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts4 <- sts(simdata4, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts5 <- sts(simdata5, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts6 <- sts(simdata6, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts7 <- sts(simdata7, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts8 <- sts(simdata8, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts9 <- sts(simdata9, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts10 <- sts(simdata10, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts11 <- sts(simdata11, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts12 <- sts(simdata12, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts13 <- sts(simdata13, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts14 <- sts(simdata14, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts15 <- sts(simdata15, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts16 <- sts(simdata16, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts17 <- sts(simdata17, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts1 <- sts(simtot1, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
# totSts2 <- sts(simtot2, start=c(2010, 1), frequency=364,
# epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts3 <- sts(simtot3, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts4 <- sts(simtot4, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts5 <- sts(simtot5, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts6 <- sts(simtot6, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts7 <- sts(simtot7, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts8 <- sts(simtot8, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts9 <- sts(simtot9, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts10 <- sts(simtot10, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts11 <- sts(simtot11, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts12 <- sts(simtot12, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts13 <- sts(simtot13, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts14 <- sts(simtot14, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts15 <- sts(simtot15, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts16 <- sts(simtot16, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts17 <- sts(simtot17, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
in2016 <- 2206:2548
# Select range of data to monitor, algorithm and prediction interval
control <- list(range=in2016, method="C3", alpha=0.01)
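# in2016 (rows 2206:2548) is the final 49 weeks (343 days) of the series;
# earsC() with method="C3" applies the EARS C3 algorithm and flags an alarm
# on days where the observed count exceeds its upper threshold (significance
# level alpha). For a quick look at a single syndrome/simulation, something
# along these lines can be used (illustrative only):
#   det <- earsC(totSts1[, 1], control=control)
#   plot(det)       # observed counts, upper bound and alarms
#   sum(det@alarm)  # number of alarm days in the monitored range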
for(sim in seq(nsim)){
cat("\t", sim)
# Run detection algorithm
det1 <- earsC(totSts1[,sim], control=control)
det3 <- earsC(totSts3[,sim], control=control)
det4 <- earsC(totSts4[,sim], control=control)
det5 <- earsC(totSts5[,sim], control=control)
det6 <- earsC(totSts6[,sim], control=control)
det7 <- earsC(totSts7[,sim], control=control)
det8 <- earsC(totSts8[,sim], control=control)
det9 <- earsC(totSts9[,sim], control=control)
det10 <- earsC(totSts10[,sim], control=control)
det11 <- earsC(totSts11[,sim], control=control)
det12 <- earsC(totSts12[,sim], control=control)
det13 <- earsC(totSts13[,sim], control=control)
det14 <- earsC(totSts14[,sim], control=control)
det15 <- earsC(totSts15[,sim], control=control)
det16 <- earsC(totSts16[,sim], control=control)
det17 <- earsC(totSts17[,sim], control=control)
# Plot detection results
dir.create(file.path(myDir, "plots", "totals"),
recursive=TRUE)
png(file.path(myDir, "plots", "totals", paste0("Sim_", sim, ".png")),
width=16,height=14,units="in",res=300)
par(mfrow=c(4, 4), oma=c(0, 0, 2, 0))
plot(det1, main="Dataset 1", legend=NULL)
plot(det3, main="Dataset 3", legend=NULL)
plot(det4, main="Dataset 4", legend=NULL)
plot(det5, main="Dataset 5", legend=NULL)
plot(det6, main="Dataset 6", legend=NULL)
plot(det7, main="Dataset 7", legend=NULL)
plot(det8, main="Dataset 8", legend=NULL)
plot(det9, main="Dataset 9", legend=NULL)
plot(det10, main="Dataset 10", legend=NULL)
plot(det11, main="Dataset 11", legend=NULL)
plot(det12, main="Dataset 12", legend=NULL)
plot(det13, main="Dataset 13", legend=NULL)
plot(det14, main="Dataset 14", legend=NULL)
plot(det15, main="Dataset 15", legend=NULL)
plot(det16, main="Dataset 16", legend=NULL)
plot(det17, main="Dataset 17", legend=NULL)
title(main=list(paste("Simulation", sim, "Alpha", control$alpha ),
cex=2), outer=TRUE)
dev.off()
# Retrieve information about alarms
alarmall1[,sim] <- as.numeric(as.vector(unlist(det1@alarm)))
alarmall3[,sim] <- as.numeric(as.vector(unlist(det3@alarm)))
alarmall4[,sim] <- as.numeric(as.vector(unlist(det4@alarm)))
alarmall5[,sim] <- as.numeric(as.vector(unlist(det5@alarm)))
alarmall6[,sim] <- as.numeric(as.vector(unlist(det6@alarm)))
alarmall7[,sim] <- as.numeric(as.vector(unlist(det7@alarm)))
alarmall8[,sim] <- as.numeric(as.vector(unlist(det8@alarm)))
alarmall9[,sim] <- as.numeric(as.vector(unlist(det9@alarm)))
alarmall10[,sim] <- as.numeric(as.vector(unlist(det10@alarm)))
alarmall11[,sim] <- as.numeric(as.vector(unlist(det11@alarm)))
alarmall12[,sim] <- as.numeric(as.vector(unlist(det12@alarm)))
alarmall13[,sim] <- as.numeric(as.vector(unlist(det13@alarm)))
alarmall14[,sim] <- as.numeric(as.vector(unlist(det14@alarm)))
alarmall15[,sim] <- as.numeric(as.vector(unlist(det15@alarm)))
alarmall16[,sim] <- as.numeric(as.vector(unlist(det16@alarm)))
alarmall17[,sim] <- as.numeric(as.vector(unlist(det17@alarm)))
}
# Replace missing alarm values with zero (i.e. treat days with no alarm information as no alarm)
alarmall1[is.na(alarmall1)] <- 0
alarmall3[is.na(alarmall3)] <- 0
alarmall4[is.na(alarmall4)] <- 0
alarmall5[is.na(alarmall5)] <- 0
alarmall6[is.na(alarmall6)] <- 0
alarmall7[is.na(alarmall7)] <- 0
alarmall8[is.na(alarmall8)] <- 0
alarmall9[is.na(alarmall9)] <- 0
alarmall10[is.na(alarmall10)] <- 0
alarmall11[is.na(alarmall11)] <- 0
alarmall12[is.na(alarmall12)] <- 0
alarmall13[is.na(alarmall13)] <- 0
alarmall14[is.na(alarmall14)] <- 0
alarmall15[is.na(alarmall15)] <- 0
alarmall16[is.na(alarmall16)] <- 0
alarmall17[is.na(alarmall17)] <- 0
# Compare against the data without the injected current outbreaks (control runs)
for(sim in seq(nsim)){
cat("\t", sim)
det1 <- earsC(simSts1[,sim], control=control)
det3 <- earsC(simSts3[,sim], control=control)
det4 <- earsC(simSts4[,sim], control=control)
det5 <- earsC(simSts5[,sim], control=control)
det6 <- earsC(simSts6[,sim], control=control)
det7 <- earsC(simSts7[,sim], control=control)
det8 <- earsC(simSts8[,sim], control=control)
det9 <- earsC(simSts9[,sim], control=control)
det10 <- earsC(simSts10[,sim], control=control)
det11 <- earsC(simSts11[,sim], control=control)
det12 <- earsC(simSts12[,sim], control=control)
det13 <- earsC(simSts13[,sim], control=control)
det14 <- earsC(simSts14[,sim], control=control)
det15 <- earsC(simSts15[,sim], control=control)
det16 <- earsC(simSts16[,sim], control=control)
det17 <- earsC(simSts17[,sim], control=control)
dir.create(file.path(myDir, "plots", "control"),
recursive=TRUE)
png(file.path(myDir, "plots", "control",
paste0("Sim_", sim, ".png")),
width=16,height=14,units="in",res=300)
par(mfrow=c(4, 4), oma=c(0, 0, 2, 0))
plot(det1, main="Dataset 1", legend=NULL)
plot(det3, main="Dataset 3", legend=NULL)
plot(det4, main="Dataset 4", legend=NULL)
plot(det5, main="Dataset 5", legend=NULL)
plot(det6, main="Dataset 6", legend=NULL)
plot(det7, main="Dataset 7", legend=NULL)
plot(det8, main="Dataset 8", legend=NULL)
plot(det9, main="Dataset 9", legend=NULL)
plot(det10, main="Dataset 10", legend=NULL)
plot(det11, main="Dataset 11", legend=NULL)
plot(det12, main="Dataset 12", legend=NULL)
plot(det13, main="Dataset 13", legend=NULL)
plot(det14, main="Dataset 14", legend=NULL)
plot(det15, main="Dataset 15", legend=NULL)
plot(det16, main="Dataset 16", legend=NULL)
plot(det17, main="Dataset 17", legend=NULL)
title(main=list(paste("Simulation", sim, "Alpha", control$alpha ),
cex=2), outer=TRUE)
dev.off()
}
#====================================
#====================================
#Summary
#====================================
#====================================
days=7
# FPR false positive rate
fpr=rep(0,17)
fprseas=rep(0,3)
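# False positive rate: alarms raised on non-outbreak days in the final 49
# weeks, summed over all nsim simulations, divided by the total number of
# non-outbreak days (rows 2206:2548) over all simulations.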
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]==0)+nu
}
}
fpr[1]=nu/sum(simulatedoutbreak1[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]==0)+nu
}
}
fpr[2]=nu/sum(simulatedoutbreak2[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]==0)+nu
}
}
fpr[3]=nu/sum(simulatedoutbreak3[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]==0)+nu
}
}
fpr[4]=nu/sum(simulatedoutbreak4[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]==0)+nu
}
}
fpr[5]=nu/sum(simulatedoutbreak5[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]==0)+nu
}
}
fpr[6]=nu/sum(simulatedoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]==0)+nu
}
}
fprseas[1]=nu/sum(simulatedzseasoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]==0)+nu
}
}
fpr[7]=nu/sum(simulatedoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]==0)+nu
}
}
fprseas[2]=nu/sum(simulatedzseasoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]==0)+nu
}
}
fpr[8]=nu/sum(simulatedoutbreak8[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]==0)+nu
}
}
fpr[9]=nu/sum(simulatedoutbreak9[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]==0)+nu
}
}
fpr[10]=nu/sum(simulatedoutbreak10[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]==0)+nu
}
}
fpr[11]=nu/sum(simulatedoutbreak11[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]==0)+nu
}
}
fpr[12]=nu/sum(simulatedoutbreak12[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]==0)+nu
}
}
fpr[13]=nu/sum(simulatedoutbreak13[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]==0)+nu
}
}
fpr[14]=nu/sum(simulatedoutbreak14[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]==0)+nu
}
}
fpr[15]=nu/sum(simulatedoutbreak15[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]==0)+nu
}
}
fpr[16]=nu/sum(simulatedoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]==0)+nu
}
}
fprseas[3]=nu/sum(simulatedzseasoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]==0)+nu
}
}
fpr[17]=nu/sum(simulatedoutbreak17[2206:2548,]==0)
#--------------------------------------------------------
# POD power of detection
pod=rep(0,17)
podseas=rep(0,3)
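# Power of detection: proportion of the nsim simulations in which at least
# one alarm falls on an outbreak day within the final 49 weeks.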
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[1]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[2]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[3]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[4]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[5]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[6]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[1]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[7]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[2]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[8]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[9]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[10]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[11]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[12]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[13]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[14]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[15]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[16]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[3]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[17]=mu/nsim
#--------------------------------------------------------
# Sensitivity
sensitivity=rep(0,17)
sensitivityseas=rep(0,3)
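# Sensitivity: alarmed outbreak days in the final 49 weeks, summed over all
# simulations, divided by the total number of outbreak days (for the seasonal
# variants the denominator also counts the seasonal outbreak days in every
# year, while alarms are only assessed in the final 49 weeks).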
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]>0)
}
}
sensitivity[1]=nu/sum(simulatedoutbreak1>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
}
}
sensitivity[2]=nu/sum(simulatedoutbreak2>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
}
}
sensitivity[3]=nu/sum(simulatedoutbreak3>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
}
}
sensitivity[4]=nu/sum(simulatedoutbreak4>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
}
}
sensitivity[5]=nu/sum(simulatedoutbreak5>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
}
}
sensitivity[6]=nu/sum(simulatedoutbreak6>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
}
}
sensitivityseas[1]=nu/sum(simulatedzseasoutbreak6>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
}
}
sensitivity[7]=nu/sum(simulatedoutbreak7>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
}
}
sensitivityseas[2]=nu/sum(simulatedzseasoutbreak7>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
}
}
sensitivity[8]=nu/sum(simulatedoutbreak8>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
}
}
sensitivity[9]=nu/sum(simulatedoutbreak9>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
}
}
sensitivity[10]=nu/sum(simulatedoutbreak10>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
}
}
sensitivity[11]=nu/sum(simulatedoutbreak11>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
}
}
sensitivity[12]=nu/sum(simulatedoutbreak12>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]>0)
}
}
sensitivity[13]=nu/sum(simulatedoutbreak13>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
}
}
sensitivity[14]=nu/sum(simulatedoutbreak14>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
}
}
sensitivity[15]=nu/sum(simulatedoutbreak15>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
}
}
sensitivity[16]=nu/sum(simulatedoutbreak16>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
}
}
sensitivityseas[3]=nu/sum(simulatedzseasoutbreak16>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
}
}
sensitivity[17]=nu/sum(simulatedoutbreak17>0)
#--------------------------------------------------------
# Specificity
specificity=rep(0,17)
specificityseas=rep(0,3)
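# Specificity: non-alarmed non-outbreak days in the final 49 weeks, summed
# over all simulations, divided by the total number of non-outbreak days
# (rows 2206:2548) over all simulations.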
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall1[nrow(alarmall1)-i+1,j]==0 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]==0)
}
}
specificity[1]=nu/sum(simulatedoutbreak1[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall2[nrow(alarmall2)-i+1,j]==0 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]==0)
}
}
specificity[2]=nu/sum(simulatedoutbreak2[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall3[nrow(alarmall3)-i+1,j]==0 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]==0)
}
}
specificity[3]=nu/sum(simulatedoutbreak3[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall4[nrow(alarmall4)-i+1,j]==0 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]==0)
}
}
specificity[4]=nu/sum(simulatedoutbreak4[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall5[nrow(alarmall5)-i+1,j]==0 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]==0)
}
}
specificity[5]=nu/sum(simulatedoutbreak5[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==0 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]==0)
}
}
specificity[6]=nu/sum(simulatedoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==0 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]==0)
}
}
specificityseas[1]=nu/sum(simulatedzseasoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==0 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]==0)
}
}
specificity[7]=nu/sum(simulatedoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==0 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]==0)
}
}
specificityseas[2]=nu/sum(simulatedzseasoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall8[nrow(alarmall8)-i+1,j]==0 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]==0)
}
}
specificity[8]=nu/sum(simulatedoutbreak8[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall9[nrow(alarmall9)-i+1,j]==0 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]==0)
}
}
specificity[9]=nu/sum(simulatedoutbreak9[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall10[nrow(alarmall10)-i+1,j]==0 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]==0)
}
}
specificity[10]=nu/sum(simulatedoutbreak10[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall11[nrow(alarmall11)-i+1,j]==0 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]==0)
}
}
specificity[11]=nu/sum(simulatedoutbreak11[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall12[nrow(alarmall12)-i+1,j]==0 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]==0)
}
}
specificity[12]=nu/sum(simulatedoutbreak12[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall13[nrow(alarmall13)-i+1,j]==0 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]==0)
}
}
specificity[13]=nu/sum(simulatedoutbreak13[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall14[nrow(alarmall14)-i+1,j]==0 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]==0)
}
}
specificity[14]=nu/sum(simulatedoutbreak14[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall15[nrow(alarmall15)-i+1,j]==0 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]==0)
}
}
specificity[15]=nu/sum(simulatedoutbreak15[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==0 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]==0)
}
}
specificity[16]=nu/sum(simulatedoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==0 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]==0)
}
}
specificityseas[3]=nu/sum(simulatedzseasoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall17[nrow(alarmall17)-i+1,j]==0 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]==0)
}
}
specificity[17]=nu/sum(simulatedoutbreak17[2206:2548,]==0)
#----------------------------------------------
# Timeliness
timeliness=rep(0,17)
timelinessseas=rep(0,3)
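# Timeliness: for each simulation, r1 and r2 are the first and last outbreak
# days within the final 49 weeks; the first alarmed outbreak day contributes
# (day - r1)/(r2 - r1 + 1) and a missed outbreak contributes 1, so the
# measure is the mean normalised detection delay (0 = detected on the first
# outbreak day, 1 = never detected).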
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak1[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak1[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak1)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[1]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak2[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak2[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak2)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[2]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak3[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak3[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak3)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[3]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak4[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak4[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak4)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[4]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak5[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak5[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak5)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[5]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak6[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak6[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak6)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[6]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak6[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak6[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak6)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[1]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak7[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak7[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak7)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[7]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak7[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak7[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak7)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[2]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak8[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak8[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak8)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[8]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak9[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak9[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak9)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[9]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak10[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak10[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak10)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[10]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak11[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak11[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak11)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[11]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak12[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak12[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak12)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[12]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak13[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak13[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak13)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[13]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak14[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak14[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak14)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[14]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak15[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak15[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak15)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[15]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak16[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak16[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak16)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[16]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak16[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak16[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak16)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[3]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak17[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak17[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak17)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[17]=(ss+n)/nsim
#==================================
# Summary=data.frame(fpr,pod,sensitivity,specificity,timeliness)
# row.names(Summary)=c("sigid1","sigid2","sigid3","sigid4","sigid5","sigid6","sigid7","sigid8","sigid9","sigid10","sigid11","sigid12","sigid13","sigid14","sigid15","sigid16","sigid17")
#
# Summaryseas=data.frame(fprseas,podseas,sensitivityseas,specificityseas,timelinessseas)
# row.names(Summaryseas)=c("sigid6","sigid7","sigid16")
#
#
# fix(Summary)
# fix(Summaryseas)
#
summary1=data.frame(fpr, pod, sensitivity, specificity, timeliness)
row.names(summary1)=c("sigid1", "sigid2", "sigid3", "sigid4", "sigid5",
"sigid6", "sigid7", "sigid8", "sigid9", "sigid10",
"sigid11", "sigid12", "sigid13", "sigid14", "sigid15",
"sigid16","sigid17")
summary2=data.frame(fprseas, podseas, sensitivityseas,
specificityseas, timelinessseas)
row.names(summary2)=c("sigid6", "sigid7", "sigid16")
if(!dir.exists(file.path(myDir, "output"))){
dir.create(file.path(myDir, "output"))
}
write.csv(summary1, file.path(myDir, "output", "summaryC3-18.csv"),
row.names=FALSE)
write.csv(summary2, file.path(myDir, "output", "summarySeasC3-18.csv"),
row.names=FALSE)
## ############################################################################
##
## DISCLAIMER:
## This script has been developed for research purposes only.
## The script is provided without any warranty of any kind, either express or
## implied. The entire risk arising out of the use or performance of the sample
## script and documentation remains with you.
## In no event shall its author, or anyone else involved in the
## creation, production, or delivery of the script be liable for any damages
## whatsoever (including, without limitation, damages for loss of business
## profits, business interruption, loss of business information, or other
## pecuniary loss) arising out of the use of or inability to use the sample
## scripts or documentation, even if the author has been advised of the
## possibility of such damages.
##
## ############################################################################
##
## DESCRIPTION
## Simulates outbreaks and analyses them using EARS-C3
##
##
## Written by: Angela Noufaily and Felipe J Colón-González
## For any problems with this code, please contact f.colon@uea.ac.uk
##
## ############################################################################
rm(list=ls(all=TRUE))
# FUNCTIONS THAT PRODUCE THE DATA
# DEFINING FUNCTION h
require(data.table)
require(dplyr)
require(tidyr)
require(surveillance)
require(lubridate)
require(zoo)
#==============
# 5-day systems
#==============
h1=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2){
t=1:N
if(k==0 & k2==0){h1=alpha+beta*t}
else{
if(k==0)
{
l=1:k2
h1=rep(0,N)
for(i in 1:N){
h1[i]=alpha+beta*(t[i]+shift)+sum(gama3*cos((2*pi*l*(t[i]+shift))/5)+gama4*sin((2*pi*l*(t[i]+shift))/5))
}
}
else{
j=1:k
l=1:k2
h1=rep(0,N)
for(i in 1:N){
h1[i]=alpha+beta*(t[i]+shift)+sum(gama1*cos((2*pi*j*(t[i]+shift))/(52*5))+gama2*sin((2*pi*j*(t[i]+shift2))/(52*5)))+sum(gama3*cos((2*pi*l*(t[i]+shift))/5)+gama4*sin((2*pi*l*(t[i]+shift))/5))
}
}
}
h1
}
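# h1 builds the linear predictor of the daily mean on the log scale for a
# 5-working-day week: log(mu_t) = alpha + beta*(t+shift)
#   + sum_j [gama1*cos(2*pi*j*(t+shift)/(52*5)) + gama2*sin(2*pi*j*(t+shift2)/(52*5))]
#   + sum_l [gama3*cos(2*pi*l*(t+shift)/5)      + gama4*sin(2*pi*l*(t+shift)/5)],
# i.e. a linear trend plus annual (period 52*5) and day-of-week (period 5)
# Fourier terms. Illustrative sanity check with made-up parameters (these are
# not the values used for any sigid below): with k=0 and k2=0 the seasonal
# terms drop out and the predictor reduces to alpha + beta*t.
stopifnot(all(h1(N=10,k=0,k2=0,alpha=1,beta=0,gama1=0,gama2=0,gama3=0,gama4=0,
                 shift=0,shift2=0)==1))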
negbinNoise1=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,phi,shift,shift2){
mu <- exp(h1(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2))
if(phi==1){yi <- rpois(N,mu)}
else{
prob <- 1/phi
size <- mu/(phi-1)
yi <- rnbinom(N,size=size,prob=prob)
}
yi
}
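# negbinNoise1 draws counts with mean mu and variance phi*mu: with prob=1/phi
# and size=mu/(phi-1), the negative binomial mean is size*(1-prob)/prob = mu
# and the variance is size*(1-prob)/prob^2 = mu*phi, so phi=1 reduces to the
# Poisson case handled in the first branch.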
outbreak5=function(currentday,weeklength,wtime,yi,interval,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2,phi,numoutbk,peakoutbk,meanlog,sdlog){
# theta, beta, gama1 and gama2 are the parameters of the equation for mu in Section 3.1
N=length(yi)
t=1:N
mu <- exp(h1(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,shift2))
s=sqrt(mu*phi)
#wtime = (currentday-49*5+1):currentday # current outbreaks
# GENERATING OUTBREAKS
# STARTING TIMES OF OUTBREAKS
startoutbk <- sample(wtime, numoutbk, replace = FALSE)
# OUTBREAK SIZE OF CASES
sizeoutbk=rep(0,numoutbk)
for(i in 1:numoutbk){
set.seed(i)
soutbk=1
sou=1
while(soutbk<2){
set.seed(sou)
soutbk=rpois(1,s[startoutbk[i]]*peakoutbk)
sou=sou+1
}
sizeoutbk[i]=soutbk
}
# DISTRIBUTE THESE CASES OVER TIME USING LOGNORMAL
outbreak=rep(0,2*N)
for( j in 1:numoutbk){
set.seed(j)
outbk <-rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
#outbk <-rnorm(sizeoutbk[j], mean = meanlog2, sd = sdlog)
#h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),1),plot=FALSE)
h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),interval),plot=FALSE)
cases <- h$counts
weight=rep(0,length(cases))
duration<-startoutbk:(startoutbk+length(cases)-1)
dayofweek<-duration%%5 # 0 is friday; 1 is monday; 2 is tuesday etc.
for(i in 1:length(cases)){
if(dayofweek[i]==0){weight[i]=1.1}
if(dayofweek[i]==1){weight[i]=1.5}
if(dayofweek[i]==2){weight[i]=1.1}
if(dayofweek[i]==3){weight[i]=1}
if(dayofweek[i]==4){weight[i]=1}
}
cases2 <- cases*weight
for (l in 1:(length(cases2))){
outbreak[startoutbk[j]+(l-1)]= cases2[l]+outbreak[startoutbk[j]+(l-1)]
}# l loop
}# j loop
#for(v in 1:(currentday-49*5)){if(outbreak[v]>0){outbreak[v]=0}}
for(v in currentday:(currentday+100)){if(outbreak[v]>0){outbreak[v]=0}}
outbreak=outbreak[1:N]
# ADD NOISE AND OUTBREAKS
yitot=yi+outbreak
result=list(yitot=yitot,outbreak=outbreak,startoutbk=startoutbk,sizeoutbk=sizeoutbk,sd=s,mean=mu)
#return(result)
}
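# outbreak5 injects outbreaks into a 5-day-week baseline: start days are sampled
# from wtime, each outbreak size is drawn as Poisson with mean peakoutbk times
# the baseline standard deviation at the start day (re-drawn until at least 2
# cases), the cases are spread over the following days with a
# lognormal(meanlog, sdlog) shape binned at width 'interval' and re-weighted by
# day of week, and any outbreak mass falling on or after 'currentday' is zeroed
# before the series is truncated back to length N. The returned list contains
# the baseline-plus-outbreak series (yitot) and the outbreak signal itself.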
#==============
# 7-day systems
#==============
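# h2, negbinNoise2 and outbreak7 below mirror the 5-day-week functions above,
# with a within-week period of 7, an annual period of 52*7, and day-of-week
# weights that inflate weekend counts rather than removing them.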
h2=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift){
t=1:N
if(k==0 & k2==0){h2=alpha+beta*t}
else{
if(k==0)
{
l=1:k2
h2=rep(0,N)
for(i in 1:N){
h2[i]=alpha+beta*(t[i]+shift)+sum(gama3*cos((2*pi*l*(t[i]+shift))/7)+gama4*sin((2*pi*l*(t[i]+shift))/7))
}
}
else{
j=1:k
l=1:k2
h2=rep(0,N)
for(i in 1:N){
h2[i]=alpha+beta*(t[i]+shift)+sum(gama1*cos((2*pi*j*(t[i]+shift))/(52*7))+gama2*sin((2*pi*j*(t[i]+shift))/(52*7)))+sum(gama3*cos((2*pi*l*(t[i]+shift))/7)+gama4*sin((2*pi*l*(t[i]+shift))/7))
}
}
}
h2
}
negbinNoise2=function(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,phi,shift){
mu <- exp(h2(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift))
if(phi==1){yi <- rpois(N,mu)}
else{
prob <- 1/phi
size <- mu/(phi-1)
yi <- rnbinom(N,size=size,prob=prob)
}
yi
}
outbreak7=function(currentday,weeklength,wtime,yi,interval,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift,phi,numoutbk,peakoutbk,meanlog,sdlog){
# theta, beta, gama1 and gama2 are the parameters of the equation for mu in Section 3.1
N=length(yi)
t=1:N
mu <- exp(h2(N,k,k2,alpha,beta,gama1,gama2,gama3,gama4,shift))
s=sqrt(mu*phi)
#wtime = (currentday-49*7+1):currentday # current outbreaks
# wtime = 350*1:7 # current outbreaks
# GENERATING OUTBREAKS
# STARTING TIMES OF OUTBREAKS
startoutbk <- sample(wtime, numoutbk, replace = FALSE)
# OUTBREAK SIZE OF CASES
sizeoutbk=rep(0,numoutbk)
for(i in 1:numoutbk){
set.seed(i)
soutbk=1
sou=1
while(soutbk<2){
set.seed(sou)
soutbk=rpois(1,s[startoutbk[i]]*peakoutbk)
sou=sou+1
}
sizeoutbk[i]=soutbk
}
# DISTRIBUTE THESE CASES OVER TIME USING LOGNORMAL
outbreak=rep(0,2*N)
for( j in 1:numoutbk){
set.seed(j)
outbk <-rlnorm(sizeoutbk[j], meanlog = meanlog, sdlog = sdlog)
#outbk <-rnorm(sizeoutbk[j], mean = meanlog2, sd = sdlog)
#h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),1),plot=FALSE)
h<- hist(outbk,breaks=seq(0,ceiling(max(outbk)),interval),plot=FALSE)
cases <- h$counts
weight=rep(0,length(cases))
duration<-startoutbk:(startoutbk+length(cases)-1)
dayofweek<-duration%%7 # 0 is sunday; 1 is monday; 2 is tuesday etc.
for(i in 1:length(cases)){
if(dayofweek[i]==0){weight[i]=2}
if(dayofweek[i]==1){weight[i]=1}
if(dayofweek[i]==2){weight[i]=1}
if(dayofweek[i]==3){weight[i]=1}
if(dayofweek[i]==4){weight[i]=1}
if(dayofweek[i]==5){weight[i]=1}
if(dayofweek[i]==6){weight[i]=2}
}
cases2 <- cases*weight
for (l in 1:(length(cases2))){
outbreak[startoutbk[j]+(l-1)]= cases2[l]+outbreak[startoutbk[j]+(l-1)]
}# l loop
}# j loop
#for(v in (currentday-49*7):currentday){if(outbreak[v]>0){outbreak[v]=0}}
for(v in currentday:(currentday+100)){if(outbreak[v]>0){outbreak[v]=0}}
outbreak=outbreak[1:N]
# ADD NOISE AND OUTBREAKS
yitot=yi+outbreak
result=list(yitot=yitot,outbreak=outbreak,startoutbk=startoutbk,sizeoutbk=sizeoutbk,sd=s,mean=mu)
#return(result)
}
#==========================
# Specify the bank holidays
#==========================
myDir <- "/local/zck07apu/Documents/GitLab/rammie_comparison/scripts/C3/10x"
years=7
bankholidays=read.csv(file.path(myDir, "Bankholidays.csv"))
#fix(bankholidays)
bankhols7=bankholidays$bankhol
bankhols7=as.numeric(bankhols7)
length(bankhols7)
#fix(bankhols7)
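# Collapse the 7-day holiday indicator to a 5-day (Mon-Fri) week by dropping
# the 6th element of every 7-day block and then the 6th element of every
# remaining 6-day block, i.e. (presumably) the Saturday and then the Sunday
# entries.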
bankhols5=bankhols7[-seq(6,length(bankhols7),7)]
bankhols5=bankhols5[-seq(6,length(bankhols5),6)]
bankhols5=as.numeric(bankhols5)
length(bankhols5)
#fix(bankhols5)
#=======================
# Define the data frames
#=======================
nsim=100
simulateddata1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulateddata17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedtotals17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak1=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak2=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak3=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak4=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak5=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak8=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak9=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak10=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak11=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak12=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak13=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak14=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak15=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedoutbreak17=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak6=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak7=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
simulatedzseasoutbreak16=data.frame(array(rep(0,nsim*52*7*years),dim=c(52*7*years,nsim)))
#################################
#SIMULATE SYNDROMES AND OUTBREAKS
#################################
#=====================
# 5-day week syndromes
#=====================
days5=5
N=52*days5*years
#sigid6
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50)/10
#mu=exp(h1(N=N,k=1,k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,shift=-50,shift2=-50))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,
k2=1,alpha=6,beta=0,gama1=0.3,gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*80,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=6,beta=0,gama1=0.3,
gama2=2,gama3=0.3,gama4=0.5,phi=1.5,shift=-50,shift2=-50,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak +out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata6[,i]=round(zt)
simulatedtotals6[,i]=round(zitot)
simulatedoutbreak6[,i]=round(zoutbreak)
simulatedzseasoutbreak6[,i]=round(zseasoutbreak)
}
#----------------------------------------------------
# Plot the datasets and outbreaks using the following
#----------------------------------------------------
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid7
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=1,beta=0,gama1=0.1,gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50)
#mu=exp(h1(N=N,k=1,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=2,gama3=0.1,gama4=0.1,shift=-50,shift2=-50))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=((1+(j-1)*days5*52):(20+(j-1)*days5*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days5*50,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
yt=append(yt,zeros,after=2*(s-1)+weekend[s])
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
zseasoutbreak=append(zseasoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata7[,i]=round(zt)
simulatedtotals7[,i]=round(zitot)
simulatedoutbreak7[,i]=round(zoutbreak)
simulatedzseasoutbreak7[,i]=round(zseasoutbreak)
}
plot(1:(52*years*7),simulatedtotals7[,7],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak7[,7],col='green')
lines(1:(52*years*7),simulatedoutbreak7[,7],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak7[,4],col='green',typ='l')
#sigid8
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=0,k2=1,alpha=6,beta=0.0001,gama1=0,gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0)/10
#mu=exp(h1(N=N,k=0,k2=1,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.6,gama4=0.9,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=0,k2=1,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.6,gama4=0.9,phi=1.5,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata8[,i]=round(zt)
simulatedtotals8[,i]=round(zitot)
simulatedoutbreak8[,i]=round(zoutbreak)
}
plot(1:(52*years*7),simulateddata8[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata8[,1]+simulatedoutbreak8[,1])
#sigid9
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150)
mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=1.5,gama2=0.1,gama3=0.6,gama4=0.8,shift=-150,shift2=-150))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=3,beta=0,gama1=1.5,
gama2=0.1,gama3=0.2,gama4=0.3,phi=1,shift=-150,shift2=-150,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata9[,i]=round(zt)
simulatedtotals9[,i]=round(zitot)
simulatedoutbreak9[,i]=round(zoutbreak)
}
plot(1:(52*years*7),simulateddata9[,1],typ='l',xlim=c(2185,2548),col='green')
lines(1:(52*years*7),simulateddata9[,1]+simulatedoutbreak9[,1])
#sigid10
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200)
#mu=exp(h1(N=N,k=1,k2=1,alpha=3,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.15,shift=-200,shift2=-200))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=3,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.15,phi=1,shift=-200,shift2=-200,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata10[,i]=round(zt)
simulatedtotals10[,i]=round(zitot)
simulatedoutbreak10[,i]=round(zoutbreak)
}
#sigid11
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0)
mu=exp(h1(N=N,k=1,k2=1,alpha=5,beta=0,gama1=0.2,gama2=0.1,gama3=0.05,gama4=0.1,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),interval=0.25,yi=yt,k=1,k2=1,alpha=5,beta=0,gama1=0.2,
gama2=0.1,gama3=0.05,gama4=0.1,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata11[,i]=round(zt)
simulatedtotals11[,i]=round(zitot)
simulatedoutbreak11[,i]=round(zoutbreak)
}
#sigid12
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0)
#mu=exp(h1(N=N,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,gama2=0,gama3=0.05,gama4=0.15,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=2,k2=1,alpha=0.5,beta=0,gama1=0.4,
gama2=0,gama3=0.05,gama4=0.15,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata12[,i]=round(zt)
simulatedtotals12[,i]=round(zitot)
simulatedoutbreak12[,i]=round(zoutbreak)
}
#sigid13
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0)/100
#mu=exp(h1(N=N,k=1,k2=1,alpha=9,beta=0,gama1=0.5,gama2=0.2,gama3=0.2,gama4=0.5,shift=0,shift2=0))
set.seed(i)
out2=outbreak5(currentday=days5*52*years,weeklength=52*days5*years,wtime=(length(yt)-49*days5+1):length(yt),yi=yt,interval=0.25,k=1,k2=1,alpha=9,beta=0,gama1=0.5,
gama2=0.2,gama3=0.2,gama4=0.5,phi=1,shift=0,shift2=0,numoutbk=1,peakoutbk=10*days5,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
#zitot[(bankhols5==1)]=0
#zitot[(bankhols5==1)+1]=1.5*zitot[i+1]
for(b in 1:length(zitot)){
if(bankhols5[b]==1){
zitot[b]=0
zitot[b+1]=1.5*zitot[b+1]
}
}
zeros=rep(0,2)
weekend=seq(days5,days5*years*52,days5)
#weekend=seq(0,days5*years*52-1,days5)
for(s in 1:length(weekend)){
zt=append(zt,zeros,after=2*(s-1)+weekend[s])
zitot=append(zitot,zeros,after=2*(s-1)+weekend[s])
zoutbreak=append(zoutbreak,zeros,after=2*(s-1)+weekend[s])
}
simulateddata13[,i]=round(zt)
simulatedtotals13[,i]=round(zitot)
simulatedoutbreak13[,i]=round(zoutbreak)
}
plot(1:length(simulatedtotals13[,1]),simulatedtotals13[,1],typ='l')
plot(1:(52*years*7),simulatedtotals13[,1],typ='l',xlim=c(2206,2548),col='green')
lines(1:(52*years*7),simulateddata13[,1],typ='l')
#=====================
# 7-day week syndromes
#=====================
years=7
days7=7
N=52*days7*years
#sigid1
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,phi=2,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,gama3=0.5,gama4=0.4,shift=29))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata1[,i]=round(zt)
simulatedtotals1[,i]=round(zitot)
simulatedoutbreak1[,i]=round(zoutbreak)
}
#sigid3
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167)
#mu=exp(h2(N=N,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,gama2=1.4,gama3=0.5,gama4=0.4,shift=-167))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=0.5,beta=0,gama1=1.5,
gama2=1.4,gama3=0.5,gama4=0.4,phi=1,shift=-167,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata3[,i]=round(zt)
simulatedtotals3[,i]=round(zitot)
simulatedoutbreak3[,i]=round(zoutbreak)
}
#sigid4
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=5.5,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=5.5,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata4[,i]=round(zt)
simulatedtotals4[,i]=round(zitot)
simulatedoutbreak4[,i]=round(zoutbreak)
}
#sigid5
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=2,beta=0,gama1=0,gama2=0,gama3=0.3,gama4=0.25,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=2,beta=0,gama1=0,
gama2=0,gama3=0.3,gama4=0.25,phi=1,shift=1,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata5[,i]=round(zt)
simulatedtotals5[,i]=round(zitot)
simulatedoutbreak5[,i]=round(zoutbreak)
}
#sigid14
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=2,beta=0.0005,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57)
#mu=exp(h2(N=N,k=1,k2=2,alpha=2,beta=0,gama1=0.8,gama2=0.8,gama3=0.8,gama4=0.4,shift=57))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=6,beta=0,gama1=0.2,gama2=0.2,
gama3=0.5,gama4=0.4,phi=2,shift=29,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata14[,i]=round(zt)
simulatedtotals14[,i]=round(zitot)
simulatedoutbreak14[,i]=round(zoutbreak)
}
#sigid15
for(i in 1:nsim){
set.seed(i)
#yt=0.1*(negbinNoise2(N=N,k=4,k2=1,alpha=1.5,beta=0,gama1=0.1,gama2=0.1,gama3=1.8,gama4=0.1,phi=1,shift=-85)+2)
yt=1*(negbinNoise2(N=N,k=4,k2=1,alpha=0.05,beta=0,gama1=0.01,gama2=0.01,gama3=1.8,gama4=0.1,phi=1,shift=-85)+0)
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=2,beta=0,gama1=0.8,
gama2=0.8,gama3=0.8,gama4=0.4,phi=4,shift=57,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata15[,i]=round(zt)
simulatedtotals15[,i]=round(zitot)
simulatedoutbreak15[,i]=round(zoutbreak)
}
#plot(1:N,yt,typ='l')
#plot(1:(52*years*7),zt,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1,364))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(365,728))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(729,1092))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1093,1456))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1457,1820))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(1821,2184))
#plot(1:(52*years*7),zitot,typ='l',xlim=c(2185,2548))
#lines(1:(52*years*7),zoutbreak,col='green')
plot(1:(52*years*7),simulatedtotals6[,4],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green')
lines(1:(52*years*7),simulatedoutbreak6[,4],col='red')
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedzseasoutbreak6[,4],col='green',typ='l')
#sigid16
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29)
#mu=exp(h2(N=N,k=1,k2=2,alpha=3,beta=0,gama1=0.8,gama2=0.6,gama3=0.8,gama4=0.4,shift=29))
out1=rep(0,N)
for(j in 1:years){
set.seed(j+years*i)
out=outbreak5(currentday=days7*52*years,weeklength=52*days7*years,wtime=((210+(j-1)*days7*52):(230+(j-1)*days7*52)),yi=yt,interval=0.02,k=1,k2=1,alpha=1,beta=0,gama1=0.1,
gama2=2,gama3=0.05,gama4=0.05,phi=1,shift=-50,shift2=-50,numoutbk=1,peakoutbk=3*days7*150,meanlog=0,sdlog=0.5)
out1=out1+out$outbreak
}
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=1,k2=2,alpha=3,beta=0,gama1=0.8,
gama2=0.6,gama3=0.8,gama4=0.4,phi=4,shift=29,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zseasoutbreak=out2$outbreak+out1
zt=yt +out1
zitot=yt + out2$outbreak +out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata16[,i]=round(zt)
simulatedtotals16[,i]=round(zitot)
simulatedoutbreak16[,i]=round(zoutbreak)
simulatedzseasoutbreak16[,i]=round(zseasoutbreak)
}
plot(1:(52*years*7),zitot,typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),yt,col='blue')
lines(1:(52*years*7),zseasoutbreak,col='green')
lines(1:(52*years*7),zoutbreak,col='red')
plot(1:(52*years*7),simulatedtotals16[,1],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,1],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,1],col='red')
plot(1:(52*years*7),simulatedtotals16[,2],typ='l',xlim=c(1,7*364))
lines(1:(52*years*7),simulatedzseasoutbreak16[,2],col='green')
lines(1:(52*years*7),simulatedoutbreak16[,2],col='red')
#sigid17
for(i in 1:nsim){
set.seed(i)
yt=negbinNoise2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1)
#mu=exp(h2(N=N,k=0,k2=2,alpha=6,beta=0,gama1=0,gama2=0,gama3=0.8,gama4=0.4,shift=1))
set.seed(i)
out2=outbreak7(currentday=N,weeklength=52*days7*years,wtime=(length(yt)-49*days7+1):length(yt),yi=yt,interval=0.25,k=0,k2=2,alpha=6,beta=0,gama1=0,
gama2=0,gama3=0.8,gama4=0.4,phi=4,shift=1,numoutbk=1,peakoutbk=10*days7,meanlog=0,sdlog=0.5)
zoutbreak=out2$outbreak
zt=yt#+out1
zitot=yt + out2$outbreak #+out1
for(b in 1:length(zitot)){
if(bankhols7[b]==1){
zitot[b]=2*zitot[b]
}
}
simulateddata17[,i]=round(zt)
simulatedtotals17[,i]=round(zitot)
simulatedoutbreak17[,i]=round(zoutbreak)
}
#=============================
# Define the alarm data frames
#=============================
days=7
nsim=100
alarmall1=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall2=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall3=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall4=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall5=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall6=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall7=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall8=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall9=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall10=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall11=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall12=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall13=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall14=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall15=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall16=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
alarmall17=data.frame(array(rep(0,nsim*49*days),dim=c(49*days,nsim)))
#########################################
#========================================
#Implement the algorithm to data by days and record the alarms in the dataframes above
#========================================
#########################################
myDates <- seq(ymd('2010-01-01'), ymd('2016-12-30'), by = '1 day')
dropDays <- ymd(c('2010-12-31', '2011-12-31', '2012-12-31',
                  '2013-12-31', '2014-12-31', '2015-12-31',
                  '2016-02-29', '2012-02-29'))
"%ni%" <- Negate("%in%")
myDates <- myDates[myDates %ni% dropDays]
# Convert to 7-day running totals
rolling <- function(x){
rollapplyr(x, width=7, FUN=sum, na.rm=T, fill=NA)
}
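# rolling() returns right-aligned 7-day sums with NA for the first 6 days,
# e.g. rolling(1:10) gives NA NA NA NA NA NA 28 35 42 49.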
simdata1 <- apply(simulateddata1, 2, rolling)
# simdata2 <- apply(simulateddata2, 2, rolling)
simdata3 <- apply(simulateddata3, 2, rolling)
simdata4 <- apply(simulateddata4, 2, rolling)
simdata5 <- apply(simulateddata5, 2, rolling)
simdata6 <- apply(simulateddata6, 2, rolling)
simdata7 <- apply(simulateddata7, 2, rolling)
simdata8 <- apply(simulateddata8, 2, rolling)
simdata9 <- apply(simulateddata9, 2, rolling)
simdata10 <- apply(simulateddata10, 2, rolling)
simdata11 <- apply(simulateddata11, 2, rolling)
simdata12 <- apply(simulateddata12, 2, rolling)
simdata13 <- apply(simulateddata13, 2, rolling)
simdata14 <- apply(simulateddata14, 2, rolling)
simdata15 <- apply(simulateddata15, 2, rolling)
simdata16 <- apply(simulateddata16, 2, rolling)
simdata17 <- apply(simulateddata17, 2, rolling)
simtot1 <- apply(simulatedtotals1, 2, rolling)
# simtot2 <- apply(simulatedtotals2, 2, rolling)
simtot3 <- apply(simulatedtotals3, 2, rolling)
simtot4 <- apply(simulatedtotals4, 2, rolling)
simtot5 <- apply(simulatedtotals5, 2, rolling)
simtot6 <- apply(simulatedtotals6, 2, rolling)
simtot7 <- apply(simulatedtotals7, 2, rolling)
simtot8 <- apply(simulatedtotals8, 2, rolling)
simtot9 <- apply(simulatedtotals9, 2, rolling)
simtot10 <- apply(simulatedtotals10, 2, rolling)
simtot11 <- apply(simulatedtotals11, 2, rolling)
simtot12 <- apply(simulatedtotals12, 2, rolling)
simtot13 <- apply(simulatedtotals13, 2, rolling)
simtot14 <- apply(simulatedtotals14, 2, rolling)
simtot15 <- apply(simulatedtotals15, 2, rolling)
simtot16 <- apply(simulatedtotals16, 2, rolling)
simtot17 <- apply(simulatedtotals17, 2, rolling)
# Convert data to sts
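# Each matrix of running totals is wrapped in a surveillance::sts object; the
# daily epochs come from myDates (epochAsDate=TRUE) and frequency=364 matches
# the 52*7-day "year" used throughout the simulations.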
simSts1 <- sts(simdata1, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
# simSts2 <- sts(simdata2, start=c(2010, 1), frequency=364,
# epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts3 <- sts(simdata3, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts4 <- sts(simdata4, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts5 <- sts(simdata5, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts6 <- sts(simdata6, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts7 <- sts(simdata7, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts8 <- sts(simdata8, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts9 <- sts(simdata9, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts10 <- sts(simdata10, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts11 <- sts(simdata11, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts12 <- sts(simdata12, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts13 <- sts(simdata13, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts14 <- sts(simdata14, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts15 <- sts(simdata15, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts16 <- sts(simdata16, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
simSts17 <- sts(simdata17, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts1 <- sts(simtot1, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
# totSts2 <- sts(simtot2, start=c(2010, 1), frequency=364,
# epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts3 <- sts(simtot3, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts4 <- sts(simtot4, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts5 <- sts(simtot5, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts6 <- sts(simtot6, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts7 <- sts(simtot7, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts8 <- sts(simtot8, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts9 <- sts(simtot9, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts10 <- sts(simtot10, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts11 <- sts(simtot11, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts12 <- sts(simtot12, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts13 <- sts(simtot13, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts14 <- sts(simtot14, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts15 <- sts(simtot15, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts16 <- sts(simtot16, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
totSts17 <- sts(simtot17, start=c(2010, 1), frequency=364,
epoch=as.numeric(as.Date(myDates)), epochAsDate=TRUE)
in2016 <- 2206:2548
# Select range of data to monitor, algorithm and prediction interval
control <- list(range=in2016, method="C3", alpha=0.01)
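# Roughly, EARS-C3 (method="C3") standardises the current count against the
# mean and standard deviation of a short window of recent baseline days and
# adds carried-over exceedances from the two preceding days; alpha=0.01 sets
# the one-sided threshold. The range 2206:2548 restricts monitoring to the
# last 49 weeks (343 days) of the seven-year series, matching the 49*days
# alarm data frames defined above. To inspect a single run (illustrative
# only, reusing objects defined below):
# ex <- earsC(totSts6[,1], control=control); head(ex@upperbound); head(ex@alarm)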
for(sim in seq(nsim)){
cat("\t", sim)
# Run detection algorithm
det1 <- earsC(totSts1[,sim], control=control)
det3 <- earsC(totSts3[,sim], control=control)
det4 <- earsC(totSts4[,sim], control=control)
det5 <- earsC(totSts5[,sim], control=control)
det6 <- earsC(totSts6[,sim], control=control)
det7 <- earsC(totSts7[,sim], control=control)
det8 <- earsC(totSts8[,sim], control=control)
det9 <- earsC(totSts9[,sim], control=control)
det10 <- earsC(totSts10[,sim], control=control)
det11 <- earsC(totSts11[,sim], control=control)
det12 <- earsC(totSts12[,sim], control=control)
det13 <- earsC(totSts13[,sim], control=control)
det14 <- earsC(totSts14[,sim], control=control)
det15 <- earsC(totSts15[,sim], control=control)
det16 <- earsC(totSts16[,sim], control=control)
det17 <- earsC(totSts17[,sim], control=control)
# Plot detection results
dir.create(file.path(myDir, "plots", "totals"),
recursive=TRUE)
png(file.path(myDir, "plots", "totals", paste0("Sim_", sim, ".png")),
width=16,height=14,units="in",res=300)
par(mfrow=c(4, 4), oma=c(0, 0, 2, 0))
plot(det1, main="Dataset 1", legend=NULL)
plot(det3, main="Dataset 3", legend=NULL)
plot(det4, main="Dataset 4", legend=NULL)
plot(det5, main="Dataset 5", legend=NULL)
plot(det6, main="Dataset 6", legend=NULL)
plot(det7, main="Dataset 7", legend=NULL)
plot(det8, main="Dataset 8", legend=NULL)
plot(det9, main="Dataset 9", legend=NULL)
plot(det10, main="Dataset 10", legend=NULL)
plot(det11, main="Dataset 11", legend=NULL)
plot(det12, main="Dataset 12", legend=NULL)
plot(det13, main="Dataset 13", legend=NULL)
plot(det14, main="Dataset 14", legend=NULL)
plot(det15, main="Dataset 15", legend=NULL)
plot(det16, main="Dataset 16", legend=NULL)
plot(det17, main="Dataset 17", legend=NULL)
title(main=list(paste("Simulation", sim, "Alpha", control$alpha ),
cex=2), outer=TRUE)
dev.off()
# Retrieve information about alarms
alarmall1[,sim] <- as.numeric(as.vector(unlist(det1@alarm)))
alarmall3[,sim] <- as.numeric(as.vector(unlist(det3@alarm)))
alarmall4[,sim] <- as.numeric(as.vector(unlist(det4@alarm)))
alarmall5[,sim] <- as.numeric(as.vector(unlist(det5@alarm)))
alarmall6[,sim] <- as.numeric(as.vector(unlist(det6@alarm)))
alarmall7[,sim] <- as.numeric(as.vector(unlist(det7@alarm)))
alarmall8[,sim] <- as.numeric(as.vector(unlist(det8@alarm)))
alarmall9[,sim] <- as.numeric(as.vector(unlist(det9@alarm)))
alarmall10[,sim] <- as.numeric(as.vector(unlist(det10@alarm)))
alarmall11[,sim] <- as.numeric(as.vector(unlist(det11@alarm)))
alarmall12[,sim] <- as.numeric(as.vector(unlist(det12@alarm)))
alarmall13[,sim] <- as.numeric(as.vector(unlist(det13@alarm)))
alarmall14[,sim] <- as.numeric(as.vector(unlist(det14@alarm)))
alarmall15[,sim] <- as.numeric(as.vector(unlist(det15@alarm)))
alarmall16[,sim] <- as.numeric(as.vector(unlist(det16@alarm)))
alarmall17[,sim] <- as.numeric(as.vector(unlist(det17@alarm)))
}
# Replace missing values with zero (?)
alarmall1[is.na(alarmall1)] <- 0
alarmall3[is.na(alarmall3)] <- 0
alarmall4[is.na(alarmall4)] <- 0
alarmall5[is.na(alarmall5)] <- 0
alarmall6[is.na(alarmall6)] <- 0
alarmall7[is.na(alarmall7)] <- 0
alarmall8[is.na(alarmall8)] <- 0
alarmall9[is.na(alarmall9)] <- 0
alarmall10[is.na(alarmall10)] <- 0
alarmall11[is.na(alarmall11)] <- 0
alarmall12[is.na(alarmall12)] <- 0
alarmall13[is.na(alarmall13)] <- 0
alarmall14[is.na(alarmall14)] <- 0
alarmall15[is.na(alarmall15)] <- 0
alarmall16[is.na(alarmall16)] <- 0
alarmall17[is.na(alarmall17)] <- 0
# Compare vs data without outbreaks
for(sim in seq(nsim)){
cat("\t", sim)
det1 <- earsC(simSts1[,sim], control=control)
det3 <- earsC(simSts3[,sim], control=control)
det4 <- earsC(simSts4[,sim], control=control)
det5 <- earsC(simSts5[,sim], control=control)
det6 <- earsC(simSts6[,sim], control=control)
det7 <- earsC(simSts7[,sim], control=control)
det8 <- earsC(simSts8[,sim], control=control)
det9 <- earsC(simSts9[,sim], control=control)
det10 <- earsC(simSts10[,sim], control=control)
det11 <- earsC(simSts11[,sim], control=control)
det12 <- earsC(simSts12[,sim], control=control)
det13 <- earsC(simSts13[,sim], control=control)
det14 <- earsC(simSts14[,sim], control=control)
det15 <- earsC(simSts15[,sim], control=control)
det16 <- earsC(simSts16[,sim], control=control)
det17 <- earsC(simSts17[,sim], control=control)
dir.create(file.path(myDir, "plots", "control"),
recursive=TRUE)
png(file.path(myDir, "plots", "control",
paste0("Sim_", sim, ".png")),
width=16,height=14,units="in",res=300)
par(mfrow=c(4, 4), oma=c(0, 0, 2, 0))
plot(det1, main="Dataset 1", legend=NULL)
plot(det3, main="Dataset 3", legend=NULL)
plot(det4, main="Dataset 4", legend=NULL)
plot(det5, main="Dataset 5", legend=NULL)
plot(det6, main="Dataset 6", legend=NULL)
plot(det7, main="Dataset 7", legend=NULL)
plot(det8, main="Dataset 8", legend=NULL)
plot(det9, main="Dataset 9", legend=NULL)
plot(det10, main="Dataset 10", legend=NULL)
plot(det11, main="Dataset 11", legend=NULL)
plot(det12, main="Dataset 12", legend=NULL)
plot(det13, main="Dataset 13", legend=NULL)
plot(det14, main="Dataset 14", legend=NULL)
plot(det15, main="Dataset 15", legend=NULL)
plot(det16, main="Dataset 16", legend=NULL)
plot(det17, main="Dataset 17", legend=NULL)
title(main=list(paste("Simulation", sim, "Alpha", control$alpha ),
cex=2), outer=TRUE)
dev.off()
}
#====================================
#====================================
#Summary
#====================================
#====================================
days=7
# FPR false positive rate
fpr=rep(0,17)
fprseas=rep(0,3)
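# fpr: pooled over simulations, alarm days that fall on non-outbreak days in
# the monitored window, divided by the total number of non-outbreak days in
# rows 2206:2548 across all simulations.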
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]==0)+nu
}
}
fpr[1]=nu/sum(simulatedoutbreak1[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]==0)+nu
}
}
fpr[2]=nu/sum(simulatedoutbreak2[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]==0)+nu
}
}
fpr[3]=nu/sum(simulatedoutbreak3[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]==0)+nu
}
}
fpr[4]=nu/sum(simulatedoutbreak4[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]==0)+nu
}
}
fpr[5]=nu/sum(simulatedoutbreak5[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]==0)+nu
}
}
fpr[6]=nu/sum(simulatedoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]==0)+nu
}
}
fprseas[1]=nu/sum(simulatedzseasoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]==0)+nu
}
}
fpr[7]=nu/sum(simulatedoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]==0)+nu
}
}
fprseas[2]=nu/sum(simulatedzseasoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]==0)+nu
}
}
fpr[8]=nu/sum(simulatedoutbreak8[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]==0)+nu
}
}
fpr[9]=nu/sum(simulatedoutbreak9[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]==0)+nu
}
}
fpr[10]=nu/sum(simulatedoutbreak10[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]==0)+nu
}
}
fpr[11]=nu/sum(simulatedoutbreak11[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]==0)+nu
}
}
fpr[12]=nu/sum(simulatedoutbreak12[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]==0)+nu
}
}
fpr[13]=nu/sum(simulatedoutbreak13[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]==0)+nu
}
}
fpr[14]=nu/sum(simulatedoutbreak14[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]==0)+nu
}
}
fpr[15]=nu/sum(simulatedoutbreak15[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]==0)+nu
}
}
fpr[16]=nu/sum(simulatedoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]==0)+nu
}
}
fprseas[3]=nu/sum(simulatedzseasoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*7):1){
nu=(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]==0)+nu
}
}
fpr[17]=nu/sum(simulatedoutbreak17[2206:2548,]==0)
#--------------------------------------------------------
# POD power of detection
pod=rep(0,17)
podseas=rep(0,3)
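# pod: proportion of simulations with at least one alarm day coinciding with
# an outbreak day during the monitored window.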
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[1]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[2]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[3]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[4]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[5]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[6]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[1]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[7]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[2]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[8]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[9]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[10]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[11]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[12]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[13]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[14]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[15]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[16]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
}
mu=mu+(nu>0)
}
podseas[3]=mu/nsim
mu=0
for(j in 1:nsim){
nu=0
for(i in (49*days):1){
nu=nu+(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
}
mu=mu+(nu>0)
}
pod[17]=mu/nsim
#--------------------------------------------------------
# Sensitivity
sensitivity=rep(0,17)
sensitivityseas=rep(0,3)
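# sensitivity: pooled over simulations, alarm days coinciding with outbreak
# days in the monitored window, divided by the total number of outbreak days.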
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]>0)
}
}
sensitivity[1]=nu/sum(simulatedoutbreak1>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
}
}
sensitivity[2]=nu/sum(simulatedoutbreak2>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
}
}
sensitivity[3]=nu/sum(simulatedoutbreak3>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
}
}
sensitivity[4]=nu/sum(simulatedoutbreak4>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
}
}
sensitivity[5]=nu/sum(simulatedoutbreak5>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
}
}
sensitivity[6]=nu/sum(simulatedoutbreak6>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
}
}
sensitivityseas[1]=nu/sum(simulatedzseasoutbreak6>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
}
}
sensitivity[7]=nu/sum(simulatedoutbreak7>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
}
}
sensitivityseas[2]=nu/sum(simulatedzseasoutbreak7>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
}
}
sensitivity[8]=nu/sum(simulatedoutbreak8>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
}
}
sensitivity[9]=nu/sum(simulatedoutbreak9>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
}
}
sensitivity[10]=nu/sum(simulatedoutbreak10>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
}
}
sensitivity[11]=nu/sum(simulatedoutbreak11>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
}
}
sensitivity[12]=nu/sum(simulatedoutbreak12>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]>0)
}
}
sensitivity[13]=nu/sum(simulatedoutbreak13>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
}
}
sensitivity[14]=nu/sum(simulatedoutbreak14>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
}
}
sensitivity[15]=nu/sum(simulatedoutbreak15>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
}
}
sensitivity[16]=nu/sum(simulatedoutbreak16>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
}
}
sensitivityseas[3]=nu/sum(simulatedzseasoutbreak16>0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
}
}
sensitivity[17]=nu/sum(simulatedoutbreak17>0)
#--------------------------------------------------------
# Specificity: non-alarm time points that coincide with non-outbreak time points over
# the last 49*days observations, divided by the number of non-outbreak time points in
# rows 2206:2548 (i.e. the last 49*days rows when days = 7 and years = 7)
specificity=rep(0,17)
specificityseas=rep(0,3)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall1[nrow(alarmall1)-i+1,j]==0 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]==0)
}
}
specificity[1]=nu/sum(simulatedoutbreak1[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall2[nrow(alarmall2)-i+1,j]==0 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]==0)
}
}
specificity[2]=nu/sum(simulatedoutbreak2[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall3[nrow(alarmall3)-i+1,j]==0 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]==0)
}
}
specificity[3]=nu/sum(simulatedoutbreak3[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall4[nrow(alarmall4)-i+1,j]==0 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]==0)
}
}
specificity[4]=nu/sum(simulatedoutbreak4[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall5[nrow(alarmall5)-i+1,j]==0 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]==0)
}
}
specificity[5]=nu/sum(simulatedoutbreak5[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==0 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]==0)
}
}
specificity[6]=nu/sum(simulatedoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall6[nrow(alarmall6)-i+1,j]==0 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]==0)
}
}
specificityseas[1]=nu/sum(simulatedzseasoutbreak6[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==0 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]==0)
}
}
specificity[7]=nu/sum(simulatedoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall7[nrow(alarmall7)-i+1,j]==0 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]==0)
}
}
specificityseas[2]=nu/sum(simulatedzseasoutbreak7[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall8[nrow(alarmall8)-i+1,j]==0 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]==0)
}
}
specificity[8]=nu/sum(simulatedoutbreak8[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall9[nrow(alarmall9)-i+1,j]==0 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]==0)
}
}
specificity[9]=nu/sum(simulatedoutbreak9[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall10[nrow(alarmall10)-i+1,j]==0 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]==0)
}
}
specificity[10]=nu/sum(simulatedoutbreak10[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall11[nrow(alarmall11)-i+1,j]==0 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]==0)
}
}
specificity[11]=nu/sum(simulatedoutbreak11[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall12[nrow(alarmall12)-i+1,j]==0 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]==0)
}
}
specificity[12]=nu/sum(simulatedoutbreak12[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall13[nrow(alarmall13)-i+1,j]==0 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]==0)
}
}
specificity[13]=nu/sum(simulatedoutbreak13[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall14[nrow(alarmall14)-i+1,j]==0 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]==0)
}
}
specificity[14]=nu/sum(simulatedoutbreak14[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall15[nrow(alarmall15)-i+1,j]==0 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]==0)
}
}
specificity[15]=nu/sum(simulatedoutbreak15[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==0 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]==0)
}
}
specificity[16]=nu/sum(simulatedoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall16[nrow(alarmall16)-i+1,j]==0 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]==0)
}
}
specificityseas[3]=nu/sum(simulatedzseasoutbreak16[2206:2548,]==0)
nu=0
for(j in 1:nsim){
for(i in (49*days):1){
nu=nu+(alarmall17[nrow(alarmall17)-i+1,j]==0 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]==0)
}
}
specificity[17]=nu/sum(simulatedoutbreak17[2206:2548,]==0)
#----------------------------------------------
# Timeliness: for each simulation, the fraction of the outbreak period that elapses
# before the first alarm that coincides with an outbreak (counted as 1 when the
# outbreak is never detected), averaged over the nsim simulations
timeliness=rep(0,17)
timelinessseas=rep(0,3)
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak1[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak1[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall1[nrow(alarmall1)-i+1,j]==1 & simulatedoutbreak1[nrow(simulatedoutbreak1)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak1)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[1]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak2[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak2[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall2[nrow(alarmall2)-i+1,j]==1 & simulatedoutbreak2[nrow(simulatedoutbreak2)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak2)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[2]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak3[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak3[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall3[nrow(alarmall3)-i+1,j]==1 & simulatedoutbreak3[nrow(simulatedoutbreak3)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak3)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[3]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak4[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak4[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall4[nrow(alarmall4)-i+1,j]==1 & simulatedoutbreak4[nrow(simulatedoutbreak4)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak4)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[4]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak5[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak5[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall5[nrow(alarmall5)-i+1,j]==1 & simulatedoutbreak5[nrow(simulatedoutbreak5)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak5)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[5]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak6[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak6[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedoutbreak6[nrow(simulatedoutbreak6)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak6)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[6]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak6[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak6[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall6[nrow(alarmall6)-i+1,j]==1 & simulatedzseasoutbreak6[nrow(simulatedzseasoutbreak6)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak6)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[1]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak7[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak7[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedoutbreak7[nrow(simulatedoutbreak7)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak7)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[7]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak7[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak7[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall7[nrow(alarmall7)-i+1,j]==1 & simulatedzseasoutbreak7[nrow(simulatedzseasoutbreak7)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak7)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[2]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak8[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak8[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall8[nrow(alarmall8)-i+1,j]==1 & simulatedoutbreak8[nrow(simulatedoutbreak8)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak8)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[8]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak9[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak9[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall9[nrow(alarmall9)-i+1,j]==1 & simulatedoutbreak9[nrow(simulatedoutbreak9)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak9)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[9]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak10[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak10[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall10[nrow(alarmall10)-i+1,j]==1 & simulatedoutbreak10[nrow(simulatedoutbreak10)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak10)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[10]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak11[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak11[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall11[nrow(alarmall11)-i+1,j]==1 & simulatedoutbreak11[nrow(simulatedoutbreak11)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak11)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[11]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak12[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak12[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall12[nrow(alarmall12)-i+1,j]==1 & simulatedoutbreak12[nrow(simulatedoutbreak12)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak12)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[12]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak13[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak13[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall13[nrow(alarmall13)-i+1,j]==1 & simulatedoutbreak13[nrow(simulatedoutbreak13)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak13)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[13]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak14[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak14[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall14[nrow(alarmall14)-i+1,j]==1 & simulatedoutbreak14[nrow(simulatedoutbreak14)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak14)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[14]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak15[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak15[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall15[nrow(alarmall15)-i+1,j]==1 & simulatedoutbreak15[nrow(simulatedoutbreak15)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak15)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[15]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak16[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak16[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedoutbreak16[nrow(simulatedoutbreak16)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak16)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[16]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedzseasoutbreak16[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedzseasoutbreak16[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall16[nrow(alarmall16)-i+1,j]==1 & simulatedzseasoutbreak16[nrow(simulatedzseasoutbreak16)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedzseasoutbreak16)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timelinessseas[3]=(ss+n)/nsim
n=0
ss=0
for(j in 1:nsim){
for(i in (52*days*years):(52*days*(years-1)+3*days+1)){
test=(simulatedoutbreak17[i,j]>0)
if(test==T){
r2=i
break
}
}
for(i in (52*(years-1)*days+3*days+1):(52*years*days)){
test=(simulatedoutbreak17[i,j]>0)
if(test==T){
r1=i
break
}
}
for(i in (49*days):1){
test=(alarmall17[nrow(alarmall17)-i+1,j]==1 & simulatedoutbreak17[nrow(simulatedoutbreak17)-i+1,j]>0)
if(test==T){
ss=ss+(nrow(simulatedoutbreak17)-i+1-r1)/(r2-r1+1)
break
}
}
if(i==1 & test!=T){n=n+1}
}
timeliness[17]=(ss+n)/nsim
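#--------------------------------------------------------
# Editorial sketch (illustrative only, not called below): the per-signal loops above
# repeat the same counting pattern. A helper along these lines could compute the
# alarm/outbreak overlap for one pair of matrices; the name is hypothetical and assumes
# the same layout as alarmall1/simulatedoutbreak1 (time points in rows, simulations in
# columns, evaluation window = last 49*days rows).
detection_count <- function(alarm, outbreak, window = 49*days){
  idx <- (nrow(alarm) - window + 1):nrow(alarm)
  sum(alarm[idx, ] == 1 & outbreak[idx, ] > 0)
}
# e.g. sensitivity[1] equals detection_count(alarmall1, simulatedoutbreak1) / sum(simulatedoutbreak1 > 0)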
#==================================
# Summary=data.frame(fpr,pod,sensitivity,specificity,timeliness)
# row.names(Summary)=c("sigid1","sigid2","sigid3","sigid4","sigid5","sigid6","sigid7","sigid8","sigid9","sigid10","sigid11","sigid12","sigid13","sigid14","sigid15","sigid16","sigid17")
#
# Summaryseas=data.frame(fprseas,podseas,sensitivityseas,specificityseas,timelinessseas)
# row.names(Summaryseas)=c("sigid6","sigid7","sigid16")
#
#
# fix(Summary)
# fix(Summaryseas)
#
summary1=data.frame(fpr, pod, sensitivity, specificity, timeliness)
row.names(summary1)=c("sigid1", "sigid2", "sigid3", "sigid4", "sigid5",
"sigid6", "sigid7", "sigid8", "sigid9", "sigid10",
"sigid11", "sigid12", "sigid13", "sigid14", "sigid15",
"sigid16","sigid17")
summary2=data.frame(fprseas, podseas, sensitivityseas,
specificityseas, timelinessseas)
row.names(summary2)=c("sigid6", "sigid7", "sigid16")
if(!dir.exists(file.path(myDir, "output"))){
dir.create(file.path(myDir, "output"))
}
write.csv(summary1, file.path(myDir, "output", "summaryC3-18.csv"),
row.names=FALSE)
write.csv(summary2, file.path(myDir, "output", "summarySeasC3-18.csv"),
row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isIntegerOrNaOrNanVectorOrNull.R
\name{isIntegerOrNaOrNanVectorOrNull}
\alias{isIntegerOrNaOrNanVectorOrNull}
\title{Wrapper for the checkarg function, using specific parameter settings.}
\usage{
isIntegerOrNaOrNanVectorOrNull(argument, default = NULL, stopIfNot = FALSE,
n = NA, message = NULL, argumentName = NULL)
}
\arguments{
\item{argument}{See checkarg function.}
\item{default}{See checkarg function.}
\item{stopIfNot}{See checkarg function.}
\item{n}{See checkarg function.}
\item{message}{See checkarg function.}
\item{argumentName}{See checkarg function.}
}
\value{
See checkarg function.
}
\description{
This function can be used in 3 ways:\enumerate{
\item Return TRUE or FALSE depending on whether the argument checks are
passed. This is suitable e.g. for if statements that take further action
if the argument does not pass the checks.\cr
\item Throw an exception if the argument does not pass the checks. This is
suitable e.g. when no further action needs to be taken other than
throwing an exception if the argument does not pass the checks.\cr
\item Same as (2) but by supplying a default value, a default can be assigned
in a single statement, when the argument is NULL. The checks are still
performed on the returned value, and an exception is thrown when not
passed.\cr
}
}
\details{
Actual call to checkarg: checkarg(argument, "N", default = default, stopIfNot = stopIfNot, nullAllowed = TRUE, n = NA, zeroAllowed = TRUE, negativeAllowed = TRUE, positiveAllowed = TRUE, nonIntegerAllowed = FALSE, naAllowed = TRUE, nanAllowed = TRUE, infAllowed = FALSE, message = message, argumentName = argumentName)
}
\examples{
isIntegerOrNaOrNanVectorOrNull(2)
# returns TRUE (argument is valid)
isIntegerOrNaOrNanVectorOrNull("X")
# returns FALSE (argument is invalid)
#isIntegerOrNaOrNanVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isIntegerOrNaOrNanVectorOrNull(2, default = 1)
# returns 2 (the argument, rather than the default, since it is not NULL)
#isIntegerOrNaOrNanVectorOrNull("X", default = 1)
# throws exception with message defined by message and argumentName parameters
isIntegerOrNaOrNanVectorOrNull(NULL, default = 1)
# returns 1 (the default, rather than the argument, since it is NULL)
}
|
/man/isIntegerOrNaOrNanVectorOrNull.Rd
|
no_license
|
cran/checkarg
|
R
| false | true | 2,488 |
rd
|
#' Get Kappa problem type function
#'
#' This function applies a test to identify where the kappa solutions are placed:
#' K0 = Full agreement (diagonal matrix)
#' K1 = Any other case
#' @param Mx Matrix. Matrix reduced.
#' @keywords Mx
#' @export
#' @examples
#' GetKappaProblemType(matrix(c(1,2,0,3,4,0,0,0,1),3,3)) # returns "K1"
#' GetKappaProblemType(matrix(c(1,0,0,0,2,0,0,0,3),3,3)) # returns "K0"
GetKappaProblemType <- function(Mx){
#Mx matrix without insignificant rows and columns
Xr = margin.table(Mx,1)
Xc = margin.table(Mx,2)
Xt = sum(Xr)
diag.Mx = diag(Mx)
sum.diag = sum(diag.Mx)
if (sum.diag == Xt) {
ktp = "K0"
return(ktp)
}
else if (sum.diag < Xt) {
ktp = "K1"
return(ktp)
}
}
#GetKappaProblemType(matrix(c(2,0,3,5),2,2))
|
/R/GetKappaProblemType.r
|
no_license
|
cran/Delta
|
R
| false | false | 786 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chromR_example.R
\docType{data}
\name{chromR_example}
\alias{chromR_example}
\alias{chrom}
\title{Example chromR object.}
\format{A chromR object}
\description{
An example chromR object containing parts of the *Phytophthora infestans* genome.
}
\details{
This data is a subset of the pinfsc50 dataset.
It has been subset to positions between 500 and 600 kbp.
The coordinate systems of the vcf and gff file have been altered by subtracting 500,000.
This results in a 100 kbp section of supercontig_1.50 that has positional data ranging from 1 to 100 kbp.
}
\examples{
data(chromR_example)
}
\keyword{datasets}
|
/man/chromR_example.Rd
|
no_license
|
aichangji/vcfR
|
R
| false | true | 691 |
rd
|
#install.packages("MatrixEQTL")
# source("Matrix_eQTL_R/Matrix_eQTL_engine.r");
library(MatrixEQTL)
## Location of the package with the data files.
base.dir = find.package('MatrixEQTL');
# base.dir = '.';
## Settings
# Linear model to use, modelANOVA, modelLINEAR, or modelLINEAR_CROSS
useModel = modelLINEAR; # modelANOVA, modelLINEAR, or modelLINEAR_CROSS
# Genotype file name
SNP_file_name = "../Eurobats_chr17p13.2_genotypes_for_colocalizations.dosage";
snps_location_file_name = "../Eurobats_chr17p13.2_locations_for_colocalizations.txt";
# Gene expression file name
expression_file_name = "../../Adipose expression data/FINAL_logTPMs_and_activities/Filtered_Eurobats_adipose_unnormalized_activities_from_logTPM_for_4213_regulators.txt";
gene_location_file_name = "../../Adipose expression data/FINAL_logTPMs_and_activities/Hg19_gene_map_for_13776_expressed_genes_in_Eurobats_adipose.map";
# Covariates file name
# Set to character() for no covariates
covariates_file_name = "../Filtered_Eurobats_adipose_covars_no_PEER.txt";
# Output file name
output_file_name_cis = tempfile();
output_file_name_tra = tempfile();
# Only associations significant at this level will be saved
pvOutputThreshold_cis = 1;
pvOutputThreshold_tra = 1;
# Error covariance matrix
# Set to numeric() for identity.
errorCovariance = numeric();
# errorCovariance = read.table("Sample_Data/errorCovariance.txt");
# Distance for local gene-SNP pairs
cisDist = 1e6;
## Load genotype data
snps = SlicedData$new();
snps$fileDelimiter = "\t"; # the TAB character
snps$fileOmitCharacters = "NA"; # denote missing values;
snps$fileSkipRows = 1; # one row of column labels
snps$fileSkipColumns = 1; # one column of row labels
snps$fileSliceSize = 2000; # read file in slices of 2,000 rows
snps$LoadFile(SNP_file_name);
## Load gene expression data
gene = SlicedData$new();
gene$fileDelimiter = "\t"; # the TAB character
gene$fileOmitCharacters = "NA"; # denote missing values;
gene$fileSkipRows = 1; # one row of column labels
gene$fileSkipColumns = 1; # one column of row labels
gene$fileSliceSize = 2000; # read file in slices of 2,000 rows
gene$LoadFile(expression_file_name);
## Normal quantile transformation of gene expression data
## (rank-based inverse normal transform applied to each gene/row)
for( sl in 1:length(gene) ) {
mat = gene[[sl]];
mat = t(apply(mat, 1, rank, ties.method = "average"));
mat = qnorm(mat / (ncol(gene)+1));
gene[[sl]] = mat;
}
rm(sl, mat);
## Load covariates
cvrt = SlicedData$new();
cvrt$fileDelimiter = "\t"; # the TAB character
cvrt$fileOmitCharacters = "NA"; # denote missing values;
cvrt$fileSkipRows = 1; # one row of column labels
cvrt$fileSkipColumns = 1; # one column of row labels
if(length(covariates_file_name)>0) {
cvrt$LoadFile(covariates_file_name);
}
## Run the analysis
snpspos = read.table(snps_location_file_name, header = TRUE, stringsAsFactors = FALSE);
genepos = read.table(gene_location_file_name, header = TRUE, stringsAsFactors = FALSE);
me = Matrix_eQTL_main(
snps = snps,
gene = gene,
cvrt = cvrt,
output_file_name = output_file_name_tra,
pvOutputThreshold = pvOutputThreshold_tra,
useModel = useModel,
errorCovariance = errorCovariance,
verbose = TRUE,
output_file_name.cis = output_file_name_cis,
pvOutputThreshold.cis = pvOutputThreshold_cis,
snpspos = snpspos,
genepos = genepos,
cisDist = cisDist,
pvalue.hist = "qqplot",
min.pv.by.genesnp = FALSE,
noFDRsaveMemory = FALSE);
unlink(output_file_name_tra);
unlink(output_file_name_cis);
## Results:
cat('Analysis done in: ', me$time.in.sec, ' seconds', '\n');
cat('Detected local eQTLs:', '\n');
cis_eqtls<-me$cis$eqtls
cat('Detected distant eQTLs:', '\n');
trans_eqtls<-me$trans$eqtls
## Plot the Q-Q plot of local and distant p-values
jpeg("Eurobats_adipose_chr17p13.2_aQTLs_from_unnormalized_activities.jpg")
plot(me)
dev.off()
write.table(cis_eqtls,"Eurobats_adipose_chr17p13.2_cis-aQTLs_from_unnormalized_activities.txt",sep="\t",quote = FALSE,row.names=FALSE)
write.table(trans_eqtls,"Eurobats_adipose_chr17p13.2_trans-aQTLs_from_unnormalized_activities.txt",sep="\t",quote = FALSE,row.names=FALSE)
q(save="no")
|
/aQTL_analyses/R_script_for_Eurobats_adipose_chr17p13.2_matrix_aQTL.R
|
no_license
|
hoskinsjw/aQTL2021
|
R
| false | false | 4,187 |
r
|
#' \code{follower} returns a matrix of the following vehicle
#'
#' @return A matrix of speed, location data by time.
#' @param veh, a number
#' @param df1df2, a matrix
#' @usage follower(veh, df1df2)
#' @export
follower <- function(veh, df1df2) {
ucol <- 3*(veh-2) + 1
xcol <- 3*(veh-2) + 2
ycol <- 3*(veh-2) + 3
u <- df1df2[,ucol]
x <- df1df2[,xcol]
y <- df1df2[,ycol]
df1 <- data.frame(u,x,y)
ucol <- 3*(veh-1) + 1
xcol <- 3*(veh-1) + 2
ycol <- 3*(veh-1) + 3
u <- df1df2[,ucol]
x <- df1df2[,xcol]
y <- df1df2[,ycol]
df2 <- as.matrix(data.frame(u,x,y))
return(df2)
}
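# Illustrative usage (hypothetical data): df1df2 stores (u, x, y) triples for
# consecutive vehicles in column blocks of three, so with veh = 2 the function
# reads columns 1:3 and returns the (u, x, y) block in columns 4:6.
# demo <- matrix(rnorm(120), ncol = 6) # 20 time steps, 2 vehicles
# follower(2, demo)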
|
/R/follower.R
|
permissive
|
PJOssenbruggen/Basic
|
R
| false | false | 646 |
r
|
library(splines)
library(dplyr)
library(tidyr)
library(ggplot2)
theme_set(theme_bw() + theme(panel.spacing=grid::unit(0,"lines")))
n <- 1e5
p_0 <- 0.5
beta_x <- 0.5
beta_z <- 1e-0
seed <- 403
set.seed(seed)
beta_0 <- qlogis(p_0)
print(beta_0)
x <- rnorm(n)
z <- rnorm(n)
ran <- seq(-3, 3, length.out=201)
pfun <- function(beta_0, beta_x, beta_z){
o <- beta_0 + beta_x*x + beta_z*z
res <- rbinom(n, size=1, prob=plogis(o))
smod <- glm(res ~ ns(x, 4), family="binomial")
return(predict(smod
    , newdata=data.frame(x=ran)
))
}
beta_z <- seq(1,5)
plst <- list()
for (b in beta_z){
name <- paste0("beta_z", b)
plst[[name]] <- pfun(beta_0, beta_x, b)
}
print(plst)
pplot <- (data.frame(ran, plst)
%>% gather(Beta_z, Value, -ran)
%>% ggplot(aes(x = ran, y = Value, group = Beta_z, colour = Beta_z))
+ geom_line()
+ scale_color_manual(values = beta_z)
)
print(pplot)
quit()
|
/aphrc/wash/binary_random.R
|
no_license
|
CYGUBICKO/projects
|
R
| false | false | 891 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering_kmeans.R
\name{riem.kmeans}
\alias{riem.kmeans}
\title{K-Means Clustering}
\usage{
riem.kmeans(
riemobj,
k = 2,
geometry = c("intrinsic", "extrinsic"),
maxiter = 10,
nstart = 5,
algorithm = c("MacQueen", "Lloyd"),
init = c("plus", "random")
)
}
\arguments{
\item{riemobj}{a S3 \code{"riemdata"} class for \eqn{N} manifold-valued data.}
\item{k}{the number of clusters.}
\item{geometry}{(case-insensitive) name of geometry; either geodesic (\code{"intrinsic"}) or embedded (\code{"extrinsic"}) geometry.}
\item{maxiter}{the maximum number of iterations allowed.}
\item{nstart}{the number of random starts.}
\item{algorithm}{(case-insensitive) name of an algorithm to be run. (default: \code{"MacQueen"})}
\item{init}{(case-insensitive) name of an initialization scheme. (default: \code{"plus"})}
}
\value{
a named list containing\describe{
\item{means}{a 3d array where each slice along 3rd dimension is a matrix representation of class mean.}
\item{cluster}{a length-\eqn{N} vector of class labels (from \eqn{1:k}).}
\item{score}{within-cluster sum of squares (WCSS).}
}
}
\description{
Given \eqn{N} observations \eqn{X_1, X_2, \ldots, X_N \in \mathcal{M}},
perform k-means clustering by minimizing within-cluster sum of squares (WCSS).
Since the problem is NP-hard and sensitive to the initialization, we provide an
option with multiple starts and return the best result with respect to WCSS.
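Formally, writing \eqn{C_k} for the \eqn{k}-th cluster and \eqn{\mu_k} for its mean,
the objective can be written as
\deqn{\mathrm{WCSS} = \sum_{k=1}^{K} \sum_{x_i \in C_k} \rho(x_i, \mu_k)^2,}
where \eqn{\rho} is the distance induced by the chosen geometry.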
}
\examples{
#-------------------------------------------------------------------
# Example on Sphere : a dataset with three types
#
# class 1 : 10 perturbed data points near (1,0,0) on S^2 in R^3
# class 2 : 10 perturbed data points near (0,1,0) on S^2 in R^3
# class 3 : 10 perturbed data points near (0,0,1) on S^2 in R^3
#-------------------------------------------------------------------
## GENERATE DATA
mydata = list()
for (i in 1:10){
tgt = c(1, stats::rnorm(2, sd=0.1))
mydata[[i]] = tgt/sqrt(sum(tgt^2))
}
for (i in 11:20){
tgt = c(rnorm(1,sd=0.1),1,rnorm(1,sd=0.1))
mydata[[i]] = tgt/sqrt(sum(tgt^2))
}
for (i in 21:30){
tgt = c(stats::rnorm(2, sd=0.1), 1)
mydata[[i]] = tgt/sqrt(sum(tgt^2))
}
myriem = wrap.sphere(mydata)
mylabs = rep(c(1,2,3), each=10)
## K-MEANS WITH K=2,3,4
clust2 = riem.kmeans(myriem, k=2)
clust3 = riem.kmeans(myriem, k=3)
clust4 = riem.kmeans(myriem, k=4)
## MDS FOR VISUALIZATION
mds2d = riem.mds(myriem, ndim=2)$embed
## VISUALIZE
opar <- par(no.readonly=TRUE)
par(mfrow=c(2,2), pty="s")
plot(mds2d, pch=19, main="true label", col=mylabs)
plot(mds2d, pch=19, main="K=2", col=clust2$cluster)
plot(mds2d, pch=19, main="K=3", col=clust3$cluster)
plot(mds2d, pch=19, main="K=4", col=clust4$cluster)
par(opar)
}
\references{
\insertRef{lloyd_least_1982}{Riemann}
\insertRef{macqueen_methods_1967}{Riemann}
}
\seealso{
\code{\link{riem.kmeanspp}}
}
\concept{clustering}
|
/Riemann/man/riem.kmeans.Rd
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | true | 2,939 |
rd
|
#The main aim is to optimise the allocation of the states and the transitions between them
#So we use an EM approach: estimate the transition probability and emission probability
#as if the current parameters were the optimum, and then use those probabilities to find
#the most likely parameters of the model
#OK we are going to initialise so that we have what we need for the first E-Step
#We are then going to calculate the E-Step and use it to recalculate the parameters and so on
#Until convergence.
#The E-Step
#Forward Step, is the probability of obtaining a certain sequence from model H
#Calculate the probability of seeing the sequence y1..t and being in state i at the final observation yt;
#this depends only on the forward probabilities of the states at t-1,
#The probability of seeing the first observation y1 of the sequence and being in state i
#will then be P(y1 & being in state 1) = P(S1)*P(y1)
#But we have multiple possibilities for the first state, either state 1, 2, .. N possible states that we could start from
#The second observation then I could have come from any state and then ended in state j
#So I need to sum for all the possible states I could have come from ending in state 2
#Hence, the probability of being in state 2 is P(S2)*P(y2|S2). The probability of State 2 is then
#The sum over all the previous state probabilities * the probability of transition to this state
#In general then, we could write this as follows
#P(y,j,s) = Current state probability * sum(Previous State probabilities*transition)
#This will have to be done recursively for every sequence of observation and you will then end up
#with a probability of ending in every state for a sequence of observation. If we add them up, this will give us the probability
#of the observation given our model
#This gave us the probability of certain state given the starting model parameters
#Backward Step, this is the probability of looking at the model backwards rather forward.
#We start from the end state and then go backward asking what is the probability of observation given the state
#The probability of being in previous state j at time t is a function of being in state i at t+1
#P(End State T) = 1, just start with one
#P(Observing j of the observation in the Current State) = P(Current State)*P(J Observation|Current state i)*P(Transition from Current State to Previous state)
#Since you could have gone to multiple states, then you need to sum over all the states that you could have gone to
#P(O,length-1,Ending State) = sum(P(Observation at length-1|current state)*P(current->next)*P(Next)) for all the next states
#You can do this iteratively.
#At the End you will get probability for every starting state
#This will then give us the probability of Observation up to a certain length given that I am in a certain time and certain state
#Ok, now the first aim is to find the probability of being in a certain state given the observation and model parameters
#P(S|O,M) = P(O,S,M)/P(O,M), since P(O,S,M) = P(S|O,M)P(O|M)P(M)
#P(O,S,M) = P(O,S|M)P(M)
#So P(S|O,M) = P(O,S|M)/P(O|M)
#Ok, P(O,S|M) = forward * backward: the forward pass gives P(y1..t, St | M) and the backward pass gives P(yt+1..T | St, M)
#Now, the denominator is P(O|M), which is simply the marginal over all the states of P(O,S|M)
#So the probability of ending up in state i is then forward*backward of state i/sum over all states
#The second aim is to find the probability of a sequence of states as this will help in the final transition matrix estimation
#The probability of being in state i and then going to state j globally
#P(Si,Si+1|O,M) similar to the above = P(Si,Si+1,O|M)/P(O|M)
#Numerator, forward probability P(Si|M)*P(Si+1|M)*transition probability from Si to Si+1
#Deonominator is simply summing over all possible si and si+1, which is actually Backward probability
#Ok from the first aim, for every state, summing over all observations Sum(P(S|O,M)) for all lengths of observations,
#will give us the probability of being in state S|M, which is what we started with as our assumption
#Similarly summing over all the length of observations, will give us the transition matrix, again one of our initial assumptions
#Remaining, is the probability of our observation given a certain state
#To covert them to probabilities, You essentially, want to sum P(S|O,M) for the first observation across all the sequences, for the first state,
#Then the second state and so on. The final probability will be just the normalised quantity
#The transition from Si to Si+1 will be the transition function above for all the observations irrespective of the position of the observation
#The ratio between the transitions from Si to Si+1 relative to all transitions away from Si
#The probability of the output for a gaussian mixture is as follows will be the sum over all components with the associated parameters for this component
#Updating this probability will be similar to the normal EM approach.
#Ok, let us remember, we have calculated the weights of every component, as the normalised likelihood for this observation
#In this case, we also have the states, so we split things further.
#First, the probability that the lth component in state i has generated a particular observation t is defined as:
#Probability of state i * probability of observation given mixture l * weight of mixture l / sum over all components at state i
#We have the initial weights to calculate this.
#We can now get the updates as weight as before, but instead of summing for one observation, we get it for two states
#So on
#OK let us start with two models of the same input, essentially we are looking at the joint PD of the
#independant and dependant variables and then estimating the coefficients. The states will be
#looking at the Y as the thing to model by an HMM switching between multiple models explaining the Y
#Not the Y itself
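#---------------------------------------------------------------------------
# Editorial sketch (illustrative only, not used by the script below): the
# forward/backward recursions described above written once for a generic
# N-state chain. All names are hypothetical; 'init' holds the initial state
# probabilities, 'trans' is the N x N transition matrix (rows = from,
# cols = to) and 'emis' is a Tn x N matrix of emission densities P(y_t|state).
# No scaling is applied, so very long series may underflow numerically.
fb_sketch <- function(init, trans, emis){
  Tn <- nrow(emis); N <- ncol(emis)
  alpha <- matrix(0, Tn, N)
  beta <- matrix(0, Tn, N)
  alpha[1,] <- init * emis[1,]
  for(t in 2:Tn) alpha[t,] <- (alpha[t-1,] %*% trans) * emis[t,]
  beta[Tn,] <- 1
  for(t in (Tn-1):1) beta[t,] <- trans %*% (emis[t+1,] * beta[t+1,])
  gamma <- alpha * beta
  gamma <- gamma/rowSums(gamma)           # P(state at t | all observations)
  list(alpha = alpha, beta = beta, gamma = gamma)
}
#---------------------------------------------------------------------------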
X = cbind(rep(1,100),rnorm(100,2,3),rnorm(100,3,10))
B = rbind(c(1,2,3),c(0.5,2,5))
Y = c(X[1:50,]%*%(B[1,]),X[51:100,]%*%(B[2,]))
B = rbind(c(1,1,1),c(2,2,2))
data = Y
plot(data, type="l")
#we can see that we have a mixture of two distributions
#Let us initialise our Mixture EM data for every state
#Let us now initialise the state probability i.e. P(Si)
S1 = 0.5
S2 = 0.5
#Transition probability self, then the other
TS1 = c(0.5, 0.5)
TS2 = c(0.5, 0.5)
#Emission probability is calculated as in the EM approach, this makes sense that we have a probability for every observation, since they are not the same observation
#Except that we don't have a mixture in the states, essentially we want every state to map to one component of the mixture
s = max(apply(X,2,sd))*1.5
bs1 = dnorm((Y-(X%*%(B[1,]))),mean = 0,s)
bs2 = dnorm((Y-(X%*%(B[2,]))),mean = 0,s)
for(x in 1:10)
{
#OK, now we are set to do the first HMM iteration
#Forward probability
#As mentioned above of the observation that we have, we will calculate the forward probability
alpha = matrix(ncol=2,nrow=length(data))
#Forward for state 1 and first of the observation
alpha[1,1] = S1 * bs1[1] #Probability of State1 * Probability of observing the first number
#Forward for state 2 and first of the observation
alpha[1,2] = S2 * bs2[1] #Probability of State1 * Probability of observing the first number
#Second is just conditional on the previous one (could be either s1 or s2) and ENDING in state 1
alpha[2,1] = alpha[1,1] * TS1[1] * bs1[2] + # ending in s1, P(S1) * Self transition * P(O2|S1)
alpha[1,2] * TS2[2] * bs1[2] #ending in s1, P(S2) * T[S2->S1] * P(O2|S1)
alpha[2,2] = alpha[1,2] * TS2[1] * bs2[2] + # ending in s2, P(S2) * Self transition * P(O2|S2)
alpha[1,1] * TS1[2] * bs2[2] #ending in s2, P(S1) * T[S2->S1] * P(O2|S1)
for(i in 2:length(data))
{
#Ok, let us see the pattern
#Probability of Observation given state 1 * sum of ((probability of previous state1 * self probability)+
#(Probability of previous state2 * transition from S2 to S1))
alpha[i,1] = bs1[i] * (alpha[i-1,1] * TS1[1] + alpha[i-1,2] * TS2[2])
#Similarly
alpha[i,2] = bs2[i] * (alpha[i-1,2] * TS2[1] + alpha[i-1,1] * TS1[2])
}
#Probability of the whole observation across the two states
po = alpha[length(data),1]+alpha[length(data),2]
#OK now the backward probability
beta = matrix(ncol=2,nrow=length(data))
beta[length(data),1] = 1
beta[length(data),2] = 1
#Stand at the previous step and then look ahead
#Probability at length(data)-1 at state 1 = T[S1->S1] * P(O at length(data)|S1) * B(length(data),S1) + T[S1->S2] * P(O at length(data)|S2) * B(length(data),S2)
beta[length(data)-1,1] = TS1[1] * bs1[length(data)] * beta[length(data),1] + TS1[2] * bs2[length(data)] * beta[length(data),2]
beta[length(data)-1,2] = TS2[1] * bs2[length(data)] * beta[length(data),2] + TS2[2] * bs1[length(data)] * beta[length(data),1]
#pattern
for(j in (length(data)-1):1)
{
beta[j,1] = TS1[1] * bs1[j+1] * beta[j+1,1] + TS1[2] * bs2[j+1] * beta[j+1,2]
beta[j,2] = TS2[1] * bs2[j+1] * beta[j+1,2] + TS2[2] * bs1[j+1] * beta[j+1,1]
}
#Gamma, which is the probability of the state given the observation
gamma = matrix(ncol=2, nrow=length(data))
gamma[,1] = alpha[,1]*beta[,1]
gamma[,2] = alpha[,2]*beta[,2]
gamma = t(apply(gamma,1,function(x){x/sum(x)}))
#Eta, which is the transition probability from state i to j at data length t
eta1 = matrix(ncol=2,nrow=(length(data)-1)) #for every data observation
eta2 = matrix(ncol=2,nrow=(length(data)-1)) #for every data observation
#going from 1 to 1 across the first observation
eta1[1,1] = (gamma[1,1] * TS1[1] * bs1[2] * beta[2,1])/beta[1,1]
#going from 1 to 2 across the first observation
eta1[1,2] = (gamma[1,1] * TS1[2] * bs2[2] * beta[2,2])/beta[1,1]
#going from 2 to 2 across the first observation
eta2[1,1] = (gamma[1,2] * TS2[1] * bs2[2] * beta[2,2])/beta[1,2]
#going from 1 to 2 across the first observation
eta2[1,2] = (gamma[1,2] * TS2[2] * bs1[2] * beta[2,1])/beta[1,2]
#Pattern
for(t in 1:(length(data)-1))
{
#going from 1 to 1 across the first observation
eta1[t,1] = (gamma[t,1] * TS1[1] * bs1[(t+1)] * beta[(t+1),1])/beta[t,1]
#going from 1 to 2 across the first observation
eta1[t,2] = (gamma[t,1] * TS1[2] * bs2[(t+1)] * beta[(t+1),2])/beta[t,1]
#going from 2 to 2 across the first observation
eta2[t,1] = (gamma[t,2] * TS2[1] * bs2[(t+1)] * beta[(t+1),2])/beta[t,2]
#going from 1 to 2 across the first observation
eta2[t,2] = (gamma[t,2] * TS2[2] * bs1[(t+1)] * beta[(t+1),1])/beta[t,2]
}
TS1[1] = sum(eta1[,1])/sum(gamma[1:(length(data)-1),1])
TS1[2] = sum(eta1[,2])/sum(gamma[1:(length(data)-1),1])
TS2[1] = sum(eta2[,1])/sum(gamma[1:(length(data)-1),2])
TS2[2] = sum(eta2[,2])/sum(gamma[1:(length(data)-1),2])
#if(gamma[1,1]==S1&&gamma[1,2]==S2) break;
S1 = gamma[1,1]
S2 = gamma[1,2]
#Update the regression coefficients for each state using the gamma weights
#(note: the textbook M-step for a mixture of regressions is a weighted least-squares
# fit, e.g. lm(Y ~ X[,-1], weights = gamma[,1]); multiplying x and y by the weights,
# as done below, is a simpler surrogate). The residual sd s is kept fixed here.
xA = X[,-1]*gamma[,1]
yA = Y*gamma[,1]
xB = X[,-1]*gamma[,2]
yB = Y*gamma[,2]
B = rbind(coef(lm(yA ~ xA)),coef(lm(yB ~ xB)))
bs1 = dnorm((Y-(X%*%(B[1,]))),mean = 0,s)
bs2 = dnorm((Y-(X%*%(B[2,]))),mean = 0,s)
print(B)
print(c(S1,S2))
print(c(TS1,TS2))
}
#Now we can get the state model for the data
StateModel = c("H","L")
print(StateModel[as.integer(apply(gamma,1,function(x){x[1]>x[2]}))+1])
colors = c("red","blue")
plot(data,type="b", col=colors[as.integer(apply(gamma,1,function(x){x[1]>x[2]}))+1])
|
/HMM_EM_GLM.R
|
no_license
|
rafiksalama/HiddenMarkovModel_EM_R
|
R
| false | false | 11,388 |
r
|
#The main aim is to obtimise the allocation of the states and the transition between them
#So we use an EM approach. Essentially estimate the transition probability and emission probability
#Should the current parameters be the most optimum and then use those probabilities to find
#The most likely parameters of the model
#OK we are going to initialise so that we have what we need for the first E-Step
#We are then going to calculate the E-Step and use it recalculate the parameters and so on
#Until convergence.
#The E-Step
#Forward Step, is the probability of obtaining a certain sequence from model H
#Calculate the probability of seeing the sequence y1..t and being in state i at the final observation yt
#is only dependent on the previous probability of being in state i-1 at t-1,
#The probability of seeing the first observation y1 of the sequecne and be in state i
#will then be P(y1 & being in state 1) = P(S1)*P(y1)
#But we have multiple possibilities for the first state, either state 1, 2, .. N possible states that we could start from
#The second observation then I could have come from any state and then ended in state j
#So I need to sum for all the possible states I could have come from ending in state 2
#Hence, the probability of being in state 2 is P(S2)*P(y2|S2). The probability of State 2 is then
#The sum over all the previous state probabilities * the probability of transition to this state
#In general then, we could write this as follows
#P(y,j,s) = Current state probability * sum(Previous State probabilities*transition)
#This will have to be done recursively for every sequence of observation and you will then end up
#with a probability of ending in every state for a sequence of observation. If we add them up, this will give us the probability
#of the observation given our model
#This gave us the probability of certain state given the starting model parameters
#Backward Step, this is the probability of looking at the model backwards rather forward.
#We start from the end state and then go backward asking what is the probability of observation given the state
#The probability of being in previous state j at time t is a function of being in state i at t+1
#P(End State T) = 1, just start with one
#P(Observing j of the observation in the Current State) = P(Current State)*P(J Observation|Current state i)*P(Transition from Current State to Previous state)
#Since you could have gone to multiple states, then you need to sum over all the states that you could have gone to
#P(O,length-1,Ending State) = sum(P(Observation at length-1|current state)*P(current->next)*P(Next)) for all the next states
#You can do this iteratively.
#At the End you will get probability for every starting state
#This will then give us the probability of Observation up to a certain length given that I am in a certain time and certain state
#Ok, now the first aim is to find the probability of being in a certain state given the observation and model parameters
#P(S|O,M) = P(O,S,M)/P(O,M), since P(O,S,M) = P(S|O,M)P(O|M)P(M)
#P(O,S,M) = P(O,S|M)P(M)
#So P(S|O,M) = P(O,S|M)/P(O|M)
#Ok, the P(O,S|M) = #forward P(O|S,M) #backward P(S|M), the forward probability gives is the P(S|M), the backward gives the probability of observation given states
#Now, the denominator is P(O|M), which is simply the marginal over all the states of P(O,S|M)
#So the probability of ending up in state i is then forward*backward of state i/sum over all states
#The second aim is to find the probability of a sequence of states as this will help in the final transition matrix estimation
#The probability of being in state i and then going to state j globally
#P(Si,Si+1|O,M) similar to the above = P(Si,Si+1,O|M)/P(O|M)
#Numerator: forward probability at Si * transition probability from Si to Si+1 * emission of the next observation in Si+1 * backward probability at Si+1
#Denominator: simply summing the numerator over all possible Si and Si+1, which again equals P(O|M)
#Ok, from the first aim, for every state, summing P(S|O,M) over all observations, i.e. over the whole length of the sequence,
#will give us the probability of being in state S|M, which is what we started with as our assumption
#Similarly summing over all the length of observations, will give us the transition matrix, again one of our initial assumptions
#What remains is the probability of our observation given a certain state.
#To convert them to probabilities, you essentially want to sum P(S|O,M) for the first observation across all the sequences, for the first state,
#then the second state and so on. The final probability will just be the normalised quantity.
#The transition from Si to Si+1 will be the transition function above for all the observations irrespective of the position of the observation
#The ratio between the transitions from Si to Si+1 relative to all transitions away from Si
#The output probability for a Gaussian mixture will be the sum over all components, each with the parameters associated with that component.
#Updating this probability will be similar to the normal EM approach.
#Ok, let us remember, we have calculated the weights of every component, as the normalised likelihood for this observation
#In this case, we also have the states, so we split things further.
#First, the probability that the lth component in state i have generated a particular observation t is defined as:
#Probability of state i * probability of observation given mixture l * weight of mixture l / sum over all components at state i
#We have the initial weights to calculate this.
#We can now get the weight updates as before, but instead of one set of mixture weights there is one set per state,
#and so on.
#OK, let us start with two models of the same input: essentially we are looking at the joint distribution of the
#independent and dependent variables and then estimating the coefficients. The states will be
#switching between multiple regression models explaining Y; it is the model assignment that follows the HMM,
#not Y itself.
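#--------------------------------------------------------------------------
#Side note (an illustrative addition, not part of the original derivation):
#for longer series the raw forward products underflow, so implementations
#usually rescale alpha at every step. A minimal sketch, assuming a K-state
#model with emission densities collected column-wise in a matrix `b`, a KxK
#transition matrix `A` (rows = from-state) and initial probabilities `pi0`;
#these names are placeholders and are not used by the script below.
scaled_forward <- function(b, A, pi0) {
  n <- nrow(b); K <- ncol(b)
  alpha <- matrix(0, n, K)
  scale <- numeric(n)
  alpha[1, ] <- pi0 * b[1, ]                       # P(Si) * P(y1 | Si)
  scale[1] <- sum(alpha[1, ]); alpha[1, ] <- alpha[1, ] / scale[1]
  for (t in 2:n) {
    alpha[t, ] <- (alpha[t - 1, ] %*% A) * b[t, ]  # recursion from above
    scale[t] <- sum(alpha[t, ]); alpha[t, ] <- alpha[t, ] / scale[t]
  }
  list(alpha = alpha, loglik = sum(log(scale)))    # log P(O | M) from the scales
}
#--------------------------------------------------------------------------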
X = cbind(rep(1,100),rnorm(100,2,3),rnorm(100,3,10))
B = rbind(c(1,2,3),c(0.5,2,5))
Y = c(X[1:50,]%*%(B[1,]),X[51:100,]%*%(B[2,]))
B = rbind(c(1,1,1),c(2,2,2))
data = Y
plot(data, type="l")
#we can see that we have mixture of two distributions
#Let us initialise our Mixture EM data for every state
#Let us now initialise the state probability i.e. P(Si)
S1 = 0.5
S2 = 0.5
#Transition probability self, then the other
TS1 = c(0.5, 0.5)
TS2 = c(0.5, 0.5)
#Emission probability is calculated as in the EM approach, this makes sense that we have a probability for every observation, since they are not the same observation
#Except that we don't have a mixture in the states, essentially we want every state to map to one component of the mixture
s = max(apply(X,2,sd))*1.5
bs1 = dnorm((Y-(X%*%(B[1,]))),mean = 0,s)
bs2 = dnorm((Y-(X%*%(B[2,]))),mean = 0,s)
for(x in 1:10)
{
#OK, now we are set to do the first HMM iteration
#Forward probability
#As mentioned above of the observation that we have, we will calculate the forward probability
alpha = matrix(ncol=2,nrow=length(data))
#Forward for state 1 and first of the observation
alpha[1,1] = S1 * bs1[1] #Probability of State1 * Probability of observing the first number
#Forward for state 2 and first of the observation
alpha[1,2] = S2 * bs2[1] #Probability of State1 * Probability of observing the first number
#Second is just conditional on the previous one (could be either s1 or s2) and ENDING in state 1
alpha[2,1] = alpha[1,1] * TS1[1] * bs1[2] + # ending in s1, P(S1) * Self transition * P(O2|S1)
alpha[1,2] * TS2[2] * bs1[2] #ending in s1, P(S2) * T[S2->S1] * P(O2|S1)
alpha[2,2] = alpha[1,2] * TS2[1] * bs2[2] + # ending in s2, P(S2) * Self transition * P(O2|S2)
               alpha[1,1] * TS1[2] * bs2[2] #ending in s2, P(S1) * T[S1->S2] * P(O2|S2)
for(i in 2:length(data))
{
#Ok, let us see the pattern
#Probability of Observation given state 1 * sum of ((probability of previous state1 * self probability)+
#(Probability of previous state2 * transition from S2 to S1))
alpha[i,1] = bs1[i] * (alpha[i-1,1] * TS1[1] + alpha[i-1,2] * TS2[2])
#Similarly
alpha[i,2] = bs2[i] * (alpha[i-1,2] * TS2[1] + alpha[i-1,1] * TS1[2])
}
#Probability of the whole observation across the two states
po = alpha[length(data),1]+alpha[length(data),2]
#OK now the backward probability
beta = matrix(ncol=2,nrow=length(data))
beta[length(data),1] = 1
beta[length(data),2] = 1
#Stand at the previous step and then look ahead
#Probability at length(data)-1 at state 1 = T[S1->S1] * P(O|S1) * B(length(data),S1) + T[S1->S2] * P(O|S2) * B(length(data),S2)
beta[length(data)-1,1] = TS1[1] * bs1[length(data)-1] * beta[length(data),1] + TS1[2] * bs2[length(data)-1] * beta[length(data),2]
beta[length(data)-1,2] = TS2[1] * bs2[length(data)-1] * beta[length(data),2] + TS2[2] * bs1[length(data)-1] * beta[length(data),1]
#pattern
for(j in (length(data)-1):1)
{
beta[j,1] = TS1[1] * bs1[j+1] * beta[j+1,1] + TS1[2] * bs2[j+1] * beta[j+1,2]
beta[j,2] = TS2[1] * bs2[j+1] * beta[j+1,2] + TS2[2] * bs1[j+1] * beta[j+1,1]
}
#Gamma, which is the probability of the state given the observation
gamma = matrix(ncol=2, nrow=length(data))
gamma[,1] = alpha[,1]*beta[,1]
gamma[,2] = alpha[,2]*beta[,2]
gamma = t(apply(gamma,1,function(x){x/sum(x)}))
#Eta, which is the transition probability from state i to j at data length t
eta1 = matrix(ncol=2,nrow=(length(data)-1)) #for every data observation
eta2 = matrix(ncol=2,nrow=(length(data)-1)) #for every data observation
#going from 1 to 1 across the first observation
eta1[1,1] = (gamma[1,1] * TS1[1] * bs1[2] * beta[2,1])/beta[1,1]
#going from 1 to 2 across the first observation
eta1[1,2] = (gamma[1,1] * TS1[2] * bs2[2] * beta[2,2])/beta[1,1]
#going from 2 to 2 across the first observation
eta2[1,1] = (gamma[1,2] * TS2[1] * bs2[2] * beta[2,2])/beta[1,2]
  #going from 2 to 1 across the first observation
eta2[1,2] = (gamma[1,2] * TS2[2] * bs1[2] * beta[2,1])/beta[1,2]
#Pattern
for(t in 1:(length(data)-1))
{
    #going from 1 to 1 across observation t
    eta1[t,1] = (gamma[t,1] * TS1[1] * bs1[(t+1)] * beta[(t+1),1])/beta[t,1]
    #going from 1 to 2 across observation t
    eta1[t,2] = (gamma[t,1] * TS1[2] * bs2[(t+1)] * beta[(t+1),2])/beta[t,1]
    #going from 2 to 2 across observation t
    eta2[t,1] = (gamma[t,2] * TS2[1] * bs2[(t+1)] * beta[(t+1),2])/beta[t,2]
    #going from 2 to 1 across observation t
    eta2[t,2] = (gamma[t,2] * TS2[2] * bs1[(t+1)] * beta[(t+1),1])/beta[t,2]
}
TS1[1] = sum(eta1[,1])/sum(gamma[1:(length(data)-1),1])
TS1[2] = sum(eta1[,2])/sum(gamma[1:(length(data)-1),1])
TS2[1] = sum(eta2[,1])/sum(gamma[1:(length(data)-1),2])
TS2[2] = sum(eta2[,2])/sum(gamma[1:(length(data)-1),2])
#if(gamma[1,1]==S1&&gamma[1,2]==S2) break;
S1 = gamma[1,1]
S2 = gamma[1,2]
#Update the distribution parameters, mean and standard deviation similarly
xA = X[,-1]*gamma[,1]
yA = Y*gamma[,1]
xB = X[,-1]*gamma[,2]
yB = Y*gamma[,2]
B = rbind(coef(lm(yA ~ xA)),coef(lm(yB ~ xB)))
bs1 = dnorm((Y-(X%*%(B[1,]))),mean = 0,s)
bs2 = dnorm((Y-(X%*%(B[2,]))),mean = 0,s)
print(B)
print(c(S1,S2))
print(c(TS1,TS2))
}
#Now we can get the state model for the data
StateModel = c("H","L")
print(StateModel[as.integer(apply(gamma,1,function(x){x[1]>x[2]}))+1])
colors = c("red","blue")
plot(data,type="b", col=colors[as.integer(apply(gamma,1,function(x){x[1]>x[2]}))+1])
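#--------------------------------------------------------------------------
#Quick sanity check (an illustrative addition, not part of the original
#script): the data were generated with coefficients c(1,2,3) for the first
#50 observations and c(0.5,2,5) for the last 50, so the rows of the fitted B
#should roughly match these two sets (possibly with the state labels swapped),
#and the decoded states should switch around observation 50.
print(round(B, 2))
print(rbind(c(1, 2, 3), c(0.5, 2, 5)))   # generating coefficients
table(decoded = apply(gamma, 1, which.max), segment = rep(1:2, each = 50))
#--------------------------------------------------------------------------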
|
# Exercise 1: creating data frames
# Create a vector of the number of points the Seahawks scored in the first 4 games
# of the season (google "Seahawks" for the scores!)
points <- c(24, 17, 24, 20)
# Create a vector of the number of points the Seahawks have allowed to be scored
# against them in each of the first 4 games of the season
points_allowed <- c(27, 24, 13, 17)
# Combine your two vectors into a dataframe called `games`
games <- data.frame(points, points_allowed)
# Create a new column "diff" that is the difference in points between the teams
# Hint: recall the syntax for assigning new elements (which in this case will be
# a vector) to a list!
games$diff <- games$points - games$points_allowed
# Create a new column "won" which is TRUE if the Seahawks won the game
games$won <- games$points > games$points_allowed
# Create a vector of the opponent names corresponding to the games played
opponents <- c("Denver", "Chicago", "Dallas", "Arizona")
# Assign your dataframe rownames of their opponents
rownames(games) <- opponents
# View your data frame to see how it has changed!
View(games)
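# (Illustrative extra beyond the original exercise: quick checks on the result)
sum(games$won)    # how many of the 4 games were won
mean(games$diff)  # average point differential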
|
/exercises-yukisea/chapter-10-exercises/exercise-1/exercise.R
|
permissive
|
yukisea/INFO201
|
R
| false | false | 1,113 |
r
|
# Exercise 1: creating data frames
# Create a vector of the number of points the Seahawks scored in the first 4 games
# of the season (google "Seahawks" for the scores!)
points <- c(24, 17, 24, 20)
# Create a vector of the number of points the Seahawks have allowed to be scored
# against them in each of the first 4 games of the season
points_allowed <- c(27, 24, 13, 17)
# Combine your two vectors into a dataframe called `games`
games <- data.frame(points, points_allowed)
# Create a new column "diff" that is the difference in points between the teams
# Hint: recall the syntax for assigning new elements (which in this case will be
# a vector) to a list!
games$diff <- games$points - games$points_allowed
# Create a new column "won" which is TRUE if the Seahawks won the game
games$won <- games$points > games$points_allowed
# Create a vector of the opponent names corresponding to the games played
opponents <- c("Denver", "Chicago", "Dallas", "Arizona")
# Assign your dataframe rownames of their opponents
rownames(games) <- opponents
# View your data frame to see how it has changed!
View(games)
|
context("sf")
test_that("sf objects are created",{
is_sf <- function(x) {
a <- attributes(x)
all( a$class == c("sf", "data.frame") ) & a$sf_column == "geometry"
}
df <- data.frame(
id = c(1,1,1,1,1,2,2,2,2,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders:::rcpp_sf_point(df, 1:4 )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_point(df, 1:2 )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_multipoint(df, 1:4, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_linestring(df, 1:4, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_multilinestring(df, 1:4, NULL, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_polygon(df, 1:4, NULL, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_multipolygon(df, 1:4, NULL, NULL, NULL )
expect_true( is_sf( res ) )
})
test_that("correct number of rows returned",{
is_sf <- function(x) {
a <- attributes(x)
all( a$class == c("sf", "data.frame") ) & a$sf_column == "geometry"
}
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(1,1,2,2,1,1,2,2,3,3)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders:::rcpp_sf_point( df, c(2:3) )
expect_true( nrow(res) == nrow( df ) )
res <- sfheaders:::rcpp_sf_multipoint( df, c(2:3), 0L )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_linestring( df, c(2:3), 0L )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_multilinestring( df, c(2:3), 0L, NULL )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_polygon( df, c(2:3), 0L, NULL )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_multipolygon( df, c(2:3), 0L, NULL, NULL )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
})
test_that("ID order maintained",{
is_sf <- function(x) {
a <- attributes(x)
all( a$class == c("sf", "data.frame") ) & a$sf_column == "geometry"
}
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(1,1,2,2,2,1,2,2,3,3)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders:::rcpp_sf_point( df, c(2:3) )
m1 <- unclass( res$geometry[[1]] )
expect_equal( m1[1], df[1, "x"] )
expect_equal( m1[2], df[1, "y"] )
m7 <- unclass( res$geometry[[7]] )
expect_equal( m7[1], df[7, "x"] )
expect_equal( m7[2], df[7, "y"] )
res <- sfheaders:::rcpp_sf_multipoint( df, c(2:3), 0L )
m1 <- unclass( res$geometry[[1]] )
m2 <- unclass( res$geometry[[2]] )
expect_equal( m1[, 1], df[ df$id1 == 1, "x" ] )
expect_equal( m1[, 2], df[ df$id1 == 1, "y" ] )
expect_equal( m2[, 1], df[ df$id1 == 2, "x" ] )
expect_equal( m2[, 2], df[ df$id1 == 2, "y" ] )
res <- sfheaders:::rcpp_sf_polygon( df, c(2:3), 0L, 1L )
m1 <- res$geometry[[1]][[1]]
m2 <- res$geometry[[1]][[2]]
m3 <- res$geometry[[2]][[1]]
m4 <- res$geometry[[2]][[2]]
m5 <- res$geometry[[2]][[3]]
expect_equal( m1[, 1], df[ df$id1 == 1 & df$id2 == 1, "x"] )
expect_equal( m1[, 2], df[ df$id1 == 1 & df$id2 == 1, "y"] )
expect_equal( m2[, 1], df[ df$id1 == 1 & df$id2 == 2, "x"] )
expect_equal( m2[, 2], df[ df$id1 == 1 & df$id2 == 2, "y"] )
expect_equal( m3[, 1], df[ df$id1 == 2 & df$id2 == 1, "x"] )
expect_equal( m3[, 2], df[ df$id1 == 2 & df$id2 == 1, "y"] )
expect_equal( m4[, 1], df[ df$id1 == 2 & df$id2 == 2, "x"] )
expect_equal( m4[, 2], df[ df$id1 == 2 & df$id2 == 2, "y"] )
expect_equal( m5[, 1], df[ df$id1 == 2 & df$id2 == 3, "x"] )
expect_equal( m5[, 2], df[ df$id1 == 2 & df$id2 == 3, "y"] )
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(1,1,2,2,1,1,2,2,3,3) ## this errored in sf_polygon
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
expect_error( sfheaders:::rcpp_sf_polygon( df, c(2:3), 0L, 1L ), "sfheaders - error indexing lines, perhaps caused by un-ordered data?" ) ## because the id2 is out of order
expect_error( sfheaders:::rcpp_sf_linestring( df, c(2:3), 1L ), "sfheaders - error indexing lines, perhaps caused by un-ordered data?" )
expect_error( sfheaders:::rcpp_sf_linestring( df, c(2:3), 0 ), "sfheaders - linestring columns types are different")
})
test_that("unordered ids cause issues",{
df <- data.frame(
id1 = c(2,2,2,2,2,1,1,1,1,1)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_linestring(df, linestring_id = "id1")
expect_true( !any( res$id == unique( df$id1 ) ) )
## sub-group order works
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(2,2,3,3,3,1,1,1,2,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_polygon(df, polygon_id = "id1", linestring_id = "id2")
expect_true( all( res$id == unique( df$id1 ) ) )
m1 <- res$geometry[[1]][[1]]
m2 <- res$geometry[[1]][[2]]
m3 <- res$geometry[[2]][[1]]
m4 <- res$geometry[[2]][[2]]
expect_equal( m1, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2, 3:6 ] ) ) )
expect_equal( m2, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 3, 3:6 ] ) ) )
expect_equal( m3, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 1, 3:6 ] ) ) )
expect_equal( m4, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2, 3:6 ] ) ) )
## sub-group order doesn't work
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(2,2,3,3,3,3,3,1,2,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_polygon(df, polygon_id = "id1", linestring_id = "id2")
expect_true( all( res$id == unique( df$id1 ) ) )
m1 <- res$geometry[[1]][[1]]
m2 <- res$geometry[[1]][[2]]
m3 <- res$geometry[[2]][[1]]
m4 <- res$geometry[[2]][[2]]
m5 <- res$geometry[[2]][[3]]
  ## these tests will pass, but the coordinates will be wrong, because the ID order is wrong
expect_equal( m1, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2, 3:6 ] ) ) )
expect_equal( m2, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 3, 3:6 ] ) ) )
expect_equal( m3, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 3, 3:6 ] ) ) )
expect_equal( m4, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 1, 3:6 ] ) ) )
expect_equal( m5, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2, 3:6 ] ) ) )
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(2,2,3,3,3,3,3,1,2,2)
, id3 = c(1,2,1,1,1,1,2,2,1,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_multipolygon(df, multipolygon_id = "id1", polygon_id = "id2", linestring_id = "id3")
expect_true( all( res$id == unique( df$id1 ) ) )
m1 <- res$geometry[[1]][[1]][[1]]
m2 <- res$geometry[[1]][[1]][[2]]
m3 <- res$geometry[[1]][[2]][[1]]
m4 <- res$geometry[[2]][[1]][[1]]
m5 <- res$geometry[[2]][[1]][[2]]
m6 <- res$geometry[[2]][[2]][[1]]
m7 <- res$geometry[[2]][[3]][[1]]
m8 <- res$geometry[[2]][[3]][[2]]
  ## these tests will pass, but the coordinates will be wrong, because the ID order is wrong
expect_equal( m1, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m2, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2 & df$id3 == 2, 4:7 ] ) ) )
expect_equal( m3, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 3 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m4, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 3 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m5, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 3 & df$id3 == 2, 4:7 ] ) ) )
expect_equal( m6, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 1 & df$id3 == 2, 4:7 ] ) ) )
expect_equal( m7, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m8, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2 & df$id3 == 2, 4:7 ] ) ) )
})
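## Illustrative addition (not part of the original test suite): the indexing
## errors above come from id columns whose groups are not contiguous; ordering
## the data first avoids them. This sketch reuses the same sfheaders calls as
## the tests above.
test_that("ordering the data first avoids the indexing error", {
  df <- data.frame(
    id1 = c(1,1,1,1,1,2,2,2,2,2)
    , id2 = c(1,1,2,2,1,1,2,2,3,3)
    , x = 1:10
    , y = 1:10
  )
  df <- df[ order( df$id1, df$id2 ), ]
  res <- sfheaders::sf_polygon( df, polygon_id = "id1", linestring_id = "id2" )
  expect_true( all( res$id == unique( df$id1 ) ) )
})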
|
/tests/testthat/test-sf.R
|
no_license
|
nemochina2008/sfheaders
|
R
| false | false | 8,117 |
r
|
context("sf")
test_that("sf objects are created",{
is_sf <- function(x) {
a <- attributes(x)
all( a$class == c("sf", "data.frame") ) & a$sf_column == "geometry"
}
df <- data.frame(
id = c(1,1,1,1,1,2,2,2,2,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders:::rcpp_sf_point(df, 1:4 )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_point(df, 1:2 )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_multipoint(df, 1:4, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_linestring(df, 1:4, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_multilinestring(df, 1:4, NULL, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_polygon(df, 1:4, NULL, NULL )
expect_true( is_sf( res ) )
res <- sfheaders:::rcpp_sf_multipolygon(df, 1:4, NULL, NULL, NULL )
expect_true( is_sf( res ) )
})
test_that("correct number of rows returned",{
is_sf <- function(x) {
a <- attributes(x)
all( a$class == c("sf", "data.frame") ) & a$sf_column == "geometry"
}
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(1,1,2,2,1,1,2,2,3,3)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders:::rcpp_sf_point( df, c(2:3) )
expect_true( nrow(res) == nrow( df ) )
res <- sfheaders:::rcpp_sf_multipoint( df, c(2:3), 0L )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_linestring( df, c(2:3), 0L )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_multilinestring( df, c(2:3), 0L, NULL )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_polygon( df, c(2:3), 0L, NULL )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
res <- sfheaders:::rcpp_sf_multipolygon( df, c(2:3), 0L, NULL, NULL )
expect_true( nrow(res) == length( unique( df$id1 ) ) )
expect_true( all( res$id == unique( df$id1 ) ) )
})
test_that("ID order maintained",{
is_sf <- function(x) {
a <- attributes(x)
all( a$class == c("sf", "data.frame") ) & a$sf_column == "geometry"
}
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(1,1,2,2,2,1,2,2,3,3)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders:::rcpp_sf_point( df, c(2:3) )
m1 <- unclass( res$geometry[[1]] )
expect_equal( m1[1], df[1, "x"] )
expect_equal( m1[2], df[1, "y"] )
m7 <- unclass( res$geometry[[7]] )
expect_equal( m7[1], df[7, "x"] )
expect_equal( m7[2], df[7, "y"] )
res <- sfheaders:::rcpp_sf_multipoint( df, c(2:3), 0L )
m1 <- unclass( res$geometry[[1]] )
m2 <- unclass( res$geometry[[2]] )
expect_equal( m1[, 1], df[ df$id1 == 1, "x" ] )
expect_equal( m1[, 2], df[ df$id1 == 1, "y" ] )
expect_equal( m2[, 1], df[ df$id1 == 2, "x" ] )
expect_equal( m2[, 2], df[ df$id1 == 2, "y" ] )
res <- sfheaders:::rcpp_sf_polygon( df, c(2:3), 0L, 1L )
m1 <- res$geometry[[1]][[1]]
m2 <- res$geometry[[1]][[2]]
m3 <- res$geometry[[2]][[1]]
m4 <- res$geometry[[2]][[2]]
m5 <- res$geometry[[2]][[3]]
expect_equal( m1[, 1], df[ df$id1 == 1 & df$id2 == 1, "x"] )
expect_equal( m1[, 2], df[ df$id1 == 1 & df$id2 == 1, "y"] )
expect_equal( m2[, 1], df[ df$id1 == 1 & df$id2 == 2, "x"] )
expect_equal( m2[, 2], df[ df$id1 == 1 & df$id2 == 2, "y"] )
expect_equal( m3[, 1], df[ df$id1 == 2 & df$id2 == 1, "x"] )
expect_equal( m3[, 2], df[ df$id1 == 2 & df$id2 == 1, "y"] )
expect_equal( m4[, 1], df[ df$id1 == 2 & df$id2 == 2, "x"] )
expect_equal( m4[, 2], df[ df$id1 == 2 & df$id2 == 2, "y"] )
expect_equal( m5[, 1], df[ df$id1 == 2 & df$id2 == 3, "x"] )
expect_equal( m5[, 2], df[ df$id1 == 2 & df$id2 == 3, "y"] )
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(1,1,2,2,1,1,2,2,3,3) ## this errored in sf_polygon
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
expect_error( sfheaders:::rcpp_sf_polygon( df, c(2:3), 0L, 1L ), "sfheaders - error indexing lines, perhaps caused by un-ordered data?" ) ## because the id2 is out of order
expect_error( sfheaders:::rcpp_sf_linestring( df, c(2:3), 1L ), "sfheaders - error indexing lines, perhaps caused by un-ordered data?" )
expect_error( sfheaders:::rcpp_sf_linestring( df, c(2:3), 0 ), "sfheaders - linestring columns types are different")
})
test_that("unordered ids cause issues",{
df <- data.frame(
id1 = c(2,2,2,2,2,1,1,1,1,1)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_linestring(df, linestring_id = "id1")
expect_true( !any( res$id == unique( df$id1 ) ) )
## sub-group order works
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(2,2,3,3,3,1,1,1,2,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_polygon(df, polygon_id = "id1", linestring_id = "id2")
expect_true( all( res$id == unique( df$id1 ) ) )
m1 <- res$geometry[[1]][[1]]
m2 <- res$geometry[[1]][[2]]
m3 <- res$geometry[[2]][[1]]
m4 <- res$geometry[[2]][[2]]
expect_equal( m1, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2, 3:6 ] ) ) )
expect_equal( m2, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 3, 3:6 ] ) ) )
expect_equal( m3, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 1, 3:6 ] ) ) )
expect_equal( m4, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2, 3:6 ] ) ) )
## sub-group order doesn't work
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(2,2,3,3,3,3,3,1,2,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_polygon(df, polygon_id = "id1", linestring_id = "id2")
expect_true( all( res$id == unique( df$id1 ) ) )
m1 <- res$geometry[[1]][[1]]
m2 <- res$geometry[[1]][[2]]
m3 <- res$geometry[[2]][[1]]
m4 <- res$geometry[[2]][[2]]
m5 <- res$geometry[[2]][[3]]
  ## these tests will pass, but the coordinates will be wrong, because the ID order is wrong
expect_equal( m1, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2, 3:6 ] ) ) )
expect_equal( m2, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 3, 3:6 ] ) ) )
expect_equal( m3, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 3, 3:6 ] ) ) )
expect_equal( m4, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 1, 3:6 ] ) ) )
expect_equal( m5, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2, 3:6 ] ) ) )
df <- data.frame(
id1 = c(1,1,1,1,1,2,2,2,2,2)
, id2 = c(2,2,3,3,3,3,3,1,2,2)
, id3 = c(1,2,1,1,1,1,2,2,1,2)
, x = 1:10
, y = 1:10
, z = 1:10
, m = 1:10
)
res <- sfheaders::sf_multipolygon(df, multipolygon_id = "id1", polygon_id = "id2", linestring_id = "id3")
expect_true( all( res$id == unique( df$id1 ) ) )
m1 <- res$geometry[[1]][[1]][[1]]
m2 <- res$geometry[[1]][[1]][[2]]
m3 <- res$geometry[[1]][[2]][[1]]
m4 <- res$geometry[[2]][[1]][[1]]
m5 <- res$geometry[[2]][[1]][[2]]
m6 <- res$geometry[[2]][[2]][[1]]
m7 <- res$geometry[[2]][[3]][[1]]
m8 <- res$geometry[[2]][[3]][[2]]
  ## these tests will pass, but the coordinates will be wrong, because the ID order is wrong
expect_equal( m1, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m2, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 2 & df$id3 == 2, 4:7 ] ) ) )
expect_equal( m3, unname( as.matrix( df[ df$id1 == 1 & df$id2 == 3 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m4, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 3 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m5, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 3 & df$id3 == 2, 4:7 ] ) ) )
expect_equal( m6, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 1 & df$id3 == 2, 4:7 ] ) ) )
expect_equal( m7, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2 & df$id3 == 1, 4:7 ] ) ) )
expect_equal( m8, unname( as.matrix( df[ df$id1 == 2 & df$id2 == 2 & df$id3 == 2, 4:7 ] ) ) )
})
|
## Authors
## Martin Schlather, schlather@math.uni-mannheim.de
##
##
## Copyright (C) 2015 -- 2017 Martin Schlather
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 3
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
### !!!!!!!!!!!! ATTENTION !!!!!!!!!!!! TREND as a cov-fct still
### needs to be programmed !!!
## source("~/R/RF/RandomFields/R/MLES.R")
## PrintLevels
## 0 : no message
## 1 : important error messages
## 2 : warnings
## 3 : minimum debugging information
## 5 : extended debugging information
## now only global naturalscaling (yes / no)
## later a function could be written that converts the naturalscaling;
## in principle CMbuild, but backwards with 1/newscale and inserted
## into possibly already existing $ operators
#While reading a paper on the train to Munich this morning I came across a reference to an R package "mlegp: Maximum likelihood estimates of Gaussian processes". But you surely know it already!
# stop("")
# problem: natscale; currently implemented twice, once via
# scale/aniso (user) and once duplicated -- one of the two has to go
## LSQ variogram for trend = const.
## can be improved, especially for fixed effects, but is also limited
## for random effects -> BA/MA
## REML is missing
## users.guess must be turned into a list of several suggestions !!! And then RFfit must run a recursive call with all values obtained so far !!
## central C interfaces
## .C(C_PutValuesAtNA, RegNr, param)
## bins for Distances chosen automatically
## with repetitions the trends/fixed effects are the same, but the
## random effects have to differ.
## with list(data), trend/fixed effects are also estimated separately.
## Extensions: Emilio's Bi-MLE, covariance-matrix inversion via fft or
## via INLA, splitting large data sets into smaller "independent" ones.
###################################
## !!! Mixed Model Equations !!! ##
###################################
## accessing slots
accessByNameOrNumber <- function(x, i, j, drop=FALSE) {
stopifnot(length(i)==1)
if (is.numeric(i)) i <- slotNames(x)[i]
return(accessSlotsByName(x=x, i=i, j=j, drop=drop))
}
setMethod("[", signature = "RFfit", def=accessByNameOrNumber)
### to do : ask Paulo
#effects_RFfit <- function(OP, object, method) {
# eff <- RFrandef(object=object, method=method, OP=OP)
# linpart <- fitted_RFfit(OP=OP, object=object, method=method)
# stop("unclear how these two results should be combined in the output")
#}
#effects_RMmodelFit <- function(...) stop("'effects' can only be used with the original and sp_conform output of 'RFfit'.")
#setMethod(f="effects", signature='RFfit',
# definition=function(object, method="ml")
# effects_RFfit("@", object=object, method=method))#
#setMethod(f="effects", signature='RMmodelFit',
# definition=function(object, newdata=NULL) effects_RMmodelFit())#
#effects.RM_modelFit <- function(object, ...) effects_RMmodelFit()
#effects.RF_fit <- function(object, method="ml") effects_RMmodelFit()
simulate_RFfit <- function(OP, object, newdata, conditional, method) {
Z <- do.call(OP, list(object, "Z"))
L <- length(Z$data)
ans <- rep(list(NULL), L)
m <- ModelParts(object[method], effects=Z$effect, complete=FALSE) ## no params
if (conditional) {
for (i in 1:L) {
ans[[i]] <- RFsimulate(model=m$model,
data = Z$data[[i]],
given = Z$coord[[i]],
x = if (!is.null(newdata)) newdata,
err.model = m$err.model)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
return (if (L == 1) ans[[1]] else ans)
} else {
for (i in 1:L) {
      ans[[i]] <- RFsimulate(model=m$model,
x = if (length(newdata)==0) Z$coord[[i]]
else newdata)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
  }
  return (if (L == 1) ans[[1]] else ans)
}
simulate_RMmodelFit <- function(...) stop("'simulate' can only be used with the original and sp_conform output of 'RFfit'.")
setMethod(f="simulate", signature='RFfit',
definition=function(object, newdata=NULL,
conditional=!is.null(newdata), method="ml")
simulate_RFfit("@", object=object, conditional=conditional,
method=method))#
setMethod(f="simulate", signature='RMmodelFit',
definition=function(object, newdata=NULL) simulate_RMmodelFit())#
simulate.RM_modelFit <- function(object, ...) simulate_RMmodelFit()
simulate.RF_fit <- function(object, method="ml") simulate_RMmodelFit()
predict_RFfit <- function(OP, object, newdata, impute, method) {
Z <- do.call(OP, list(object, "Z"))
L <- length(Z$data)
ans <- rep(list(NULL), L)
if (impute) {
if (length(newdata) > 0) stop("for imputing, 'newdata' may not be given")
for (i in 1:L) {
ans[[i]] <- RFinterpolate(model=object[method],
data = Z$data[[i]],
given = Z$coord[[i]],
err.model = NA)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
} else {
for (i in 1:L) {
ans[[i]] <- RFinterpolate(model=object[method],
data = Z$data[[i]],
given = Z$coord[[i]],
x = if (!is.null(newdata)) newdata else
Z$coord,
err.model = NA)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
}
return (if (L == 1) ans[[1]] else ans)
}
predict_RMmodelFit <- function(...) stop("'predict' can only be used with the original and sp_conform output of 'RFfit'.")
setMethod(f="predict", signature='RFfit',
definition=function(object, newdata=NULL, impute=FALSE, method="ml")
predict_RFfit("@", object=object, newdata=newdata,
impute=impute, method=method))#
setMethod(f="predict", signature='RMmodelFit',
definition=function(object, newdata=NULL) predict_RMmodelFit())#
predict.RM_modelFit <- function(object, ...)
predict_RMmodelFit(object=object, ...)
predict.RF_fit <- function(object, method="ml")
predict_RMmodelFit(object, method=method)
coef_RMmodelFit <- function(OP, object) {
covariat <- do.call(OP, list(object, "covariat"))
glbl.var <- do.call(OP, list(object, "globalvariance"))
p <- do.call(OP, list(object, "param"))
if (length(covariat) > 0) covariat <- as.matrix(covariat)
nr_p <- nrow(p)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_p - length(glbl.var)))
p <- cbind(p, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow= nr_p - nrow(covariat))))
#class(p) <- "coef.RMmodelFit"
p[1, ]
}
setMethod(f="coef", signature='RMmodelFit',
definition=function(object) coef_RMmodelFit("@", object))#
setMethod(f="coef", signature='RFfit',
definition=function(object, method="ml")
coef_RMmodelFit("@", object[method]))#
coef.RM_modelFit <- function(object, ...)
coef_RMmodelFit("$", object)
coef.RF_fit <- function(object, method="ml")
coef_RMmodelFit("$", object[method])
residuals_RMmodelFit <- function(OP, object) {
resid <- do.call(OP, list(object, "residuals"))
message("Note that 'residuals' equals the difference between the data and the linear part (fixed effects).")
if (length(resid) == 1) resid[[1]] else resid
}
setMethod(f="residuals", signature='RMmodelFit',
definition=function(object) residuals_RMmodelFit("@", object))#
setMethod(f="residuals", signature='RFfit',
definition=function(object, method="ml")
residuals_RMmodelFit("@", object[method]))#
residuals.RM_modelFit <- function(object, ...)
residuals_RMmodelFit("$", object)
residuals.RF_fit <- function(object, method="ml")
residuals_RMmodelFit("$", object[method])
fitted_RFfit <- function(OP, object, method) {
data <- do.call(OP, list(object, "Z"))$data
resid <- do.call(OP, list(object[method], "residuals"))
for (i in 1:length(data)) data[[i]] <- data[[i]] - resid[[i]]
message("Note that 'fitted' equals the linear part (fixed effects).")
if (length(data) > 1) data else
if (ncol(data[[1]]) > 1) data[[1]] else as.vector(data[[1]])
}
fitted_RMmodelFit <- function(...) stop("'fitted' can only be used with the original output of 'RFfit', not with some of its extraction.")
setMethod(f="fitted", signature='RMmodelFit',
definition=function(object) fitted_RMmodelFit())#
setMethod(f="fitted", signature='RFfit',
definition=function(object, method="ml")
fitted_RFfit("@", object=object, method=method))#
fitted.RM_modelFit <- function(object, ...) fitted_RMmodelFit()
fitted.RF_fit <- function(object, method="ml")
fitted_RFfit("$", object=object, method=method)
RFhessian <- function(model) {
method <- "ml"
  if (is(model, "RF_fit")) return(model[[method]]$hessian)
  else if (is(model, "RFfit")) return(model[method]@hessian)
else stop("'model' is not an output of 'RFfit'")
}
anova.RFfit <- function(object, ...) RFratiotest(nullmodel=object, ...)
anova.RF_fit <- function(object, ...) RFratiotest(nullmodel=object, ...)
anova.RMmodelFit <- function(object, ...) RFratiotest(nullmodel=object, ...)
anova.RM_modelFit <- function(object, ...) RFratiotest(nullmodel=object, ...)
setMethod(f="anova", signature=CLASS_FIT, anova.RFfit)#
setMethod(f="anova", signature='RFfit', anova.RFfit)#
boundary_values <- function(variab) {
upper.bound <- variab[4, , drop=FALSE]
lower.bound <- variab[3, , drop=FALSE]
# sd <- variab[2, ]
variab <- variab[1, , drop=FALSE]
lidx <- variab < lower.bound + 1e-8
uidx <- variab > upper.bound - 1e-8
nl <- sum(lidx, na.rm=TRUE)
nu <- sum(uidx, na.rm=TRUE)
if (nl + nu > 0) {
lidx[is.na(lidx)] <- FALSE
uidx[is.na(uidx)] <- FALSE
txt <-
paste(sep="", "Note that the (possibly internal) fitted variable",
if (nl > 0)
paste(if (nl > 1) "s " else " ",
paste("'", colnames(variab)[lidx], "'", sep="", collapse=", "),
if (nl == 1) " is " else " are ",
"close to or on the effective lower boundary", sep=""),
if (nl > 0 && nu > 0) " and the variable",
if (nu > 0)
paste(if (nu > 1) "s " else " ",
paste("'", colnames(variab)[uidx], "'",
sep="", collapse=", "),
if (nu == 1) "is" else "are",
"close to or on the effective upper boundary"),
".\nHence the gradient of the likelihood function might not be zero and none of the\nreported 'sd' values might be reliable.")
} else txt <- NULL
return(txt)
}
summary_RMmodelFit <- function(OP, object, ..., isna.param) {
model <- if (OP == "@") PrepareModel2(object, ...) else object$model
covariat <- do.call(OP, list(object, "covariat"))
glbl.var <- do.call(OP, list(object, "globalvariance"))
p <- do.call(OP, list(object, "param"))
r <- do.call(OP, list(object, "residuals"))
v <- do.call(OP, list(object, "variab"))
l <- list(model=model,
loglikelihood=do.call(OP, list(object, "likelihood")),
AIC = do.call(OP, list(object, "AIC")),
AICc= do.call(OP, list(object, "AICc")),
BIC = do.call(OP, list(object, "BIC")),
residuals=if (length(r) == 1) r[[1]] else r)
if (missing(isna.param)) isna.param <- any(is.na(p))
l$boundary <- boundary_values(v)
if (length(covariat) > 0) covariat <- as.matrix(covariat)
if (!any(is.na(p[1, ]))) {
nr_p <- nrow(p)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_p - length(glbl.var)))
l$param <- cbind(p, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow= nr_p - nrow(covariat))))
}
if (isna.param || !is.null(l$boundary)) {
nr_v <- nrow(v)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_v - length(glbl.var)))
l$variab <- cbind(v, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow=nr_v - nrow(covariat)))
)
}
class(l) <- "summary.RMmodelFit"
l
}
summary.RMmodelFit <- function(object, ..., isna.param) {
summary_RMmodelFit("@", object, ..., isna.param=isna.param)
}
setMethod(f="summary", signature=CLASS_FIT, summary.RMmodelFit)#
summary.RM_modelFit <- function(object, ..., isna.param) {
summary_RMmodelFit("$", object, ..., isna.param=isna.param)
}
print.summary.RMmodelFit <- function(x, ...) {
printVariab <- function(x) {
cat("Internal variables:\n")
if (is.null(x$boundary)) print(x$variab[1:2, , drop=FALSE], ..., na.print="-")#
else print(x$variab, ..., na.print="-")#
cat("\n")
return(ncol(x$variab))
}
printParam <- function(param) {
cat("User's variables:\n")
print(param, ..., na.print="-")#
return(ncol(param))
}
printRest <- function(...) {
x <- unlist(list(...))
stopifnot(length(x) == 3)
names(x) <- c("#variab", "loglikelihood", "AIC")
cat("\n")
print(x) #
cat("\n")
}
if (RFoptions()$general$detailed_output) str(x$model, no.list=TRUE) #
cat("\n")
np <- AIC <- ll <- nm <- NA
if (length(x$submodels) > 0) {
cur_name <- ""
len <- length(x$submodels)
for (i in 1:len) {
sm <- x$submodels[[i]]
n <- sm$report
      nnxt <- if (i==len) "" else x$submodels[[i+1]]$report
if (n != cur_name) {
if (i > 1) {
if (!is.null(sm$param)) printParam(cparam)
printRest(np, ll, AIC) #
if (!is.null(sm$boundary)) cat(sm$boundary, "\n\n")
}
if (nnxt != n && length(sm$fixed) > 0) {
nX <- paste(sep="", n, " (",
paste(c(if (length(sm$fixed$zero) > 0)
paste(colnames(x$param)[sm$fixed$zero], "= 0"),
if (length(sm$fixed$one) > 0)
paste(colnames(x$param)[sm$fixed$one], "= 1")),
sep=", "),
")")
} else nX <- n
cat(if (!is.na(nm)) cat("\n"), nX, "\n",
paste(rep("=", min(80, nchar(nX))), collapse=""),
"\n", sep="")
np <- 0
AIC <- 0
ll <- 0
cparam <- NULL
nm <- 1
}
if (!is.null(sm$variab)) {
if (nm > 1 || (i<len && n==nnxt)) cat("model", nm, ", ")
printVariab(sm)
}
if (!is.null(sm$param)) {
param <- x$param * NA
param[, sm$p.proj] <- sm$param
fixed <- sm$fixed
if (length(fixed) > 0) {
param[1, fixed$zero] <- 0
param[1, fixed$one] <- 1
}
# if (!is.null(cparam)) cparam <- rbind(cparam, NA)
cparam <- rbind(cparam, param)
}
np <- np + length(sm$p.proj)
ll <- ll + sm$loglikelihood
AIC <- AIC + sm$AIC
nm <- nm + 1;
cur_name <- n
}
if (!is.null(sm$param)) printParam(param)
printRest(np, ll, AIC) #
if (!is.null(sm$boundary)) cat(sm$boundary, "\n\n")
cat("\nuser's model\n", paste(rep("=", 12), collapse=""), "\n", sep="")
}
np <- NA
if (!is.null(x$variab)) np <- printVariab(x)
if (!is.null(x$param)) np <- printParam(x$param)
printRest(np, x[c("loglikelihood", "AIC")])#
if (!is.null(x$boundary)) cat(x$boundary, "\n\n")
invisible(x)
}
print.RMmodelFit <- function(x, ...)
print.summary.RMmodelFit(summary.RMmodelFit(x, ...))#
print.RM_modelFit <- function(x, ...)
print.summary.RMmodelFit(summary.RM_modelFit(x, ...))#
setMethod(f="show", signature=CLASS_FIT,
definition=function(object) print.RMmodelFit(object))#
summary.RFfit <- function(object, ..., method="ml", full=FALSE) {
s <- summary.RMmodelFit(object[method])
len <- length(object@submodels)
if (full && length(object@submodels) > 0) {
submodels <- list()
for (i in 1:len) {
      ## was summary.RM_modelFit; do not call the specific 'summary' method here
      submodels[[i]] <- summary(object@submodels[[i]][[method]],
                                isna.param=is.null(s$param))
      submodels[[i]]$report <- object@submodels[[i]]$report
submodels[[i]]$p.proj <- object@submodels[[i]]$p.proj
submodels[[i]]$fixed <- object@submodels[[i]]$fixed
}
s$submodels <- submodels
}
s
}
summary.RF_fit <- function(object, ..., method="ml", full=FALSE) {
s <- summary.RM_modelFit(object[[method]])
len <- length(object$submodels)
if (full && len > 0) {
submodels <- list()
for (i in 1:len) {
submodels[[i]] <- summary.RM_modelFit(object$submodels[[i]][[method]],
isna.param=is.null(s$param))
submodels[[i]]$report <- object$submodels[[i]]$report
submodels[[i]]$p.proj <- object$submodels[[i]]$p.proj
submodels[[i]]$fixed <- object$submodels[[i]]$fixed
}
s$submodels <- submodels
}
s
}
print.RFfit <- function(x, ..., method="ml", full=FALSE) {
print.summary.RMmodelFit(summary.RFfit(x, ..., method=method, full=full))
}
setMethod(f="show", signature='RFfit',
definition=function(object) print.RFfit(object))#
print.RF_fit <- function(x, ..., method="ml", full=FALSE) {
print.summary.RMmodelFit(summary.RF_fit(x, ..., method=method, full=full))
}
logLik.RF_fit <- function(object, REML = FALSE, ..., method="ml") {
if (hasArg("REML")) stop("parameter 'REML' is not used. Use 'method' instead")
## according to geoR
val <- object[[method]]$likelihood
attr(val, "df") <- object$number.of.parameters
attr(val, "method") <- method
class(val) <- "logLik"
return(val)
}
logLik.RFfit <- function(object, REML = FALSE, ..., method="ml") {
if (hasArg("REML")) stop("parameter 'REML' is not used. Use 'method' instead")
## according to geoR
val <- object[method]@likelihood
attr(val, "df") <- object@number.of.parameters
attr(val, "method") <- method
class(val) <- "logLik"
return(val)
}
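## Illustrative usage sketch (an assumption, kept as comments so nothing is
## executed at package load): on a fit returned by RFfit() the generics above
## behave like the usual R information-criteria accessors, e.g.
##   fit <- RFfit(RMexp(var=NA, scale=NA), data=some_data)  # 'some_data' is a placeholder
##   logLik(fit, method="ml")                  # log-likelihood carrying a 'df' attribute
##   AIC(fit, full=FALSE); BIC(fit, full=FALSE)  # plain criteria of the full model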
print.AICRFfit<- function(x, ..., digits=3) {
  ## only for this reason
fstcol <- 3
sndcol <- 55
trdcol <- 4
forthcol<-9
leer <- formatC("", width=fstcol)
size <- max(abs(x[[2]]))
size <- if (size>0) ceiling(log(size) / log(10)) else 1
cat(leer, formatC("model", flag="-", width=sndcol), " ",
formatC(names(x)[1], width=trdcol),
formatC(names(x)[2], width=forthcol), "\n", sep="")
names <- attr(x, "row.names")
for (i in 1:length(names)) {
cat(formatC(i, width=fstcol, flag="-"))
if (nchar(xx <- names[i]) <= sndcol)
cat(formatC(xx, width=sndcol, flag="-"))
else {
yy <- strsplit(xx, " \\* ")[[1]]
for (j in 1:length(yy)) {
ncyy <- nchar(yy[j])
if (ncyy <= sndcol && j==length(yy))
cat(format(yy[j], width=sndcol, flag="-"))
else {
if (ncyy <= sndcol - 2) {
cat(yy[j])
} else {
zz <- strsplit(yy[j], ", ")[[1]]
ncyy <- 0
lenzz <- length(zz)
for (k in 1:lenzz) {
len <- nchar(zz[k])
if (k > 1 && len > sndcol - 1) {
cat("\n", leer, zz[k], sep="")
if (k < lenzz)
cat(formatC(",", flag="-", width=pmax(1, sndcol-len)))
} else {
if (ncyy + len > sndcol - 1) {
cat("\n", leer, sep="")
ncyy <- len
} else {
ncyy <- ncyy + len
}
cat(zz[k])
if (k < lenzz) {
cat(", ")
ncyy <- ncyy + 2
}
}
} # for k 1:lenzz
} # split according to commata
if (j < length(yy)) cat(" *\n", leer, sep="")
else if (ncyy < sndcol) cat(formatC("", width=sndcol-ncyy))
}
} # for 1:products
} ## not be written in a single line
cat("",
formatC(x[[1]][i], width=trdcol),
formatC(x[[2]][i], format="f", width=size + digits + 1,
digits=digits),"\n")
}
}
fullAIC <- function(x, method="ml", AIC="AIC") {
ats <- approx_test_single(x, method=method)$result
values <- c("name", "df", AIC)
model2 <- paste("model2.", values, sep="")
ats2 <- ats[ !is.na(ats[, model2[2]]), model2]
colnames(ats2) <- values
if (ats2$df < 0) ats2 <- NULL
ats <- ats[, paste("model1.", values, sep="")]
colnames(ats) <- values
if (ats$df < 0) ats <- NULL
ats <- unique(rbind(ats, ats2))
dimnames(ats) <- list(1:nrow(ats), colnames(ats))
names <- as.character(ats$name)
ats <- ats[-1]
attr(ats, "row.names") <- names
class(ats) <- "AICRFfit"
ats
}
AIC.RFfit <- function(object, ..., k=2, method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method)
} else {
AIC <- object[method]@AIC
names(AIC) <- "AIC"
AIC
}
}
AIC.RF_fit <- function(object, ..., k=2, method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method)
} else {
AIC <- object[[method]]$AIC
names(AIC) <- "AIC"
AIC
}
}
AICc.RFfit <- function(object, ..., method="ml", full=FALSE) {
if (full) {
stop("for 'AICc' the option 'full=TRUE' has not been programmed yet.")
fullAIC(object, method=method)
} else {
AIC <- object[method]@AIC
names(AIC) <- "AICc"
AIC
}
}
AICc.RF_fit <- function(object, ..., method="ml", full=TRUE) {
if (full) {
stop("for 'AICc' the option 'full=TRUE' has not been programmed yet.")
fullAIC(object, method=method)
} else {
AIC <- object[[method]]$AIC
names(AIC) <- "AICc"
AIC
}
}
BIC.RFfit <- function(object, ..., method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method, AIC="BIC")
} else {
BIC <- object[method]@BIC
names(BIC) <- "BIC"
BIC
}
}
BIC.RF_fit <- function(object, ..., method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method, AIC="BIC")
} else {
BIC <- object[[method]]$BIC
names(BIC) <- "BIC"
BIC
}
}
resid.RFfit <- function(object, ..., method="ml") {
resid <- object[method]@residuals
names(resid) <- "residuals"
resid
}
resid.RF_fit <- function(object, ..., method="ml") {
resid <- object[[method]]$residuals
names(resid) <- "residuals"
resid
}
residuals.RFfit <- function(object, ..., method="ml")
resid.RFfit(object=object, method=method)
residuals.RF_fit <- function(object, ..., method="ml")
resid.RF_fit(object=object, method=method)
coef_RMmodelFit <- function(OP, object, ...) {
covariat <- do.call(OP, list(object, "covariat"))
glbl.var <- do.call(OP, list(object, "globalvariance"))
p <- do.call(OP, list(object, "param"))
if (length(covariat) > 0) covariat <- as.matrix(covariat)
nr_p <- nrow(p)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_p - length(glbl.var)))
p <- cbind(p, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow= nr_p - nrow(covariat))))
#class(p) <- "coef.RMmodelFit"
p[1, ]
}
setMethod(f="coef", signature='RMmodelFit',
definition=function(object) coef_RMmodelFit("@", object))#
setMethod(f="coef", signature='RFfit',
definition=function(object) coef_RMmodelFit("@", object["ml"]))#
coef.RM_modelFit <- function(object, ...) coef_RMmodelFit("$", object, ...)
coef.RF_fit <- function(object, ...) coef_RMmodelFit("$", object["ml"], ...)
setMethod(f="plot", signature(x="RFfit", y="missing"),
function(x, y, ...) RFplotEmpVariogram(x, ...))
setMethod(f="persp", signature(x="RFfit"),
function(x, ...) RFplotEmpVariogram(x, ..., plotmethod="persp"))
contour.RFfit <- contour.RFempVariog <-
function(x,...) {
stopifnot(!( (is(x, "RFfit") && is.list(x@ev@centers))
|| (is(x, "RFempVariog") && is.list(x@centers))
))
RFplotEmpVariogram(x, ..., plotmethod="contour")
}
ExpliciteGauss <- function(model) {
if (model[[1]] != "RPgauss" && model[[1]] != "gauss.process") {
boxcox <- RFoptions()$gauss$boxcox
if (any(is.na(boxcox)) || any(boxcox[c(TRUE, FALSE)] != Inf))
return(list("RPgauss", boxcox=boxcox, model))
}
return(model)
}
RFfit <-
function(model, x, y=NULL, z=NULL, T=NULL, grid=NULL, data,
lower=NULL, upper=NULL,
methods, # "reml", "rml1"),
sub.methods,
## "internal" : name should not be changed; should always be last
## method!
optim.control=NULL,
users.guess=NULL,
distances=NULL, dim,
transform=NULL,
params=NULL,
##type = c("Gauss", "BrownResnick", "Smith", "Schlather",
## "Poisson"),
...)
{
.C(C_NoCurrentRegister)
RFoptOld <- internal.rfoptions(xyz=length(y)!=0,...,
internal.examples_reduced = FALSE,
RELAX=is(model, "formula"))
on.exit(RFoptions(LIST=RFoptOld[[1]]))
RFopt <- RFoptOld[[2]]
if (length(params) > 0) {
if ((!is.na(RFopt$fit$estimate_variance_globally) &&
RFopt$fit$estimate_variance_globally) &&
RFopt$basic$printlevel > 0)
message("Value of option 'hestimate_variance_globally' is ignored.")
RFopt$fit$estimate_variance_globally <- FALSE
RFoptions(fit.estimate_variance_globally = FALSE)
}
fit <- RFopt$fit
if (RFopt$general$vdim_close_together)
stop("'vdim_close_together' must be FALSE")
if (is.data.frame(data)) {
name <- "RFfit.user.dataset"
do.call("attach", list(what=data, name=name))
on.exit(detach(name, character.only = TRUE), add=TRUE)
}
## in UnifyData the further.models that contain only the parameter data
## are turned into genuine models
further.models <- list()
models <- c("lower", "upper", "users.guess", "parscale")
if (paramlist <- length(params) > 0) {
parscale <- optim.control$parscale
for (m in models) {
fm <- get(m)
if (!is.null(fm) && !is.numeric(fm))
further.models[[m]] <- PrepareModel2(fm, ...)
}
}
## Print(further.models, model)
Z <- UnifyData(model=model, x=x, y=y, z=z, T=T, grid=grid,
data=data, distances=distances, dim=dim,
RFopt=RFopt,
mindist_pts = RFopt$fit$smalldataset / 2,
further.models = further.models, params=params, ...)
## Print(Z); kkk
Z <- BigDataSplit(Z, RFopt)
if (!hasArg("transform")) transform <- NULL
if (paramlist) {
for (m in models)
if (!is.null(get(m)) && !is.numeric(get(m)))
assign(m, Z$further.models[[m]])
optim.control$parscale <- parscale
if (!is.null(Z$transform)) {
if (!is.null(transform))
stop("argument 'transform' may not be given if 'params' is given")
transform <- Z$transform
}
} else {
parscale <- optim.control$parscale
for (m in models)
if (!is.null(get(m)) && !is.numeric(get(m)))
assign(m, ReplaceC(PrepareModel2(get(m), ...)))
optim.control$parscale <- parscale
}
new.model <- Z$model
if (new.model[[1]] %in% c("RPpoisson", "poisson")) {
res <- fit.poisson()
} else if (new.model[[1]] %in% c("BRmixed", "BRshifted", "BRmixedIntern",
"RFbrownresnick")) {
res <- fit.br()
} else if (new.model[[1]] %in% c("RPschlather", "extremalgauss")) {
res <- fit.extremal.gauss()
} else if (new.model[[1]] %in% c("RPsmith", "smith")) {
res <- fit.smith()
} else if (new.model[[1]] %in% c("RPbernoulli", "binaryprocess")) {
res <- fit.bernoulli()
} else {
Z$model <- ExpliciteGauss(ReplaceC(Z$model))
res <- do.call("rffit.gauss",
c(list(Z, lower=lower, upper=upper, users.guess=users.guess,
optim.control=optim.control,
transform=transform,
recall = FALSE),
if (!missing(methods)) list(mle.methods = methods),
if (!missing(sub.methods)) list(lsq.methods=sub.methods)
## "internal" : name should not be changed; should always
## be last method!
))
}
if (RFopt$general$returncall)
attr(res, "call") <- as.character(deparse(match.call()))
attr(res, "coord_system") <- .Call(C_GetCoordSystem,
as.integer(MODEL_MLE),
RFopt$coords$coord_system,
RFopt$coords$new_coord_system)
return(res)
}
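## Illustrative call sketch (assumptions only, kept as comments so nothing is
## executed at package load): a typical workflow around the function above
## might look like
##   x <- seq(0, 10, 0.5)
##   d <- RFsimulate(RMexp(var=2, scale=1.5), x=x, y=x)
##   fit <- RFfit(RMexp(var=NA, scale=NA), data=d)
##   summary(fit); coef(fit)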
|
/R/RFfit.R
|
no_license
|
cran/RandomFields
|
R
| false | false | 29,952 |
r
|
## Authors
## Martin Schlather, schlather@math.uni-mannheim.de
##
##
## Copyright (C) 2015 -- 2017 Martin Schlather
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 3
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
### !!!!!!!!!!!! ATTENTION !!!!!!!!!!!! TREND as a cov-fct still
### needs to be programmed !!!
## source("~/R/RF/RandomFields/R/MLES.R")
## PrintLevels
## 0 : no message
## 1 : important error messages
## 2 : warnings
## 3 : minimum debugging information
## 5 : extended debugging information
## now only global naturalscaling (yes / no)
## later a function could be written that converts the naturalscaling;
## in principle CMbuild, but backwards with 1/newscale and inserted
## into possibly already existing $ operators
#While reading a paper on the train to Munich this morning I came across a reference to an R package "mlegp: Maximum likelihood estimates of Gaussian processes". But you surely know it already!
# stop("")
# problem: natscale; currently implemented twice, once via
# scale/aniso (user) and once duplicated -- one of the two has to go
## LSQ variogram for trend = const.
## can be improved, especially for fixed effects, but is also limited
## for random effects -> BA/MA
## REML is missing
## users.guess must be turned into a list of several suggestions !!! And then RFfit must run a recursive call with all values obtained so far !!
## central C interfaces
## .C(C_PutValuesAtNA, RegNr, param)
## bins for Distances chosen automatically
## with repetitions the trends/fixed effects are the same, but the
## random effects have to differ.
## with list(data), trend/fixed effects are also estimated separately.
## Extensions: Emilio's Bi-MLE, covariance-matrix inversion via fft or
## via INLA, splitting large data sets into smaller "independent" ones.
###################################
## !!! Mixed Model Equations !!! ##
###################################
## accessing slots
accessByNameOrNumber <- function(x, i, j, drop=FALSE) {
stopifnot(length(i)==1)
if (is.numeric(i)) i <- slotNames(x)[i]
return(accessSlotsByName(x=x, i=i, j=j, drop=drop))
}
setMethod("[", signature = "RFfit", def=accessByNameOrNumber)
### to do : ask Paulo
#effects_RFfit <- function(OP, object, method) {
# eff <- RFrandef(object=object, method=method, OP=OP)
# linpart <- fitted_RFfit(OP=OP, object=object, method=method)
# stop("unclear how these two results should be combined in the output")
#}
#effects_RMmodelFit <- function(...) stop("'effects' can only be used with the original and sp_conform output of 'RFfit'.")
#setMethod(f="effects", signature='RFfit',
# definition=function(object, method="ml")
# effects_RFfit("@", object=object, method=method))#
#setMethod(f="effects", signature='RMmodelFit',
# definition=function(object, newdata=NULL) effects_RMmodelFit())#
#effects.RM_modelFit <- function(object, ...) effects_RMmodelFit()
#effects.RF_fit <- function(object, method="ml") effects_RMmodelFit()
simulate_RFfit <- function(OP, object, newdata, conditional, method) {
Z <- do.call(OP, list(object, "Z"))
L <- length(Z$data)
ans <- rep(list(NULL), L)
m <- ModelParts(object[method], effects=Z$effect, complete=FALSE) ## no params
if (conditional) {
for (i in 1:L) {
ans[[i]] <- RFsimulate(model=m$model,
data = Z$data[[i]],
given = Z$coord[[i]],
x = if (!is.null(newdata)) newdata,
err.model = m$err.model)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
return (if (L == 1) ans[[1]] else ans)
} else {
for (i in 1:L) {
      ans[[i]] <- RFsimulate(model=m$model,
x = if (length(newdata)==0) Z$coord[[i]]
else newdata)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
  }
  return (if (L == 1) ans[[1]] else ans)
}
simulate_RMmodelFit <- function(...) stop("'simulate' can only be used with the original and sp_conform output of 'RFfit'.")
setMethod(f="simulate", signature='RFfit',
definition=function(object, newdata=NULL,
conditional=!is.null(newdata), method="ml")
simulate_RFfit("@", object=object, conditional=conditional,
method=method))#
setMethod(f="simulate", signature='RMmodelFit',
definition=function(object, newdata=NULL) simulate_RMmodelFit())#
simulate.RM_modelFit <- function(object, ...) simulate_RMmodelFit()
simulate.RF_fit <- function(object, method="ml") simulate_RMmodelFit()
predict_RFfit <- function(OP, object, newdata, impute, method) {
Z <- do.call(OP, list(object, "Z"))
L <- length(Z$data)
ans <- rep(list(NULL), L)
if (impute) {
if (length(newdata) > 0) stop("for imputing, 'newdata' may not be given")
for (i in 1:L) {
ans[[i]] <- RFinterpolate(model=object[method],
data = Z$data[[i]],
given = Z$coord[[i]],
err.model = NA)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
} else {
for (i in 1:L) {
ans[[i]] <- RFinterpolate(model=object[method],
data = Z$data[[i]],
given = Z$coord[[i]],
x = if (!is.null(newdata)) newdata else
Z$coord,
err.model = NA)
      if (is.list(ans[[i]])) stop("the case with NAs not completely programmed yet. Please let the maintainer know that it is needed")
}
}
return (if (L == 1) ans[[1]] else ans)
}
predict_RMmodelFit <- function(...) stop("'predict' can only be used with the original and sp_conform output of 'RFfit'.")
setMethod(f="predict", signature='RFfit',
definition=function(object, newdata=NULL, impute=FALSE, method="ml")
predict_RFfit("@", object=object, newdata=newdata,
impute=impute, method=method))#
setMethod(f="predict", signature='RMmodelFit',
definition=function(object, newdata=NULL) predict_RMmodelFit())#
predict.RM_modelFit <- function(object, ...)
predict_RMmodelFit(object=object, ...)
predict.RF_fit <- function(object, method="ml")
predict_RMmodelFit(object, method=method)
coef_RMmodelFit <- function(OP, object) {
covariat <- do.call(OP, list(object, "covariat"))
glbl.var <- do.call(OP, list(object, "globalvariance"))
p <- do.call(OP, list(object, "param"))
if (length(covariat) > 0) covariat <- as.matrix(covariat)
nr_p <- nrow(p)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_p - length(glbl.var)))
p <- cbind(p, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow= nr_p - nrow(covariat))))
#class(p) <- "coef.RMmodelFit"
p[1, ]
}
setMethod(f="coef", signature='RMmodelFit',
definition=function(object) coef_RMmodelFit("@", object))#
setMethod(f="coef", signature='RFfit',
definition=function(object, method="ml")
coef_RMmodelFit("@", object[method]))#
coef.RM_modelFit <- function(object, ...)
coef_RMmodelFit("$", object)
coef.RF_fit <- function(object, method="ml")
coef_RMmodelFit("$", object[method])
residuals_RMmodelFit <- function(OP, object) {
resid <- do.call(OP, list(object, "residuals"))
message("Note that 'residuals' equals the difference between the data and the linear part (fixed effects).")
if (length(resid) == 1) resid[[1]] else resid
}
setMethod(f="residuals", signature='RMmodelFit',
definition=function(object) residuals_RMmodelFit("@", object))#
setMethod(f="residuals", signature='RFfit',
definition=function(object, method="ml")
residuals_RMmodelFit("@", object[method]))#
residuals.RM_modelFit <- function(object, ...)
residuals_RMmodelFit("$", object)
residuals.RF_fit <- function(object, method="ml")
residuals_RMmodelFit("$", object[method])
fitted_RFfit <- function(OP, object, method) {
data <- do.call(OP, list(object, "Z"))$data
resid <- do.call(OP, list(object[method], "residuals"))
for (i in 1:length(data)) data[[i]] <- data[[i]] - resid[[i]]
message("Note that 'fitted' equals the linear part (fixed effects).")
if (length(data) > 1) data else
if (ncol(data[[1]]) > 1) data[[1]] else as.vector(data[[1]])
}
fitted_RMmodelFit <- function(...) stop("'fitted' can only be used with the original output of 'RFfit', not with some of its extraction.")
setMethod(f="fitted", signature='RMmodelFit',
definition=function(object) fitted_RMmodelFit())#
setMethod(f="fitted", signature='RFfit',
definition=function(object, method="ml")
fitted_RFfit("@", object=object, method=method))#
fitted.RM_modelFit <- function(object, ...) fitted_RMmodelFit()
fitted.RF_fit <- function(object, method="ml")
fitted_RFfit("$", object=object, method=method)
RFhessian <- function(model) {
method <- "ml"
if (is(model, "RF_fit")) return(model[[method]]@hessian)
else if (is(model, "RFfit")) return(model[method]$hessian)
else stop("'model' is not an output of 'RFfit'")
}
anova.RFfit <- function(object, ...) RFratiotest(nullmodel=object, ...)
anova.RF_fit <- function(object, ...) RFratiotest(nullmodel=object, ...)
anova.RMmodelFit <- function(object, ...) RFratiotest(nullmodel=object, ...)
anova.RM_modelFit <- function(object, ...) RFratiotest(nullmodel=object, ...)
setMethod(f="anova", signature=CLASS_FIT, anova.RFfit)#
setMethod(f="anova", signature='RFfit', anova.RFfit)#
boundary_values <- function(variab) {
upper.bound <- variab[4, , drop=FALSE]
lower.bound <- variab[3, , drop=FALSE]
# sd <- variab[2, ]
variab <- variab[1, , drop=FALSE]
lidx <- variab < lower.bound + 1e-8
uidx <- variab > upper.bound - 1e-8
nl <- sum(lidx, na.rm=TRUE)
nu <- sum(uidx, na.rm=TRUE)
if (nl + nu > 0) {
lidx[is.na(lidx)] <- FALSE
uidx[is.na(uidx)] <- FALSE
txt <-
paste(sep="", "Note that the (possibly internal) fitted variable",
if (nl > 0)
paste(if (nl > 1) "s " else " ",
paste("'", colnames(variab)[lidx], "'", sep="", collapse=", "),
if (nl == 1) " is " else " are ",
"close to or on the effective lower boundary", sep=""),
if (nl > 0 && nu > 0) " and the variable",
if (nu > 0)
paste(if (nu > 1) "s " else " ",
paste("'", colnames(variab)[uidx], "'",
sep="", collapse=", "),
if (nu == 1) "is" else "are",
"close to or on the effective upper boundary"),
".\nHence the gradient of the likelihood function might not be zero and none of the\nreported 'sd' values might be reliable.")
} else txt <- NULL
return(txt)
}
summary_RMmodelFit <- function(OP, object, ..., isna.param) {
model <- if (OP == "@") PrepareModel2(object, ...) else object$model
covariat <- do.call(OP, list(object, "covariat"))
glbl.var <- do.call(OP, list(object, "globalvariance"))
p <- do.call(OP, list(object, "param"))
r <- do.call(OP, list(object, "residuals"))
v <- do.call(OP, list(object, "variab"))
l <- list(model=model,
loglikelihood=do.call(OP, list(object, "likelihood")),
AIC = do.call(OP, list(object, "AIC")),
AICc= do.call(OP, list(object, "AICc")),
BIC = do.call(OP, list(object, "BIC")),
residuals=if (length(r) == 1) r[[1]] else r)
if (missing(isna.param)) isna.param <- any(is.na(p))
l$boundary <- boundary_values(v)
if (length(covariat) > 0) covariat <- as.matrix(covariat)
if (!any(is.na(p[1, ]))) {
nr_p <- nrow(p)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_p - length(glbl.var)))
l$param <- cbind(p, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow= nr_p - nrow(covariat))))
}
if (isna.param || !is.null(l$boundary)) {
nr_v <- nrow(v)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_v - length(glbl.var)))
l$variab <- cbind(v, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow=nr_v - nrow(covariat)))
)
}
class(l) <- "summary.RMmodelFit"
l
}
summary.RMmodelFit <- function(object, ..., isna.param) {
summary_RMmodelFit("@", object, ..., isna.param=isna.param)
}
setMethod(f="summary", signature=CLASS_FIT, summary.RMmodelFit)#
summary.RM_modelFit <- function(object, ..., isna.param) {
summary_RMmodelFit("$", object, ..., isna.param=isna.param)
}
print.summary.RMmodelFit <- function(x, ...) {
printVariab <- function(x) {
cat("Internal variables:\n")
if (is.null(x$boundary)) print(x$variab[1:2, , drop=FALSE], ..., na.print="-")#
else print(x$variab, ..., na.print="-")#
cat("\n")
return(ncol(x$variab))
}
printParam <- function(param) {
cat("User's variables:\n")
print(param, ..., na.print="-")#
return(ncol(param))
}
printRest <- function(...) {
x <- unlist(list(...))
stopifnot(length(x) == 3)
names(x) <- c("#variab", "loglikelihood", "AIC")
cat("\n")
print(x) #
cat("\n")
}
if (RFoptions()$general$detailed_output) str(x$model, no.list=TRUE) #
cat("\n")
np <- AIC <- ll <- nm <- NA
if (length(x$submodels) > 0) {
cur_name <- ""
len <- length(x$submodels)
for (i in 1:len) {
sm <- x$submodels[[i]]
n <- sm$report
nnxt <- if (i==len) "" else x$submodels[[i+1]]
if (n != cur_name) {
if (i > 1) {
if (!is.null(sm$param)) printParam(cparam)
printRest(np, ll, AIC) #
if (!is.null(sm$boundary)) cat(sm$boundary, "\n\n")
}
if (nnxt != n && length(sm$fixed) > 0) {
nX <- paste(sep="", n, " (",
paste(c(if (length(sm$fixed$zero) > 0)
paste(colnames(x$param)[sm$fixed$zero], "= 0"),
if (length(sm$fixed$one) > 0)
paste(colnames(x$param)[sm$fixed$one], "= 1")),
sep=", "),
")")
} else nX <- n
cat(if (!is.na(nm)) cat("\n"), nX, "\n",
paste(rep("=", min(80, nchar(nX))), collapse=""),
"\n", sep="")
np <- 0
AIC <- 0
ll <- 0
cparam <- NULL
nm <- 1
}
if (!is.null(sm$variab)) {
if (nm > 1 || (i<len && n==nnxt)) cat("model", nm, ", ")
printVariab(sm)
}
if (!is.null(sm$param)) {
param <- x$param * NA
param[, sm$p.proj] <- sm$param
fixed <- sm$fixed
if (length(fixed) > 0) {
param[1, fixed$zero] <- 0
param[1, fixed$one] <- 1
}
# if (!is.null(cparam)) cparam <- rbind(cparam, NA)
cparam <- rbind(cparam, param)
}
np <- np + length(sm$p.proj)
ll <- ll + sm$loglikelihood
AIC <- AIC + sm$AIC
nm <- nm + 1;
cur_name <- n
}
if (!is.null(sm$param)) printParam(param)
printRest(np, ll, AIC) #
if (!is.null(sm$boundary)) cat(sm$boundary, "\n\n")
cat("\nuser's model\n", paste(rep("=", 12), collapse=""), "\n", sep="")
}
np <- NA
if (!is.null(x$variab)) np <- printVariab(x)
if (!is.null(x$param)) np <- printParam(x$param)
printRest(np, x[c("loglikelihood", "AIC")])#
if (!is.null(x$boundary)) cat(x$boundary, "\n\n")
invisible(x)
}
print.RMmodelFit <- function(x, ...)
print.summary.RMmodelFit(summary.RMmodelFit(x, ...))#
print.RM_modelFit <- function(x, ...)
print.summary.RMmodelFit(summary.RM_modelFit(x, ...))#
setMethod(f="show", signature=CLASS_FIT,
definition=function(object) print.RMmodelFit(object))#
summary.RFfit <- function(object, ..., method="ml", full=FALSE) {
s <- summary.RMmodelFit(object[method])
len <- length(object@submodels)
if (full && length(object@submodels) > 0) {
submodels <- list()
for (i in 1:len) {
      ## was summary.RM_modelFit
      submodels[[i]] <- summary(object@submodels[[i]][[method]], # do not call
                                isna.param=is.null(s$param))     # 'summary.RM_modelFit'
      submodels[[i]]$report <- object@submodels[[i]]$report      # directly here!
submodels[[i]]$p.proj <- object@submodels[[i]]$p.proj
submodels[[i]]$fixed <- object@submodels[[i]]$fixed
}
s$submodels <- submodels
}
s
}
summary.RF_fit <- function(object, ..., method="ml", full=FALSE) {
s <- summary.RM_modelFit(object[[method]])
len <- length(object$submodels)
if (full && len > 0) {
submodels <- list()
for (i in 1:len) {
submodels[[i]] <- summary.RM_modelFit(object$submodels[[i]][[method]],
isna.param=is.null(s$param))
submodels[[i]]$report <- object$submodels[[i]]$report
submodels[[i]]$p.proj <- object$submodels[[i]]$p.proj
submodels[[i]]$fixed <- object$submodels[[i]]$fixed
}
s$submodels <- submodels
}
s
}
print.RFfit <- function(x, ..., method="ml", full=FALSE) {
print.summary.RMmodelFit(summary.RFfit(x, ..., method=method, full=full))
}
setMethod(f="show", signature='RFfit',
definition=function(object) print.RFfit(object))#
print.RF_fit <- function(x, ..., method="ml", full=FALSE) {
print.summary.RMmodelFit(summary.RF_fit(x, ..., method=method, full=full))
}
logLik.RF_fit <- function(object, REML = FALSE, ..., method="ml") {
if (hasArg("REML")) stop("parameter 'REML' is not used. Use 'method' instead")
## according to geoR
val <- object[[method]]$likelihood
attr(val, "df") <- object$number.of.parameters
attr(val, "method") <- method
class(val) <- "logLik"
return(val)
}
logLik.RFfit <- function(object, REML = FALSE, ..., method="ml") {
if (hasArg("REML")) stop("parameter 'REML' is not used. Use 'method' instead")
## according to geoR
val <- object[method]@likelihood
attr(val, "df") <- object@number.of.parameters
attr(val, "method") <- method
class(val) <- "logLik"
return(val)
}
print.AICRFfit<- function(x, ..., digits=3) {
## nur deshalb
fstcol <- 3
sndcol <- 55
trdcol <- 4
forthcol<-9
leer <- formatC("", width=fstcol)
size <- max(abs(x[[2]]))
size <- if (size>0) ceiling(log(size) / log(10)) else 1
cat(leer, formatC("model", flag="-", width=sndcol), " ",
formatC(names(x)[1], width=trdcol),
formatC(names(x)[2], width=forthcol), "\n", sep="")
names <- attr(x, "row.names")
for (i in 1:length(names)) {
cat(formatC(i, width=fstcol, flag="-"))
if (nchar(xx <- names[i]) <= sndcol)
cat(formatC(xx, width=sndcol, flag="-"))
else {
yy <- strsplit(xx, " \\* ")[[1]]
for (j in 1:length(yy)) {
ncyy <- nchar(yy[j])
if (ncyy <= sndcol && j==length(yy))
cat(format(yy[j], width=sndcol, flag="-"))
else {
if (ncyy <= sndcol - 2) {
cat(yy[j])
} else {
zz <- strsplit(yy[j], ", ")[[1]]
ncyy <- 0
lenzz <- length(zz)
for (k in 1:lenzz) {
len <- nchar(zz[k])
if (k > 1 && len > sndcol - 1) {
cat("\n", leer, zz[k], sep="")
if (k < lenzz)
cat(formatC(",", flag="-", width=pmax(1, sndcol-len)))
} else {
if (ncyy + len > sndcol - 1) {
cat("\n", leer, sep="")
ncyy <- len
} else {
ncyy <- ncyy + len
}
cat(zz[k])
if (k < lenzz) {
cat(", ")
ncyy <- ncyy + 2
}
}
} # for k 1:lenzz
} # split according to commata
if (j < length(yy)) cat(" *\n", leer, sep="")
else if (ncyy < sndcol) cat(formatC("", width=sndcol-ncyy))
}
} # for 1:products
} ## not be written in a single line
cat("",
formatC(x[[1]][i], width=trdcol),
formatC(x[[2]][i], format="f", width=size + digits + 1,
digits=digits),"\n")
}
}
fullAIC <- function(x, method="ml", AIC="AIC") {
ats <- approx_test_single(x, method=method)$result
values <- c("name", "df", AIC)
model2 <- paste("model2.", values, sep="")
ats2 <- ats[ !is.na(ats[, model2[2]]), model2]
colnames(ats2) <- values
if (ats2$df < 0) ats2 <- NULL
ats <- ats[, paste("model1.", values, sep="")]
colnames(ats) <- values
if (ats$df < 0) ats <- NULL
ats <- unique(rbind(ats, ats2))
dimnames(ats) <- list(1:nrow(ats), colnames(ats))
names <- as.character(ats$name)
ats <- ats[-1]
attr(ats, "row.names") <- names
class(ats) <- "AICRFfit"
ats
}
AIC.RFfit <- function(object, ..., k=2, method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method)
} else {
AIC <- object[method]@AIC
names(AIC) <- "AIC"
AIC
}
}
AIC.RF_fit <- function(object, ..., k=2, method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method)
} else {
AIC <- object[[method]]$AIC
names(AIC) <- "AIC"
AIC
}
}
AICc.RFfit <- function(object, ..., method="ml", full=FALSE) {
if (full) {
stop("for 'AICc' the option 'full=TRUE' has not been programmed yet.")
fullAIC(object, method=method)
} else {
AIC <- object[method]@AIC
names(AIC) <- "AICc"
AIC
}
}
AICc.RF_fit <- function(object, ..., method="ml", full=TRUE) {
if (full) {
stop("for 'AICc' the option 'full=TRUE' has not been programmed yet.")
fullAIC(object, method=method)
} else {
AIC <- object[[method]]$AIC
names(AIC) <- "AICc"
AIC
}
}
BIC.RFfit <- function(object, ..., method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method, AIC="BIC")
} else {
BIC <- object[method]@BIC
names(BIC) <- "BIC"
BIC
}
}
BIC.RF_fit <- function(object, ..., method="ml", full=TRUE) {
if (full) {
fullAIC(object, method=method, AIC="BIC")
} else {
BIC <- object[[method]]$BIC
names(BIC) <- "BIC"
BIC
}
}
resid.RFfit <- function(object, ..., method="ml") {
resid <- object[method]@residuals
names(resid) <- "residuals"
resid
}
resid.RF_fit <- function(object, ..., method="ml") {
resid <- object[[method]]$residuals
names(resid) <- "residuals"
resid
}
residuals.RFfit <- function(object, ..., method="ml")
resid.RFfit(object=object, method=method)
residuals.RF_fit <- function(object, ..., method="ml")
resid.RF_fit(object=object, method=method)
coef_RMmodelFit <- function(OP, object, ...) {
covariat <- do.call(OP, list(object, "covariat"))
glbl.var <- do.call(OP, list(object, "globalvariance"))
p <- do.call(OP, list(object, "param"))
if (length(covariat) > 0) covariat <- as.matrix(covariat)
nr_p <- nrow(p)
if (length(glbl.var) > 0)
glbl.var <- c(glbl.var, rep(NA, nr_p - length(glbl.var)))
p <- cbind(p, glbl.var,
if (length(covariat) > 0)
rbind(covariat, matrix(NA, ncol=ncol(covariat),
nrow= nr_p - nrow(covariat))))
#class(p) <- "coef.RMmodelFit"
p[1, ]
}
setMethod(f="coef", signature='RMmodelFit',
definition=function(object) coef_RMmodelFit("@", object))#
setMethod(f="coef", signature='RFfit',
definition=function(object) coef_RMmodelFit("@", object["ml"]))#
coef.RM_modelFit <- function(object, ...) coef_RMmodelFit("$", object, ...)
coef.RF_fit <- function(object, ...) coef_RMmodelFit("$", object["ml"], ...)
setMethod(f="plot", signature(x="RFfit", y="missing"),
function(x, y, ...) RFplotEmpVariogram(x, ...))
setMethod(f="persp", signature(x="RFfit"),
function(x, ...) RFplotEmpVariogram(x, ..., plotmethod="persp"))
contour.RFfit <- contour.RFempVariog <-
function(x,...) {
stopifnot(!( (is(x, "RFfit") && is.list(x@ev@centers))
|| (is(x, "RFempVariog") && is.list(x@centers))
))
RFplotEmpVariogram(x, ..., plotmethod="contour")
}
ExpliciteGauss <- function(model) {
if (model[[1]] != "RPgauss" && model[[1]] != "gauss.process") {
boxcox <- RFoptions()$gauss$boxcox
if (any(is.na(boxcox)) || any(boxcox[c(TRUE, FALSE)] != Inf))
return(list("RPgauss", boxcox=boxcox, model))
}
return(model)
}
RFfit <-
function(model, x, y=NULL, z=NULL, T=NULL, grid=NULL, data,
lower=NULL, upper=NULL,
methods, # "reml", "rml1"),
sub.methods,
## "internal" : name should not be changed; should always be last
## method!
optim.control=NULL,
users.guess=NULL,
distances=NULL, dim,
transform=NULL,
params=NULL,
##type = c("Gauss", "BrownResnick", "Smith", "Schlather",
## "Poisson"),
...)
{
.C(C_NoCurrentRegister)
RFoptOld <- internal.rfoptions(xyz=length(y)!=0,...,
internal.examples_reduced = FALSE,
RELAX=is(model, "formula"))
on.exit(RFoptions(LIST=RFoptOld[[1]]))
RFopt <- RFoptOld[[2]]
if (length(params) > 0) {
if ((!is.na(RFopt$fit$estimate_variance_globally) &&
RFopt$fit$estimate_variance_globally) &&
RFopt$basic$printlevel > 0)
message("Value of option 'hestimate_variance_globally' is ignored.")
RFopt$fit$estimate_variance_globally <- FALSE
RFoptions(fit.estimate_variance_globally = FALSE)
}
fit <- RFopt$fit
if (RFopt$general$vdim_close_together)
stop("'vdim_close_together' must be FALSE")
if (is.data.frame(data)) {
name <- "RFfit.user.dataset"
do.call("attach", list(what=data, name=name))
on.exit(detach(name, character.only = TRUE), add=TRUE)
}
## in UnifyData the further.models that contain only the parameter data
## are turned into genuine models
further.models <- list()
models <- c("lower", "upper", "users.guess", "parscale")
if (paramlist <- length(params) > 0) {
parscale <- optim.control$parscale
for (m in models) {
fm <- get(m)
if (!is.null(fm) && !is.numeric(fm))
further.models[[m]] <- PrepareModel2(fm, ...)
}
}
## Print(further.models, model)
Z <- UnifyData(model=model, x=x, y=y, z=z, T=T, grid=grid,
data=data, distances=distances, dim=dim,
RFopt=RFopt,
mindist_pts = RFopt$fit$smalldataset / 2,
further.models = further.models, params=params, ...)
## Print(Z); kkk
Z <- BigDataSplit(Z, RFopt)
if (!hasArg("transform")) transform <- NULL
if (paramlist) {
for (m in models)
if (!is.null(get(m)) && !is.numeric(get(m)))
assign(m, Z$further.models[[m]])
optim.control$parscale <- parscale
if (!is.null(Z$transform)) {
if (!is.null(transform))
stop("argument 'transform' may not be given if 'params' is given")
transform <- Z$transform
}
} else {
parscale <- optim.control$parscale
for (m in models)
if (!is.null(get(m)) && !is.numeric(get(m)))
assign(m, ReplaceC(PrepareModel2(get(m), ...)))
optim.control$parscale <- parscale
}
new.model <- Z$model
if (new.model[[1]] %in% c("RPpoisson", "poisson")) {
res <- fit.poisson()
} else if (new.model[[1]] %in% c("BRmixed", "BRshifted", "BRmixedIntern",
"RFbrownresnick")) {
res <- fit.br()
} else if (new.model[[1]] %in% c("RPschlather", "extremalgauss")) {
res <- fit.extremal.gauss()
} else if (new.model[[1]] %in% c("RPsmith", "smith")) {
res <- fit.smith()
} else if (new.model[[1]] %in% c("RPbernoulli", "binaryprocess")) {
res <- fit.bernoulli()
} else {
Z$model <- ExpliciteGauss(ReplaceC(Z$model))
res <- do.call("rffit.gauss",
c(list(Z, lower=lower, upper=upper, users.guess=users.guess,
optim.control=optim.control,
transform=transform,
recall = FALSE),
if (!missing(methods)) list(mle.methods = methods),
if (!missing(sub.methods)) list(lsq.methods=sub.methods)
## "internal" : name should not be changed; should always
## be last method!
))
}
if (RFopt$general$returncall)
attr(res, "call") <- as.character(deparse(match.call()))
attr(res, "coord_system") <- .Call(C_GetCoordSystem,
as.integer(MODEL_MLE),
RFopt$coords$coord_system,
RFopt$coords$new_coord_system)
return(res)
}
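## Illustrative usage sketch (an addition, not part of the package source): the
## model choice and parameter values below are arbitrary and only meant to
## exercise the RFfit / print / AIC / coef methods defined above; the block is
## wrapped in if (FALSE) so it never runs when this file is sourced.
if (FALSE) {
  x <- seq(0, 10, 0.25)
  true.model <- RMexp(var = 2, scale = 1.5) + RMtrend(mean = 1)
  dta <- RFsimulate(true.model, x = x)        # toy data on a 1D grid
  est.model <- RMexp(var = NA, scale = NA) + RMtrend(mean = NA)
  fit <- RFfit(est.model, data = dta)         # maximum-likelihood fit
  print(fit)                                  # print.RFfit defined above
  AIC(fit)                                    # AIC.RFfit defined above
  coef(fit)                                   # coef method defined above
}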
|
setwd("C:/Users/admin/Desktop/data")
data <- read.csv("flats.csv", sep=";")
# library(foreign)
# data_flats <- read.dta(file.choose())
data$price_metr <- data$price / data$totsp
data$livesp_walk <- data$walk * data$livesp
data$kitsp_walk <- data$walk * data$kitsp
data$dist_walk <- data$walk * data$dist
data$metrdist_walk <- data$walk * data$metrdist
data$floor_walk <- data$walk * data$floor
data$floors_walk <- data$walk * data$floors
reg <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors + walk + livesp_walk + kitsp_walk + dist_walk + metrdist_walk + floors_walk, data=data)
install.packages("car")
library(car)
r0 = "walk = 0"
r1 = "livesp_walk = 0"
r2 = "kitsp_walk = 0"
r3 = "dist_walk = 0"
r4 = "metrdist_walk = 0"
r5 = "floors_walk = 0"
linearHypothesis(reg, c(r0, r1, r2, r3, r4, r5), verbose=TRUE)
coefs <- names(coef(reg))
walk_coefs <- coefs[grep("walk", coefs)]
linearHypothesis(reg, walk_coefs)
reg0 <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors, data=data)
reg1 <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors, data=data, subset=(walk==1))
reg2 <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors, data=data, subset=(walk==0))
RSS <- NULL
RSS$r <- sum(reg0$residuals^2)
RSS$ur1 <- sum(reg1$residuals^2)
RSS$ur2 <- sum(reg2$residuals^2)
k <- reg0$rank
numerator <- (RSS$r - (RSS$ur1 + RSS$ur2))/k
denominator <- (RSS$ur1 + RSS$ur2) / (length(reg0$residuals) - 2*k)
chow <- numerator / denominator
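# Added sketch (not part of the original script; the chow_* names are new):
# under the null of identical coefficients in the two regimes, the Chow
# statistic above follows an F distribution with k and n - 2k degrees of
# freedom, so a p-value can be computed directly from the objects already made.
chow_df1 <- k
chow_df2 <- length(reg0$residuals) - 2 * k
chow_pvalue <- pf(chow, chow_df1, chow_df2, lower.tail = FALSE)
chow_pvalue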
|
/listings/ex-7.4.R
|
no_license
|
Fifis/ekonometrika-bakalavr
|
R
| false | false | 1,480 |
r
|
setwd("C:/Users/admin/Desktop/data")
data <- read.csv("flats.csv", sep=";")
# library(foreign)
# data_flats <- read.dta(file.choose())
data$price_metr <- data$price / data$totsp
data$livesp_walk <- data$walk * data$livesp
data$kitsp_walk <- data$walk * data$kitsp
data$dist_walk <- data$walk * data$dist
data$metrdist_walk <- data$walk * data$metrdist
data$floor_walk <- data$walk * data$floor
data$floors_walk <- data$walk * data$floors
reg <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors + walk + livesp_walk + kitsp_walk + dist_walk + metrdist_walk + floors_walk, data=data)
install.packages("car")
library(car)
r0 = "walk = 0"
r1 = "livesp_walk = 0"
r2 = "kitsp_walk = 0"
r3 = "dist_walk = 0"
r4 = "metrdist_walk = 0"
r5 = "floors_walk = 0"
linearHypothesis(reg, c(r0, r1, r2, r3, r4, r5), verbose=TRUE)
coefs <- names(coef(reg))
walk_coefs <- coefs[grep("walk", coefs)]
linearHypothesis(reg, walk_coefs)
reg0 <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors, data=data)
reg1 <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors, data=data, subset=(walk==1))
reg2 <- lm(price_metr ~ 1 + livesp + kitsp + dist + metrdist + floors, data=data, subset=(walk==0))
RSS <- NULL
RSS$r <- sum(reg0$residuals^2)
RSS$ur1 <- sum(reg1$residuals^2)
RSS$ur2 <- sum(reg2$residuals^2)
k <- reg0$rank
numerator <- (RSS$r - (RSS$ur1 + RSS$ur2))/k
denominator <- (RSS$ur1 + RSS$ur2) / (length(reg0$residuals) - 2*k)
chow <- numerator / denominator
|
## Getting dataset
data <- read.csv("./household_power_consumption.txt",
header=T,
sep=';',
na.strings="?",
nrows=2075259,
check.names=F,
stringsAsFactors=F,
comment.char="",
quote='\"')
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## Subsetting the data
data_sub <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data)
## Converting dates using POSIXct() and as.Date()
datetime <- paste(as.Date(data_sub$Date), data_sub$Time)
data_sub$Datetime <- as.POSIXct(datetime)
## Plot2.R
plot(data_sub$Global_active_power~data_sub$Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
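## Optional refinement (a sketch added here, not part of the original plot2.R):
## the same two-day subset can be obtained after a single faster read with
## data.table::fread; 'dt' and 'data_sub_alt' are hypothetical names introduced
## only for this illustration and are not used by the plotting code above.
library(data.table)
dt <- fread("./household_power_consumption.txt", sep = ";", na.strings = "?")
data_sub_alt <- as.data.frame(dt[as.Date(Date, format = "%d/%m/%Y") %in%
                                   as.Date(c("2007-02-01", "2007-02-02"))])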
|
/plot2.R
|
no_license
|
dingdata/ExData_Plotting1
|
R
| false | false | 835 |
r
|
|
#' Fit a random forest model
#'
#' @param formula A formula of the form \code{groups ~ x1 + x2 + ...}
#' That is, the response is the grouping factor and the right hand side
#' specifies the (non-factor) discriminators, and any transformations, interactions,
#' or other non-additive operators apart from \code{.} will be ignored.
#' @param data A \code{\link{data.frame}} from which variables specified
#' in formula are preferentially to be taken.
#' @param subset An optional vector specifying a subset of observations to be
#' used in the fitting process, or the name of a variable in \code{data}. It
#' may not be an expression.
#' @param weights An optional vector of sampling weights, or the
#' name of a variable in \code{data}. It may not be an expression.
#' @param output One of \code{"Importance"}, \code{"Prediction-Accuracy Table"} or \code{"Detail"}.
#' @param missing How missing data is to be treated. Options:
#' \code{"Error if missing data"},
#' \code{"Exclude cases with missing data"}, or
#' \code{"Imputation (replace missing values with estimates)"}.
#' @param seed The random number seed.
#' @param show.labels Shows the variable labels, as opposed to the variable names, in the outputs, where a
#' variable's label is an attribute (e.g., attr(foo, "label")).
#' @param sort.by.importance Sort the last column of the importance table
#' in descending order.
#' @param ... Other arguments to be supplied to \code{\link{randomForest}}.
#' @importFrom stats pnorm
#' @importFrom randomForest randomForest
#' @export
RandomForest <- function(formula,
data = NULL,
subset = NULL,
weights = NULL,
output = "Importance",
missing = "Exclude cases with missing data",
seed = 12321,
show.labels = FALSE,
sort.by.importance = TRUE,
...)
{
####################################################################
##### Error checking specific to this function ######
####################################################################
# prepareMachineLearningData called with strict.var.names
####################################################################
##### Reading in the data and doing some basic tidying ######
####################################################################
# Identify whether subset and weights are variables in the environment or in data.
subset.description <- try(deparse(substitute(subset)), silent = TRUE)
subset <- eval(substitute(subset), data, parent.frame())
weights.description <- try(deparse(substitute(weights)), silent = TRUE)
weights <- eval(substitute(weights), data, parent.frame())
prepared.data <- prepareMachineLearningData(formula, data, subset, subset.description,
weights, weights.description, missing, seed,
strict.var.names = TRUE)
unweighted.training.data <- prepared.data$unweighted.training.data
weighted.training.data <- prepared.data$weighted.training.data
####################################################################
##### Fitting the model. Ideally, this should be a call to #####
##### another function, with the output of that function #####
##### called 'original'. #####
####################################################################
set.seed(seed)
result <- list(original = suppressWarnings(randomForest(prepared.data$input.formula,
importance = TRUE,
data = weighted.training.data
, ...)))
####################################################################
##### Saving direct input and model-specific parameters #####
####################################################################
result$original$call <- match.call()
#result$original.subset <- CleanSubset(subset, nrow(data))
result$output <- output
result$missing <- missing
result$sort.by.importance <- sort.by.importance
result$z.statistics <- result$original$importance[, 1:(ncol(result$original$importance) - 1)] / result$original$importanceSD
result$p.values <- 2 * (1 - pnorm(abs(result$z.statistics)))
class(result) <- c("RandomForest", class(result))
####################################################################
##### Saving processed information #####
####################################################################
result <- saveMachineLearningResults(result, prepared.data, show.labels)
if (result$show.labels)
{
if (result$numeric.outcome)
names(result$original$importanceSD) <- result$variable.labels
else
rownames(result$original$importanceSD) <- result$variable.labels
}
attr(result, "ChartData") <- prepareRFChartData(result)
result
}
prepareRFChartData <- function(x)
{
if (x$output == "Importance")
{
output.data <- x$original$importance
if (x$show.labels)
rownames(output.data) <- x$variable.labels
return(output.data)
} else if (x$output == "Prediction-Accuracy Table")
return(ExtractChartData(x$confusion))
else
return(as.matrix(capture.output(print(x$original))))
}
#' @import randomForest
#' @importFrom flipFormat RandomForestTable FormatAsReal RandomForestTable ExtractCommonPrefix
#' @export
print.RandomForest <- function(x, ...)
{
if (x$show.labels)
rownames(x$original$importance) <- x$variable.labels
if (x$output == "Importance")
{
title <- paste0("Random Forest: ", x$outcome.label)
imp <- x$original$importance
extracted <- ExtractCommonPrefix(rownames(imp))
if (!is.na(extracted$common.prefix))
{
title <- paste0(title, " by ", extracted$common.prefix)
rownames(imp) <- extracted$shortened.labels
}
subtitle <- if (x$numeric.outcome)
paste("R-squared:", FormatAsReal(x$original$rsq[length(x$original$rsq)], decimals = 3))
else
{
err <- x$original$err.rate
accuracies <- 1 - err[nrow(err), ]
k <- length(accuracies)
correctPredictionsText(accuracies[1], colnames(err)[2:k], accuracies[2:k], out.of.bag = TRUE)
}
tbl <- RandomForestTable(imp,
x$z.statistics,
x$p.values,
x$sort.by.importance,
title = title,
subtitle = subtitle,
footer = x$sample.description)
print(tbl)
}
else if (x$output == "Prediction-Accuracy Table")
{
print(x$confusion)
}
else
{
x$original$call <- x$formula
print(x$original)
invisible(x)
}
}
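# Illustrative usage sketch (an addition, not part of the package source): 'iris'
# stands in for real data and the call assumes this package is attached; wrapped
# in if (FALSE) so it never runs when the file is sourced.
if (FALSE) {
  fit <- RandomForest(Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width,
                      data = iris, output = "Importance")
  print(fit)   # importance table rendered by print.RandomForest above
}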
|
/R/randomforest.R
|
no_license
|
daniellegrogan/flipMultivariates
|
R
| false | false | 7,293 |
r
|
|
beets <- c(41, 40, 41, 42, 44, 35, 41, 36, 47, 45)
no_beets <- c(51, 51, 50, 42, 40, 31, 43, 45)
c(xbar1=mean(beets), xbar2=mean(no_beets),
sd1=sd(beets), sd2=sd(no_beets))
#######################################################################
library("aplpack")
layout(1)
stem.leaf.backback(beets, no_beets, rule.line="Sturges")
boxplot(no_beets,beets,names=c("no beets", "beets"),horizontal = TRUE)
#######################################################################
require(stats);
require(graphics)
michelson <- transform(morley,
Expt = factor(Expt), Run = factor(Run))
xtabs(~ Expt + Run, data = michelson) # 5 x 20 balanced (two-way)
plot(Speed ~ Expt, data = michelson,
main = "Speed of Light Data", xlab = "Experiment No.")
fm <- aov(Speed ~ Run + Expt, data = michelson)
summary(fm)
fm0 <- update(fm, . ~ . - Run)
anova(fm0, fm)
#####################################################################
# ggplot2 examples
library(ggplot2)
# create factors with value labels
mtcars$gear <- factor(mtcars$gear,levels=c(3,4,5),
labels=c("3gears","4gears","5gears"))
mtcars$am <- factor(mtcars$am,levels=c(0,1),
labels=c("Automatic","Manual"))
mtcars$cyl <- factor(mtcars$cyl,levels=c(4,6,8),
labels=c("4cyl","6cyl","8cyl"))
# Kernel density plots for mpg
# grouped by number of gears (indicated by color)
qplot(mpg, data=mtcars, geom="density", fill=gear, alpha=I(.5),
main="Distribution of Gas Milage", xlab="Miles Per Gallon",
ylab="Density")
# Scatterplot of mpg vs. hp for each combination of gears and cylinders
# in each facet, transmission type is represented by shape and color
qplot(hp, mpg, data=mtcars, shape=am, color=am,
facets=gear~cyl, size=I(3),
xlab="Horsepower", ylab="Miles per Gallon")
# Separate regressions of mpg on weight for each number of cylinders
qplot(wt, mpg, data=mtcars, geom=c("point", "smooth"),
color=cyl,
main="Regression of MPG on Weight",
xlab="Weight", ylab="Miles per Gallon")
# Boxplots of mpg by number of gears
# observations (points) are overlayed and jittered
qplot(gear, mpg, data=mtcars, geom=c("boxplot", "jitter"),
fill=gear, main="Mileage by Gear Number",
xlab="", ylab="Miles per Gallon")
x<-scan("faithful.txt" ,
what = list(eruptions="", waiting=""),sep = ",");
data<-as.data.frame(x)
|
/BaoCao/R Script/chapter3.R
|
no_license
|
thuyltm/predictUsingProbability
|
R
| false | false | 2,426 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map2.r, R/map_by_chunk_id.r
\name{cmap2}
\alias{cmap2}
\alias{map_by_chunk_id}
\title{`cmap2` applies a function to two disk.frames}
\usage{
cmap2(.x, .y, .f, ...)
map_by_chunk_id(.x, .y, .f, ..., outdir)
}
\arguments{
\item{.x}{a disk.frame}
\item{.y}{a disk.frame}
\item{.f}{a function to be called on each chunk of x and y matched by
chunk_id}
\item{...}{not used}
\item{outdir}{output directory}
}
\description{
Perform a function on both disk.frames .x and .y, each chunk of .x and .y
gets run by .f(x.chunk, y.chunk)
}
\examples{
cars.df = as.disk.frame(cars)
cars2.df = cmap2(cars.df, cars.df, ~data.table::rbindlist(list(.x, .y)))
collect(cars2.df)
# clean up cars.df
delete(cars.df)
delete(cars2.df)
}
|
/man/cmap2.Rd
|
no_license
|
cran/disk.frame
|
R
| false | true | 825 |
rd
|
|
##Weak instrument testing and MVMR analysis of effect of metabolites on AMD.
#We use data of the effect sizes of each SNP on the 118 metabolites combined with the standard error of those SNP exposure associations
#(extracted from the GWAS results available at http://www.computationalmedicine.fi/data#NMR_GWAS).(1) We also use data on the SNP associations
#with age related macular degeneration (AMD) from Fritsche et al 2016 (2).
rm(list = ls(all=TRUE))
#functions defined for this analysis
library(remotes)
#install_github("WSpiller/MRChallenge2019")
library(data.table)
library(knitr)
library(tidyr)
library(dplyr)
library(devtools)
library(readxl)
library(MRChallenge2019)
source("app_functions.R")
dat <- Challenge_dat
dat_se <- data.frame(read.csv("data_incse.txt"))
NMRAdat <- NMRA_dat
names <- NMRAdat$Abbreviation
colnames(dat_se) <- gsub("_", ".", colnames(dat_se))
ids <- as.vector(dat_se$rsid)
row.names(dat_se) <- ids
dat_se <- dat_se[,2:(length(names)+1)]
names <- c("ldl", "hdl", "tg", names)
exp <- subset(dat, select=c(1,9,12,15,18,32:149))
pvals <- subset(dat, select=c(11,14,17,150:267))
colnames(exp) <- sub("beta_","",colnames(exp))
names(exp)[names(exp) == 'acAce'] <- 'AcAce'
colnames(pvals) <- sub("p_","",colnames(pvals))
ids <- exp$rsid
row.names(exp) <- ids
row.names(pvals) <- ids
dat_se <- data.frame(dat$se_amd, dat$se_ldl, dat$se_hdl, dat$se_tg, dat_se)
colnames(dat_se) <- gsub("dat.se_", "", colnames(dat_se))
Fstat <- data.frame()
for(x in 1:length(names)){
for(y in 1:length(ids)){
Fstat[ids[y],names[x]] <- (exp[ids[y],names[x]]/dat_se[ids[y],names[x]])^2
}
}
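# Equivalent vectorised computation (a sketch added for clarity, not in the
# original script). 'Fstat_vec' is a new name used only here: exp and dat_se
# share the exposure columns in 'names' and are both indexed by rsid, so the
# per-SNP F statistics (beta/se)^2 can be formed in one step.
Fstat_vec <- as.data.frame((as.matrix(exp[ids, names]) /
                              as.matrix(dat_se[ids, names]))^2)
# all.equal(Fstat, Fstat_vec) should be TRUE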
#import and sort out correlations (NB - correlations are calculated from ALSPAC data and therefore not currently publicly available)
correlations <- read_excel("correlations.xlsx")
correlations <- data.frame(correlations)
row.names(correlations) <- correlations[,1]
correlations[,1] <- NULL
#calculate the exposures with the most SNPs with an F>5 then keep all snps with individual F>5 for at least one of those exposures.
F.ind <- Fstrong(names[4:length(names)])
F.ind <- F.ind[order(-F.ind$no.snps),]
topexp <- row.names(F.ind[1:13,])
F.MR <- data.frame(Fstat[,topexp])
ex.MR <- data.frame(exp[,topexp])
maxF_row <- apply(F.MR,1,function(x) max(as.numeric(x)))
keep <- as.vector(as.numeric(maxF_row > 5))
ex.MR <- ex.MR[,1:length(topexp)]*keep
ex.MR[ex.MR == 0] <- NA
##MR for the final set of exposures
subexp <- c("XS.VLDL.P", "S.VLDL.PL", "L.LDL.L", "IDL.TG")
subexp_se <- c("XS.VLDL.P_se", "S.VLDL.PL_se", "L.LDL.L_se", "IDL.TG_se")
subexp_f <- c("XS.VLDL.P_f", "S.VLDL.PL_f", "L.LDL.L_f", "IDL.TG_f")
F.MR <- data.frame(Fstat[,subexp])
ex.MR <- data.frame(exp[,subexp])
maxF_row <- apply(F.MR,1,function(x) max(as.numeric(x)))
keep <- as.vector(as.numeric(maxF_row > 5))
ex.MR <- ex.MR[,1:length(subexp)]*keep
ex.MR[ex.MR == 0] <- NA
MR.subset <- summary(lm(dat$beta_amd~ -1 + ., data = ex.MR, weights = (dat$se_amd)^-2))$coefficients
conditionalF(subexp)
Fstrong(subexp)
kx <- length(subexp)
analysis.dat_all <- data.frame(exp[,c("amd",subexp)])
analysis.dat_all <- data.frame(cbind(analysis.dat_all, data.frame(dat_se[,c("amd",subexp)]), data.frame(Fstat[,c(subexp)])))
names(analysis.dat_all) <- c("amd",subexp, "amd_se", subexp_se, subexp_f)
F.analysis <- analysis.dat_all[,c(subexp_f)]
maxF_row <- apply(F.analysis,1,function(x) max(as.numeric(x)))
keep <- as.vector(as.numeric(maxF_row > 5))
analysis.dat_all <- analysis.dat_all[,1:length(c("amd",subexp, "amd_se", subexp_se, subexp_f))]*keep
analysis.dat_all[analysis.dat_all==0] <-NA
analysis.dat_all <- na.omit(analysis.dat_all)
analysis.dat <- analysis.dat_all
#MR.results <- MRfunction_jk(subexp)
#results <- MR.results
#analysis with varying correlations
maincorrelations <- correlations
corr <- correlations[c(subexp), c(subexp)]
#s <- "0"
#var.corr <- cbind(s, MR.results)
correlations <- corr + 0.75*(1 - corr)
results_up <- MRfunction_jk(subexp)
for(s in 1:68){
analysis.dat <- analysis.dat_all
analysis.dat[s,] <- NA
analysis.dat <- na.omit(analysis.dat)
temp <- MRfunction_jk(subexp)
results_up <- rbind(results_up, temp)
}
correlations <- 0.75*corr + diag(c(0.25,0.25,0.25,0.25), ncol = 4)
results_down <- MRfunction_jk(subexp)
for(s in 1:68){
analysis.dat <- analysis.dat_all
analysis.dat[s,] <- NA
analysis.dat <- na.omit(analysis.dat)
temp <- MRfunction_jk(subexp)
results_down <- rbind(results_down, temp)
}
save(results_up, file = "highcorr.Rda")
save(results_down, file = "lowcorr.Rda")
|
/app_varcov.R
|
no_license
|
eleanorsanderson/MVMRweakinstruments
|
R
| false | false | 4,563 |
r
|
|
RenderScatterplotUI <- function(input, output, session) {
fluidPage(
tabsetPanel(
tabPanel("Scatterplot",
uiOutput("singlescatterplot_ui")
),
tabPanel("Scatterplot Matrix",
uiOutput("scatterplotmatrix_ui")
)
)
)
}
RenderSingleScatterplotUI <- function(input, output, session) {
fluidPage(
# titlePanel("Boxplot"),
fluidRow(
column(2,
wellPanel(
# sidebarPanel(
tags$div(
title="",
selectInput(inputId = "count_file", label = "Select a file count:", choices = BrowsePath(file.path(ProjectPath,"count_files")))
),
tags$div(
title="",
selectInput(inputId = "col1_file", label = "Select a column to plot:", choices = c(1:10), selected = 1)
),
tags$div(
title="",
selectInput(inputId = "col2_file", label = "Select a column to plot:", choices = c(1:10), selected = 2)
),
tags$div(
title="",
checkboxInput(inputId = "log_flag", label = "Log transform")
),
tags$div(
title="",
checkboxInput(inputId = "plotly_flag", label = "Use plotly")
),
tags$br(),
actionButton(inputId = "scatterplot_button", label = "scatterplot", width = "100%", icon = icon("binoculars"))
)
),
# mainPanel(
column(10,
conditionalPanel(
condition = "input.plotly_flag",
plotlyOutput(outputId = "singlescatter_plotly")
),
conditionalPanel(
condition = "!input.plotly_flag",
plotOutput(outputId = "singlescatter_plot")
)
)
)
)
}
RenderSingleScatterplotPlot <- function(input,output,session) {
file.path.complete <- file.path(ProjectPath, "count_files", input$count_file)
# print(file.path.complete)
col.separator <- "\t"
coverage.file <- read.table(file=file.path.complete, header = TRUE, sep = col.separator, row.names = 1)
# print(head(coverage.file))
col1 <- as.integer(input$col1_file)
col2 <- as.integer(input$col2_file)
if ( ( col1> dim(coverage.file)[2]) || (col2 > dim(coverage.file)[2]) ) {
warning("You selected a too big column number!")
return()
}
sub.df <- coverage.file[,c(col1, col2)]
# print(head(sub.df))
  self.title = paste0("Scatterplot ", colnames(sub.df)[col1], " vs ", colnames(sub.df)[col2])
ggscp <- ScatterPlot(data.frame.to.plot = sub.df, title=self.title, log.transform = input$log_flag, plotly = input$plotly_flag)
# if(input$plotly_flag) {
# require(plotly)
# # ggbxp <- ggplotly(ggbxp)
# ggbxp=ggplotly(ggbxp)
# }
return(ggscp)
}
RenderScatterplotMatrixUI <- function(input, output, session) {
fluidPage(
# titlePanel("Boxplot"),
fluidRow(
column(2,
wellPanel(
# sidebarPanel(
tags$div(
title="",
selectInput(inputId = "countm_file", label = "Select a file count:", choices = BrowsePath(file.path(ProjectPath,"count_files")))
),
tags$div(
title="",
selectInput(inputId = "col1m_file", label = "Select starting column to plot:", choices = c(1:10), selected = 1)
),
tags$div(
title="",
selectInput(inputId = "col2m_file", label = "Select final column to plot:", choices = c(1:10), selected = 2)
),
tags$div(
title="",
checkboxInput(inputId = "logm_flag", label = "Log transform")
),
# tags$div(
# title="",
# checkboxInput(inputId = "plotly_flag", label = "Use plotly")
# ),
tags$br(),
actionButton(inputId = "scatterplotmatrix_button", label = "scatterplot", width = "100%", icon = icon("bomb"))
)
),
# mainPanel(
column(10,
plotOutput(outputId = "matrixscatter_plot")
)
)
)
}
RenderScatterplotMatrixPlot <- function(input,output,session) {
file.path.complete <- file.path(ProjectPath, "count_files", input$countm_file)
col.separator <- "\t"
coverage.file <- read.table(file=file.path.complete, header = TRUE, sep = col.separator, row.names = 1)
col1 <- as.integer(input$col1m_file)
col2 <- as.integer(input$col2m_file)
if ( ( col1> dim(coverage.file)[2]) || (col2 > dim(coverage.file)[2]) ) {
warning("You selected a too big column number!")
return()
}
# sub.df <- coverage.file[,c(col1:col2)]
# print(head(sub.df))
# self.title = paste0("Scatteplot Matrix from ", colnames(sub.df)[col1], " to ", colnames(sub.df)[col2])
#ggscp <- ScatterPlot(data.frame.to.plot = sub.df, title=self.title, log.transform = input$log_flag, plotly = input$plotly_flag)
require("GGally")
scatmat <- ggscatmat(data = coverage.file, columns = c(col1:col2), alpha = 0.5)
return(scatmat)
}
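# Illustrative wiring sketch (an addition, not part of the original file): a
# hypothetical server fragment showing one way the Render* helpers above could
# be hooked up; it assumes shiny and plotly are attached and that ProjectPath,
# BrowsePath and ScatterPlot are available to the app.
serverSketch <- function(input, output, session) {
  output$singlescatterplot_ui <- renderUI(RenderSingleScatterplotUI(input, output, session))
  output$scatterplotmatrix_ui <- renderUI(RenderScatterplotMatrixUI(input, output, session))
  observeEvent(input$scatterplot_button, {
    output$singlescatter_plot   <- renderPlot(RenderSingleScatterplotPlot(input, output, session))
    output$singlescatter_plotly <- renderPlotly(RenderSingleScatterplotPlot(input, output, session))
  })
  observeEvent(input$scatterplotmatrix_button, {
    output$matrixscatter_plot <- renderPlot(RenderScatterplotMatrixPlot(input, output, session))
  })
}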
|
/R/scatterplot_UI.R
|
no_license
|
drighelli/integrho
|
R
| false | false | 5,481 |
r
|
RenderScatterplotUI <- function(input, output, session) {
fluidPage(
tabsetPanel(
tabPanel("Scatterplot",
uiOutput("singlescatterplot_ui")
),
tabPanel("Scatterplot Matrix",
uiOutput("scatterplotmatrix_ui")
)
)
)
}
RenderSingleScatterplotUI <- function(input, output, session) {
fluidPage(
# titlePanel("Boxplot"),
fluidRow(
column(2,
wellPanel(
# sidebarPanel(
tags$div(
title="",
selectInput(inputId = "count_file", label = "Select a file count:", choices = BrowsePath(file.path(ProjectPath,"count_files")))
),
tags$div(
title="",
selectInput(inputId = "col1_file", label = "Select a column to plot:", choices = c(1:10), selected = 1)
),
tags$div(
title="",
selectInput(inputId = "col2_file", label = "Select a column to plot:", choices = c(1:10), selected = 2)
),
tags$div(
title="",
checkboxInput(inputId = "log_flag", label = "Log transform")
),
tags$div(
title="",
checkboxInput(inputId = "plotly_flag", label = "Use plotly")
),
tags$br(),
actionButton(inputId = "scatterplot_button", label = "scatterplot", width = "100%", icon = icon("binoculars"))
)
),
# mainPanel(
column(10,
conditionalPanel(
condition = "input.plotly_flag",
plotlyOutput(outputId = "singlescatter_plotly")
),
conditionalPanel(
condition = "!input.plotly_flag",
plotOutput(outputId = "singlescatter_plot")
)
)
)
)
}
RenderSingleScatterplotPlot <- function(input,output,session) {
file.path.complete <- file.path(ProjectPath, "count_files", input$count_file)
# print(file.path.complete)
col.separator <- "\t"
coverage.file <- read.table(file=file.path.complete, header = TRUE, sep = col.separator, row.names = 1)
# print(head(coverage.file))
col1 <- as.integer(input$col1_file)
col2 <- as.integer(input$col2_file)
if ( ( col1> dim(coverage.file)[2]) || (col2 > dim(coverage.file)[2]) ) {
warning("You selected a too big column number!")
return()
}
sub.df <- coverage.file[,c(col1, col2)]
# print(head(sub.df))
self.title = paste0("Scatteplot ", colnames(sub.df)[col1], " vs ", colnames(sub.df)[col2])
ggscp <- ScatterPlot(data.frame.to.plot = sub.df, title=self.title, log.transform = input$log_flag, plotly = input$plotly_flag)
# if(input$plotly_flag) {
# require(plotly)
# # ggbxp <- ggplotly(ggbxp)
# ggbxp=ggplotly(ggbxp)
# }
return(ggscp)
}
RenderScatterplotMatrixUI <- function(input, output, session) {
fluidPage(
# titlePanel("Boxplot"),
fluidRow(
column(2,
wellPanel(
# sidebarPanel(
tags$div(
title="",
selectInput(inputId = "countm_file", label = "Select a file count:", choices = BrowsePath(file.path(ProjectPath,"count_files")))
),
tags$div(
title="",
selectInput(inputId = "col1m_file", label = "Select starting column to plot:", choices = c(1:10), selected = 1)
),
tags$div(
title="",
selectInput(inputId = "col2m_file", label = "Select final column to plot:", choices = c(1:10), selected = 2)
),
tags$div(
title="",
checkboxInput(inputId = "logm_flag", label = "Log transform")
),
# tags$div(
# title="",
# checkboxInput(inputId = "plotly_flag", label = "Use plotly")
# ),
tags$br(),
actionButton(inputId = "scatterplotmatrix_button", label = "scatterplot", width = "100%", icon = icon("bomb"))
)
),
# mainPanel(
column(10,
plotOutput(outputId = "matrixscatter_plot")
)
)
)
}
RenderScatterplotMatrixPlot <- function(input,output,session) {
file.path.complete <- file.path(ProjectPath, "count_files", input$countm_file)
col.separator <- "\t"
coverage.file <- read.table(file=file.path.complete, header = TRUE, sep = col.separator, row.names = 1)
col1 <- as.integer(input$col1m_file)
col2 <- as.integer(input$col2m_file)
if ( ( col1> dim(coverage.file)[2]) || (col2 > dim(coverage.file)[2]) ) {
warning("You selected a too big column number!")
return()
}
# sub.df <- coverage.file[,c(col1:col2)]
# print(head(sub.df))
# self.title = paste0("Scatteplot Matrix from ", colnames(sub.df)[col1], " to ", colnames(sub.df)[col2])
#ggscp <- ScatterPlot(data.frame.to.plot = sub.df, title=self.title, log.transform = input$log_flag, plotly = input$plotly_flag)
require("GGally")
scatmat <- ggscatmat(data = coverage.file, columns = c(col1:col2), alpha = 0.5)
return(scatmat)
}
|
<dec f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/include/openssl/ecdsa.h' l='106' type='BIGNUM *'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/ecdsa_extra/ecdsa_asn1.c' l='159' u='r' c='ECDSA_SIG_parse'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/ecdsa_extra/ecdsa_asn1.c' l='184' u='r' c='ECDSA_SIG_marshal'/>
<offset>0</offset>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='107' u='w' c='ECDSA_SIG_new'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='109' u='r' c='ECDSA_SIG_new'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='121' u='r' c='ECDSA_SIG_free'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='127' u='r' c='ECDSA_SIG_get0_r'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='137' u='r' c='ECDSA_SIG_get0'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='148' u='r' c='ECDSA_SIG_set0'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='150' u='w' c='ECDSA_SIG_set0'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='165' u='r' c='ECDSA_do_verify'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='166' u='r' c='ECDSA_do_verify'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='257' u='r' c='ecdsa_sign_impl'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/ssl/t1_lib.cc' l='4242' u='r' c='_ZN4bssl22tls1_verify_channel_idEPNS_13SSL_HANDSHAKEERKNS_10SSLMessageE'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/ssl/t1_lib.cc' l='4310' u='r' c='_ZN4bssl21tls1_write_channel_idEPNS_13SSL_HANDSHAKEEP6cbb_st'/>
|
/docs/refs/ecdsa_sig_st..r
|
no_license
|
HarDToBelieve/webkit_codebrowser
|
R
| false | false | 2,238 |
r
|
<dec f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/include/openssl/ecdsa.h' l='106' type='BIGNUM *'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/ecdsa_extra/ecdsa_asn1.c' l='159' u='r' c='ECDSA_SIG_parse'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/ecdsa_extra/ecdsa_asn1.c' l='184' u='r' c='ECDSA_SIG_marshal'/>
<offset>0</offset>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='107' u='w' c='ECDSA_SIG_new'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='109' u='r' c='ECDSA_SIG_new'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='121' u='r' c='ECDSA_SIG_free'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='127' u='r' c='ECDSA_SIG_get0_r'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='137' u='r' c='ECDSA_SIG_get0'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='148' u='r' c='ECDSA_SIG_set0'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='150' u='w' c='ECDSA_SIG_set0'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='165' u='r' c='ECDSA_do_verify'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='166' u='r' c='ECDSA_do_verify'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/crypto/fipsmodule/ecdsa/ecdsa.c' l='257' u='r' c='ecdsa_sign_impl'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/ssl/t1_lib.cc' l='4242' u='r' c='_ZN4bssl22tls1_verify_channel_idEPNS_13SSL_HANDSHAKEERKNS_10SSLMessageE'/>
<use f='webkit/Source/ThirdParty/libwebrtc/Source/third_party/boringssl/src/ssl/t1_lib.cc' l='4310' u='r' c='_ZN4bssl21tls1_write_channel_idEPNS_13SSL_HANDSHAKEEP6cbb_st'/>
|
#' medianPSE - Median-of-PSE criterion used to select the cross-validation bandwidth described in Foster and Stephen (PhD Thesis) for functional data
#' @import KernSmooth
#' @param bandwidth Candidate bandwidth for the local polynomial smoother.
#' @param x Location of the discretization points. These discretization points must be uniform and missing values are not accepted.
#' @param y Typically a matrix or data frame which contains a set of curves stored in rows. Missing values are not accepted.
#' @param degree Degree of the local polynomial to be used. If degree is missing, degree = 1 is taken by default.
#' @return The median over curves of the median PSE of the functional data set at the supplied bandwidth.
#' @references Foster and Stephen. PhD Thesis. Manchester University
#' @examples \dontrun{
#' Mat<- fdaobjMale$data
#' h<- cv.select(c(0,10), 1:31,t(Mat),1)
#' }
#' @export
medianPSE<- function (bandwidth, x, y, degree)
{
y<-as.matrix(y)
spacing <- diff(x)
  if(bandwidth < 0)
    stop("'bandwidth' must be positive")
if (any(spacing < 0))
stop("'x' must be increasing")
if (nrow(y) < 2)
stop("'y' must have at least two rows")
if (length(x) != ncol(y))
stop("length(x) and ncol(y) must be equal")
n <- nrow(y)
N <- ncol(y)
y.hat <- apply(y, 1, function(z) locpoly(x = x, y = z, bandwidth = bandwidth,
gridsize = N, degree = degree)$y)
mu.hat <- rowMeans(y.hat)
residuals <- (n/(n - 1)) * mu.hat - y.hat/(n - 1) - t(y)
PSEMedian<- apply(residuals^2, 2, median)
return(median(PSEMedian))
}
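# Illustrative use (assumed, not part of the package): the criterion above is typically
# minimised over an interval of candidate bandwidths, e.g. with optimize(), which passes
# the remaining arguments through to medianPSE:
# Mat <- fdaobjMale$data
# h.opt <- optimize(medianPSE, interval = c(0.5, 10),
#                   x = 1:31, y = t(Mat), degree = 1)$minimum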
|
/RPackageTest/R/medianPSE.R
|
no_license
|
gusajr/RPackageTest
|
R
| false | false | 1,563 |
r
|
#' medianPSE - Median-of-PSE criterion used to select the cross-validation bandwidth described in Foster and Stephen (PhD Thesis) for functional data
#' @import KernSmooth
#' @param bandwidth Candidate bandwidth for the local polynomial smoother.
#' @param x Location of the discretization points. These discretization points must be uniform and missing values are not accepted.
#' @param y Typically a matrix or data frame which contains a set of curves stored in rows. Missing values are not accepted.
#' @param degree Degree of the local polynomial to be used. If degree is missing, degree = 1 is taken by default.
#' @return The median over curves of the median PSE of the functional data set at the supplied bandwidth.
#' @references Foster and Stephen. PhD Thesis. Manchester University
#' @examples \dontrun{
#' Mat<- fdaobjMale$data
#' h<- cv.select(c(0,10), 1:31,t(Mat),1)
#' }
#' @export
medianPSE<- function (bandwidth, x, y, degree)
{
y<-as.matrix(y)
spacing <- diff(x)
  if(bandwidth < 0)
    stop("'bandwidth' must be positive")
if (any(spacing < 0))
stop("'x' must be increasing")
if (nrow(y) < 2)
stop("'y' must have at least two rows")
if (length(x) != ncol(y))
stop("length(x) and ncol(y) must be equal")
n <- nrow(y)
N <- ncol(y)
y.hat <- apply(y, 1, function(z) locpoly(x = x, y = z, bandwidth = bandwidth,
gridsize = N, degree = degree)$y)
mu.hat <- rowMeans(y.hat)
residuals <- (n/(n - 1)) * mu.hat - y.hat/(n - 1) - t(y)
PSEMedian<- apply(residuals^2, 2, median)
return(median(PSEMedian))
}
|
library(reshape2)  # melt()
library(ggplot2)
gh.da <- read.table("~/RMB/Publication/Data/GreenhouseExp/DA_comp_site_OTUs.txt", header = T, row.names = 1)
field.da <- read.table("~/RMB/Publication/Data/FieldExp/enriched_otus.txt", header = T, row.names = 1)
tax <- read.table("~/RMB/Publication/Data/FieldExp/field_tax.txt", header = T, row.names = 1)
## Get core greenhouse first
load("~/RMB/Publication/Data/GreenhouseExp/glm.gh.rda")
gh.da.e <- subset(gh.comp.site.glm, color == "E")
gh.e.arb <- as.character(subset(gh.comp.site.glm, color == "E" & Site == "Arbuckle" & padj < 0.01)$OTU)
gh.e.dav <- as.character(subset(gh.comp.site.glm, color == "E" & Site == "Davis" & padj < 0.01)$OTU)
gh.e.sac <- as.character(subset(gh.comp.site.glm, color == "E" & Site == "Sacramento" & padj < 0.01)$OTU)
gh.core <- intersect(gh.e.arb, intersect(gh.e.dav, gh.e.sac))
field.da.e <- subset(field.da)
b1.e <- unique(as.character(subset(field.da, Site == "BB P1" & padj < 0.01)$OTU))
b4.e <- unique(as.character(subset(field.da, Site == "BB P4" & padj < 0.01)$OTU))
d18.e <- unique(as.character(subset(field.da, Site == "Ditaler 18" & padj < 0.01)$OTU))
d19.e <- unique(as.character(subset(field.da, Site == "Ditaler 19" & padj < 0.01)$OTU))
ds.e <- unique(as.character(subset(field.da, Site == "DS RR" & padj < 0.01)$OTU))
sch.e <- unique(as.character(subset(field.da, Site == "Scheidec" & padj < 0.01)$OTU))
sft.e <- unique(as.character(subset(field.da, Site == "SFT 20 A" & padj < 0.01)$OTU))
sp.e <- unique(as.character(subset(field.da, Site == "Spooner Airstrip" & padj < 0.01)$OTU))
field.all <- table(c(b1.e, b4.e, d18.e, d19.e, ds.e, sch.e, sft.e, sp.e))
field.core <- Reduce(intersect, list(b1.e, b4.e, d18.e, d19.e, ds.e, sch.e, sft.e, sp.e))
all.core <- intersect(field.core, gh.core)
all.core.tax <- tax[match(all.core, row.names(tax)),]
ggplot(all.core.tax, aes(x = Class, fill = Order)) +
geom_bar() +
coord_flip() +
theme(text = element_text(size = 20))
## Get counts for each experiment
gh.counts <- read.table("~/RMB/Publication/Data/GreenhouseExp/gh_otu_table.txt", header = T, row.names = 1)
field.counts <- read.table("~/RMB/Publication/Data/FieldExp/field_otu_table.txt", header = T, row.names = 1)
gh.map <- read.table("~/RMB/Publication/Data/GreenhouseExp/gh_map.txt", header = T, row.names = 1)
field.map <- read.table("~/RMB/Publication/Data/FieldExp/field_map.txt", header = T, row.names = 1)
gh.map$BarcodeSequence <- NULL
gh.map$LinkerPrimerSequence <- NULL
gh.map$Field <- NULL
gh.map$Run <- NULL
field.map$BarcodeSequence <- NULL
field.map$LinkerPrimerSequence <- NULL
field.map$Field <- NULL
field.map$Run <- NULL
gh.core.counts <- melt(cbind(gh.map, t(gh.counts[match(all.core, row.names(gh.counts)), match(row.names(gh.map), colnames(gh.counts))])))
field.core.counts <- melt(cbind(field.map, t(field.counts[match(all.core, row.names(field.counts)), match(row.names(field.map), colnames(field.counts))])))
whole.counts <- rbind(gh.core.counts, field.core.counts)
ggplot(whole.counts, aes(x = Compartment, y = value, fill = variable)) +
geom_boxplot() +
facet_grid(Cultivation ~ .) +
ylim(0,5000)
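# (illustrative) the figure above could be written to disk with, e.g.:
# ggsave("core_taxa_counts.pdf", width = 10, height = 6)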
|
/Greenhouse/core_plots.r
|
no_license
|
rajaldebnath/Edwards-et-al.-2014
|
R
| false | false | 3,091 |
r
|
library(reshape2)  # melt()
library(ggplot2)
gh.da <- read.table("~/RMB/Publication/Data/GreenhouseExp/DA_comp_site_OTUs.txt", header = T, row.names = 1)
field.da <- read.table("~/RMB/Publication/Data/FieldExp/enriched_otus.txt", header = T, row.names = 1)
tax <- read.table("~/RMB/Publication/Data/FieldExp/field_tax.txt", header = T, row.names = 1)
## Get core greenhouse first
load("~/RMB/Publication/Data/GreenhouseExp/glm.gh.rda")
gh.da.e <- subset(gh.comp.site.glm, color == "E")
gh.e.arb <- as.character(subset(gh.comp.site.glm, color == "E" & Site == "Arbuckle" & padj < 0.01)$OTU)
gh.e.dav <- as.character(subset(gh.comp.site.glm, color == "E" & Site == "Davis" & padj < 0.01)$OTU)
gh.e.sac <- as.character(subset(gh.comp.site.glm, color == "E" & Site == "Sacramento" & padj < 0.01)$OTU)
gh.core <- intersect(gh.e.arb, intersect(gh.e.dav, gh.e.sac))
field.da.e <- subset(field.da)
b1.e <- unique(as.character(subset(field.da, Site == "BB P1" & padj < 0.01)$OTU))
b4.e <- unique(as.character(subset(field.da, Site == "BB P4" & padj < 0.01)$OTU))
d18.e <- unique(as.character(subset(field.da, Site == "Ditaler 18" & padj < 0.01)$OTU))
d19.e <- unique(as.character(subset(field.da, Site == "Ditaler 19" & padj < 0.01)$OTU))
ds.e <- unique(as.character(subset(field.da, Site == "DS RR" & padj < 0.01)$OTU))
sch.e <- unique(as.character(subset(field.da, Site == "Scheidec" & padj < 0.01)$OTU))
sft.e <- unique(as.character(subset(field.da, Site == "SFT 20 A" & padj < 0.01)$OTU))
sp.e <- unique(as.character(subset(field.da, Site == "Spooner Airstrip" & padj < 0.01)$OTU))
field.all <- table(c(b1.e, b4.e, d18.e, d19.e, ds.e, sch.e, sft.e, sp.e))
field.core <- Reduce(intersect, list(b1.e, b4.e, d18.e, d19.e, ds.e, sch.e, sft.e, sp.e))
all.core <- intersect(field.core, gh.core)
all.core.tax <- tax[match(all.core, row.names(tax)),]
ggplot(all.core.tax, aes(x = Class, fill = Order)) +
geom_bar() +
coord_flip() +
theme(text = element_text(size = 20))
## Get counts for each experiment
gh.counts <- read.table("~/RMB/Publication/Data/GreenhouseExp/gh_otu_table.txt", header = T, row.names = 1)
field.counts <- read.table("~/RMB/Publication/Data/FieldExp/field_otu_table.txt", header = T, row.names = 1)
gh.map <- read.table("~/RMB/Publication/Data/GreenhouseExp/gh_map.txt", header = T, row.names = 1)
field.map <- read.table("~/RMB/Publication/Data/FieldExp/field_map.txt", header = T, row.names = 1)
gh.map$BarcodeSequence <- NULL
gh.map$LinkerPrimerSequence <- NULL
gh.map$Field <- NULL
gh.map$Run <- NULL
field.map$BarcodeSequence <- NULL
field.map$LinkerPrimerSequence <- NULL
field.map$Field <- NULL
field.map$Run <- NULL
gh.core.counts <- melt(cbind(gh.map, t(gh.counts[match(all.core, row.names(gh.counts)), match(row.names(gh.map), colnames(gh.counts))])))
field.core.counts <- melt(cbind(field.map, t(field.counts[match(all.core, row.names(field.counts)), match(row.names(field.map), colnames(field.counts))])))
whole.counts <- rbind(gh.core.counts, field.core.counts)
ggplot(whole.counts, aes(x = Compartment, y = value, fill = variable)) +
geom_boxplot() +
facet_grid(Cultivation ~ .) +
ylim(0,5000)
|
# topic modeling of sentiment
library(tidyverse)
library(tidytext)
library(tidymodels)
library(tm)
library(vip)
library(tictoc)
library(butcher)
library(yardstick)
# setwd("2023-02-28_african_language")
load("data/afrisenti_translated.rdata")
# ----- SETUP ------------------------------
afrisenti_translated <- afrisenti_translated %>%
mutate(lang = as.factor(assigned_long)) %>%
mutate(sentiment = as.factor(label))
tweet_train <- afrisenti_translated %>%
filter(intended_use == "train") %>%
select(tweet_num,sentiment,lang,tweet)
tweet_test <- afrisenti_translated %>%
filter(intended_use == "test") %>%
select(tweet_num,sentiment,lang,tweet)
tweet_dev <- afrisenti_translated %>%
filter(intended_use == "dev") %>%
select(tweet_num,sentiment,lang,tweet)
# add my stop words to defaults
my_stop_words = tibble(word = c("http","https","dey","de","al","url","na","t.co","rt","user","users","wey","don",
as.character(1:100),
"?????????", "?????????","?????????")) %>%
bind_rows(stop_words)
# split into words. Choose native or English
tokenize <- function(dataset, use_translated = FALSE) {
tokens <- dataset %>%
select(tweet_num,
sentiment,
lang,
ifelse(use_translated, "translatedText", "tweet")) %>%
unnest_tokens(word, !!(ifelse(
use_translated, "translatedText", "tweet"
)))
return(tokens)
}
# turn words preceded by "not" into "not_<word>"
# to create a negated token
detect_negations <- function(tokens,negation_words = c("not")) {
# function to negate tokenized data
tokens <- tokens %>% rowid_to_column(var="word_num")
not_words_rows <- tokens |>
filter(word %in% negation_words) |>
mutate(word_num = word_num) |>
pull(word_num)
tokens <- tokens %>%
# create negated terms
filter(!(word_num %in% not_words_rows)) |>
mutate(word = ifelse(word_num %in% (not_words_rows+1),paste0("not_",word),word)) |>
select(-word_num)
return(tokens)
}
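# quick illustrative check of the negation helper on toy tokens (not dataset rows):
# detect_negations(tibble(tweet_num = 1, sentiment = "negative", lang = "en",
#                         word = c("i", "do", "not", "like", "it")))
# # -> the "not" row is dropped and "like" becomes "not_like"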
# word list size will be critical
# full set will be wasteful and slow
# one author suggested 2000
# remove stop words first
get_top_words <- function(tokens, word_count = 1000, my_stopwords = my_stop_words) {
  chosen_words <- tokens |>
    anti_join(my_stopwords) %>%
ungroup() |>
select(word) |>
count(word) |>
arrange(desc(n)) |>
slice_max(order_by = n, n = word_count)
return(chosen_words)
}
# make document term matrix including words and language. omit stop words. note
# that negation must be done before removing stop words or "not" will be stripped.
make_dtm <- function(tokens) {
chosen_words <- get_top_words(tokens,word_count = 1000)
tweet_dtm <- tokens |>
inner_join(chosen_words) |>
group_by(tweet_num, word) |>
count(word) |>
cast_dtm(tweet_num, word, n) %>%
tidy() %>%
mutate(count = as.integer(count))
dtmm <- tweet_dtm |>
pivot_wider(names_from = term, values_from = count, values_fill = 0) %>%
mutate(tweet_num = as.numeric(document)) %>%
left_join(select(afrisenti_translated,tweet_num,sentiment,lang),by="tweet_num") %>%
select(sentiment,lang,everything()) %>%
    select(-document,-tweet_num)
  return(dtmm)
}
# ----- END SETUP ------------------------------
# do it with native
# 2- letter words are a huge part of the corpus.
# I don't know what I'm doing but 2-letter words probably don't convey
# as much as longer words.
tokens_a <- tokenize(afrisenti_translated) %>%
filter(str_length(word) > 2)
# do it with English translations
tokens_e <- afrisenti_translated %>%
filter(intended_use == "train") %>%
tokenize(use_translated = TRUE)
# ---------------------------------------------------------
# run the models
tic()
dtmm <- make_dtm(tokens_a)
toc()
cores <- parallel::detectCores()
rf_mod <- parsnip::rand_forest(trees = 100) %>%
set_engine("ranger",num.threads = cores,importance = "impurity") %>%
set_mode("classification")
rf_recipe <-
recipe(sentiment ~ ., data = dtmm)
rf_workflow <-
workflow() %>%
add_model(rf_mod) %>%
add_recipe(rf_recipe)
translate(rf_mod)
#rf_workflow %>%
# fit(mtcars) %>%
# extract_fit_parsnip() %>%
# vip(num_features = 10)
tic()
rf_fit <- rf_workflow %>%
fit(dtmm)
toc()
summary(predict(rf_fit,dtmm[-1]))
# Validation set assessment #1: looking at confusion matrix
predicted_for_table <- tibble(dtmm[,1],predict(rf_fit,dtmm))
xt <- table(predicted_for_table) %>%
broom::tidy() %>%
mutate(across(where(is.character),as.factor)) %>%
# group_by(label) %>%
mutate(prop = round(100*n/sum(n)))
gg <- xt %>%
ggplot(aes(observed,predicted,fill=n)) + geom_tile() +
labs(title = "African Languages Tweets\nQ: Can We Train on English Google Translations?",
subtitle = "A: Yes. A random forest model works pretty well.",
x = "Native Language Sentiment",
y= "Google Translate Sentiment",
caption = "source: Afrisenti Data Set") +
scale_fill_gradient(low = "#FFBF00",high = "#007000") +
theme(text = element_text(family = "dm"),
plot.background = element_rect(fill = "#FDECCD", color = NA),
legend.background = element_blank(),
axis.ticks = element_blank(),
panel.background = element_blank(),
panel.grid = element_blank())
gg + geom_text(aes(label = paste0(as.character(prop),"%")))
plot_gg(gg, width = 5, height = 5, multicore = TRUE, scale = 250,
zoom = 0.7, theta = 10, phi = 30, windowsize = c(800, 800))
pretty_colours <- c("#F8766D","#00BA38","#619CFF")
# Validation set assessment #2: ROC curves and AUC
# Needs to import ROCR package for ROC curve plotting:
library(ROCR)
# Calculate the probability of new observations belonging to each class
predicted_for_roc_curve<- tibble(dtmm[,1:2],
predict(rf_fit,dtmm[,-1],type="prob"))
predicted_for_roc <- bind_cols(predicted_for_table,predicted_for_roc_curve[,2:4])
metrics(predicted_for_roc,sentiment,.pred_class)
predicted_for_roc_curve %>%
group_by(lang) %>%
roc_curve(sentiment,.pred_negative:.pred_positive) %>%
autoplot()
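# a single multiclass AUC summary could be added with yardstick (hand-till estimator by
# default; illustrative, not computed above):
# predicted_for_roc_curve %>%
#   roc_auc(sentiment, .pred_negative:.pred_positive)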
|
/2023-02-28_african_language/ml_predictions.R
|
no_license
|
apsteinmetz/tidytuesday
|
R
| false | false | 6,159 |
r
|
# topic modeling of sentiment
library(tidyverse)
library(tidytext)
library(tidymodels)
library(tm)
library(vip)
library(tictoc)
library(butcher)
library(yardstick)
# setwd("2023-02-28_african_language")
load("data/afrisenti_translated.rdata")
# ----- SETUP ------------------------------
afrisenti_translated <- afrisenti_translated %>%
mutate(lang = as.factor(assigned_long)) %>%
mutate(sentiment = as.factor(label))
tweet_train <- afrisenti_translated %>%
filter(intended_use == "train") %>%
select(tweet_num,sentiment,lang,tweet)
tweet_test <- afrisenti_translated %>%
filter(intended_use == "test") %>%
select(tweet_num,sentiment,lang,tweet)
tweet_dev <- afrisenti_translated %>%
filter(intended_use == "dev") %>%
select(tweet_num,sentiment,lang,tweet)
# add my stop words to defaults
my_stop_words = tibble(word = c("http","https","dey","de","al","url","na","t.co","rt","user","users","wey","don",
as.character(1:100),
"?????????", "?????????","?????????")) %>%
bind_rows(stop_words)
# split into words. Choose native or English
tokenize <- function(dataset, use_translated = FALSE) {
tokens <- dataset %>%
select(tweet_num,
sentiment,
lang,
ifelse(use_translated, "translatedText", "tweet")) %>%
unnest_tokens(word, !!(ifelse(
use_translated, "translatedText", "tweet"
)))
return(tokens)
}
# turn words preceded by "not" into "not_<word>"
# to create a negated token
detect_negations <- function(tokens,negation_words = c("not")) {
# function to negate tokenized data
tokens <- tokens %>% rowid_to_column(var="word_num")
not_words_rows <- tokens |>
filter(word %in% negation_words) |>
mutate(word_num = word_num) |>
pull(word_num)
tokens <- tokens %>%
# create negated terms
filter(!(word_num %in% not_words_rows)) |>
mutate(word = ifelse(word_num %in% (not_words_rows+1),paste0("not_",word),word)) |>
select(-word_num)
return(tokens)
}
# word list size will be critical
# full set will be wasteful and slow
# one author suggested 2000
# remove stop words first
get_top_words <- function(tokens, word_count = 1000, my_stopwords = my_stop_words) {
  chosen_words <- tokens |>
    anti_join(my_stopwords) %>%
ungroup() |>
select(word) |>
count(word) |>
arrange(desc(n)) |>
slice_max(order_by = n, n = word_count)
return(chosen_words)
}
# make document term matrix including words and language. omit stop words. note
# that negation must be done before removing stop words or "not" will be stripped.
make_dtm <- function(tokens) {
chosen_words <- get_top_words(tokens,word_count = 1000)
tweet_dtm <- tokens |>
inner_join(chosen_words) |>
group_by(tweet_num, word) |>
count(word) |>
cast_dtm(tweet_num, word, n) %>%
tidy() %>%
mutate(count = as.integer(count))
dtmm <- tweet_dtm |>
pivot_wider(names_from = term, values_from = count, values_fill = 0) %>%
mutate(tweet_num = as.numeric(document)) %>%
left_join(select(afrisenti_translated,tweet_num,sentiment,lang),by="tweet_num") %>%
select(sentiment,lang,everything()) %>%
    select(-document,-tweet_num)
  return(dtmm)
}
# ----- END SETUP ------------------------------
# do it with native
# 2- letter words are a huge part of the corpus.
# I don't know what I'm doing but 2-letter words probably don't convey
# as much as longer words.
tokens_a <- tokenize(afrisenti_translated) %>%
filter(str_length(word) > 2)
# do it with English translations
tokens_e <- afrisenti_translated %>%
filter(intended_use == "train") %>%
tokenize(use_translated = TRUE)
# ---------------------------------------------------------
# run the models
tic()
dtmm <- make_dtm(tokens_a)
toc()
cores <- parallel::detectCores()
rf_mod <- parsnip::rand_forest(trees = 100) %>%
set_engine("ranger",num.threads = cores,importance = "impurity") %>%
set_mode("classification")
rf_recipe <-
recipe(sentiment ~ ., data = dtmm)
rf_workflow <-
workflow() %>%
add_model(rf_mod) %>%
add_recipe(rf_recipe)
translate(rf_mod)
#rf_workflow %>%
# fit(mtcars) %>%
# extract_fit_parsnip() %>%
# vip(num_features = 10)
tic()
rf_fit <- rf_workflow %>%
fit(dtmm)
toc()
summary(predict(rf_fit,dtmm[-1]))
# Validation set assessment #1: looking at confusion matrix
predicted_for_table <- tibble(dtmm[,1],predict(rf_fit,dtmm))
xt <- table(predicted_for_table) %>%
broom::tidy() %>%
mutate(across(where(is.character),as.factor)) %>%
# group_by(label) %>%
mutate(prop = round(100*n/sum(n)))
gg <- xt %>%
ggplot(aes(observed,predicted,fill=n)) + geom_tile() +
labs(title = "African Languages Tweets\nQ: Can We Train on English Google Translations?",
subtitle = "A: Yes. A random forest model works pretty well.",
x = "Native Language Sentiment",
y= "Google Translate Sentiment",
caption = "source: Afrisenti Data Set") +
scale_fill_gradient(low = "#FFBF00",high = "#007000") +
theme(text = element_text(family = "dm"),
plot.background = element_rect(fill = "#FDECCD", color = NA),
legend.background = element_blank(),
axis.ticks = element_blank(),
panel.background = element_blank(),
panel.grid = element_blank())
gg + geom_text(aes(label = paste0(as.character(prop),"%")))
plot_gg(gg, width = 5, height = 5, multicore = TRUE, scale = 250,
zoom = 0.7, theta = 10, phi = 30, windowsize = c(800, 800))
pretty_colours <- c("#F8766D","#00BA38","#619CFF")
# Validation set assessment #2: ROC curves and AUC
# Needs to import ROCR package for ROC curve plotting:
library(ROCR)
# Calculate the probability of new observations belonging to each class
predicted_for_roc_curve<- tibble(dtmm[,1:2],
predict(rf_fit,dtmm[,-1],type="prob"))
predicted_for_roc <- bind_cols(predicted_for_table,predicted_for_roc_curve[,2:4])
metrics(predicted_for_roc,sentiment,.pred_class)
predicted_for_roc_curve %>%
group_by(lang) %>%
roc_curve(sentiment,.pred_negative:.pred_positive) %>%
autoplot()
|
library(testthat)
credential <- retrieve_credential_testing()
update_expectation <- FALSE
test_that("Smoke Test", {
testthat::skip_on_cran()
expect_message({
returned_object <-
redcap_variables(
redcap_uri = credential$redcap_uri,
token = credential$token,
verbose = TRUE
)
})
expect_type(returned_object, "list")
})
test_that("default", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/variables/default.R"
expected_outcome_message <- "\\d+ variable metadata records were read from REDCap in \\d\\.\\d seconds\\. The http status code was 200\\.(\\n)?"
returned_object <-
redcap_variables(
redcap_uri = credential$redcap_uri,
token = credential$token,
verbose = FALSE
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
expect_s3_class(returned_object$data, "tbl")
})
test_that("Bad Uri -wrong address (1 of 2)", {
testthat::skip_on_cran()
expected_message <- "The requested URL was not found on this server\\."
expect_error(
redcap_variables(
redcap_uri = "https://bbmc.ouhsc.edu/redcap/apiFFFFFFFFFFFFFF/", # Wrong url
token = credential$token
),
expected_message
)
})
test_that("Bad Uri -wrong address (2 of 2)", {
testthat::skip_on_cran()
bad_uri <- "https://aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com"
expected_data_frame <- structure(list(), .Names = character(0), row.names = integer(0), class = "data.frame")
# Windows gives a different message than Travis/Linux
expected_outcome_message <- "(https://aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com|Couldn't resolve host 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com')"
# "The REDCapR variable retrieval was not successful\\..+?Error 405 \\(Method Not Allowed\\).+"
# expected_outcome_message <- "(?s)The REDCapR variable retrieval was not successful\\..+?.+"
expect_error(
redcap_variables(
redcap_uri = bad_uri,
token = credential$token
)#,
# regexp = expected_outcome_message
)
# Now the error is thrown with a bad URI.
# expected_outcome_message <- paste0("(?s)", expected_outcome_message)
#
# expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct") # dput(returned_object$data)
# expect_equal(returned_object$status_code, expected=405L)
# # expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
# expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
# expect_false(returned_object$success)
})
test_that("bad token -Error", {
testthat::skip_on_cran()
expected_error_message <- "ERROR: You do not have permissions to use the API"
expect_error(
redcap_variables(
redcap_uri = credential$redcap_uri,
token = "BAD00000000000000000000000000000"
),
expected_error_message
)
})
rm(credential)
|
/tests/testthat/test-variables.R
|
permissive
|
OuhscBbmc/REDCapR
|
R
| false | false | 3,528 |
r
|
library(testthat)
credential <- retrieve_credential_testing()
update_expectation <- FALSE
test_that("Smoke Test", {
testthat::skip_on_cran()
expect_message({
returned_object <-
redcap_variables(
redcap_uri = credential$redcap_uri,
token = credential$token,
verbose = TRUE
)
})
expect_type(returned_object, "list")
})
test_that("default", {
testthat::skip_on_cran()
path_expected <- "test-data/specific-redcapr/variables/default.R"
expected_outcome_message <- "\\d+ variable metadata records were read from REDCap in \\d\\.\\d seconds\\. The http status code was 200\\.(\\n)?"
returned_object <-
redcap_variables(
redcap_uri = credential$redcap_uri,
token = credential$token,
verbose = FALSE
)
if (update_expectation) save_expected(returned_object$data, path_expected)
expected_data_frame <- retrieve_expected(path_expected)
expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct", ignore_attr = TRUE) # dput(returned_object$data)
expect_equal(returned_object$status_code, expected=200L)
expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
expect_true(returned_object$success)
expect_s3_class(returned_object$data, "tbl")
})
test_that("Bad Uri -wrong address (1 of 2)", {
testthat::skip_on_cran()
expected_message <- "The requested URL was not found on this server\\."
expect_error(
redcap_variables(
redcap_uri = "https://bbmc.ouhsc.edu/redcap/apiFFFFFFFFFFFFFF/", # Wrong url
token = credential$token
),
expected_message
)
})
test_that("Bad Uri -wrong address (2 of 2)", {
testthat::skip_on_cran()
bad_uri <- "https://aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com"
expected_data_frame <- structure(list(), .Names = character(0), row.names = integer(0), class = "data.frame")
# Windows gives a different message than Travis/Linux
expected_outcome_message <- "(https://aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com|Couldn't resolve host 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com')"
# "The REDCapR variable retrieval was not successful\\..+?Error 405 \\(Method Not Allowed\\).+"
# expected_outcome_message <- "(?s)The REDCapR variable retrieval was not successful\\..+?.+"
expect_error(
redcap_variables(
redcap_uri = bad_uri,
token = credential$token
)#,
# regexp = expected_outcome_message
)
# Now the error is thrown with a bad URI.
# expected_outcome_message <- paste0("(?s)", expected_outcome_message)
#
# expect_equal(returned_object$data, expected=expected_data_frame, label="The returned data.frame should be correct") # dput(returned_object$data)
# expect_equal(returned_object$status_code, expected=405L)
# # expect_equal(returned_object$raw_text, expected="", ignore_attr = TRUE) # dput(returned_object$raw_text)
# expect_match(returned_object$outcome_message, regexp=expected_outcome_message, perl=TRUE)
# expect_false(returned_object$success)
})
test_that("bad token -Error", {
testthat::skip_on_cran()
expected_error_message <- "ERROR: You do not have permissions to use the API"
expect_error(
redcap_variables(
redcap_uri = credential$redcap_uri,
token = "BAD00000000000000000000000000000"
),
expected_error_message
)
})
rm(credential)
|
library(dplyr)
library(stringr)    # str_pad()
library(rgdal)      # readOGR()
library(ggplot2)
library(ggthemes)   # theme_map() (assumed; cowplot also provides one)
nys <- readRDS("./temp/new_york_race_census.RDS")
voted_general <- nys[nys$voted_general == T, ]
precincts <- full_join(
voted_general %>%
group_by(election_district, assembly_district) %>%
summarize_at(vars(gender, dem, rep, yob, pred.whi, pred.bla, pred.his, pred.asi,
                         median_income, some_college, unem), funs(mean(., na.rm = T))),
voted_general %>%
group_by(election_district, assembly_district) %>%
tally(),
by = c("assembly_district", "election_district")
)
precincts$ed <- as.integer(paste0(str_pad(precincts$assembly_district, width = 2, pad = "0", side = "left"),
str_pad(precincts$election_district, width = 3, pad = "0", side = "left")))
ed_shapefile <- readOGR("./raw_data/shapefiles/nyc_election_districts/nyed_19a", "nyed")
ed_shapefile@data$id <- rownames(ed_shapefile@data)
t <- fortify(ed_shapefile)
ed_shapefile <- inner_join(ed_shapefile@data, t, by = "id")
ed_map <- left_join(ed_shapefile, precincts, by = c("ElectDist" = "ed"))
ggplot() +
geom_polygon(data = ed_map, aes(x = long, y = lat, group = group, fill = n)) +
geom_path(data = ed_map, aes(x = long, y = lat, group = group), color = "white", size = 0.01) +
coord_equal() + theme_map() +
labs(fill = "Count of Voters by Precinct") +
scale_fill_gradient(label = scales::comma, limits = c(299, 601), oob = scales::squish)
ggsave("./output/city_map_vcount.png")
ggplot() +
geom_polygon(data = ed_map, aes(x = long, y = lat, group = group, fill = median_income)) +
geom_path(data = ed_map, aes(x = long, y = lat, group = group), color = "white", size = 0.01) +
coord_equal() + theme_map() +
labs(fill = "Median Income by Precinct") +
scale_fill_gradient(label = scales::dollar, limits = c(45000, 100000), oob = scales::squish)
ggsave("./output/city_map_income.png")
ggplot() +
geom_polygon(data = ed_map, aes(x = long, y = lat, group = group, fill = pred.whi)) +
geom_path(data = ed_map, aes(x = long, y = lat, group = group), color = "white", size = 0.01) +
coord_equal() + theme_map() +
labs(fill = "Share Non-Hispanic White by Precinct") +
scale_fill_gradient(label = scales::percent, limits = c(0.15, 0.9), oob = scales::squish)
ggsave("./output/city_map_race.png")
|
/code/old/new_york/02_make_city.R
|
no_license
|
BrennanCenter/resource_allocation
|
R
| false | false | 2,259 |
r
|
library(dplyr)
library(stringr)    # str_pad()
library(rgdal)      # readOGR()
library(ggplot2)
library(ggthemes)   # theme_map() (assumed; cowplot also provides one)
nys <- readRDS("./temp/new_york_race_census.RDS")
voted_general <- nys[nys$voted_general == T, ]
precincts <- full_join(
voted_general %>%
group_by(election_district, assembly_district) %>%
summarize_at(vars(gender, dem, rep, yob, pred.whi, pred.bla, pred.his, pred.asi,
                         median_income, some_college, unem), funs(mean(., na.rm = T))),
voted_general %>%
group_by(election_district, assembly_district) %>%
tally(),
by = c("assembly_district", "election_district")
)
precincts$ed <- as.integer(paste0(str_pad(precincts$assembly_district, width = 2, pad = "0", side = "left"),
str_pad(precincts$election_district, width = 3, pad = "0", side = "left")))
ed_shapefile <- readOGR("./raw_data/shapefiles/nyc_election_districts/nyed_19a", "nyed")
ed_shapefile@data$id <- rownames(ed_shapefile@data)
t <- fortify(ed_shapefile)
ed_shapefile <- inner_join(ed_shapefile@data, t, by = "id")
ed_map <- left_join(ed_shapefile, precincts, by = c("ElectDist" = "ed"))
ggplot() +
geom_polygon(data = ed_map, aes(x = long, y = lat, group = group, fill = n)) +
geom_path(data = ed_map, aes(x = long, y = lat, group = group), color = "white", size = 0.01) +
coord_equal() + theme_map() +
labs(fill = "Count of Voters by Precinct") +
scale_fill_gradient(label = scales::comma, limits = c(299, 601), oob = scales::squish)
ggsave("./output/city_map_vcount.png")
ggplot() +
geom_polygon(data = ed_map, aes(x = long, y = lat, group = group, fill = median_income)) +
geom_path(data = ed_map, aes(x = long, y = lat, group = group), color = "white", size = 0.01) +
coord_equal() + theme_map() +
labs(fill = "Median Income by Precinct") +
scale_fill_gradient(label = scales::dollar, limits = c(45000, 100000), oob = scales::squish)
ggsave("./output/city_map_income.png")
ggplot() +
geom_polygon(data = ed_map, aes(x = long, y = lat, group = group, fill = pred.whi)) +
geom_path(data = ed_map, aes(x = long, y = lat, group = group), color = "white", size = 0.01) +
coord_equal() + theme_map() +
labs(fill = "Share Non-Hispanic White by Precinct") +
scale_fill_gradient(label = scales::percent, limits = c(0.15, 0.9), oob = scales::squish)
ggsave("./output/city_map_race.png")
|
library(readxl)
library(dplyr)
### SYSTEM LEVEL (PEP) ###
inputResp<-read_excel("data/cdna_pep.xlsx")
inputResp$logo<-NULL; inputResp$intro0<-NULL; inputResp$intro0a<-NULL; inputResp$url_widget2<-NULL; inputResp$intro1a<-NULL
inputResp$tanggal<-NULL; inputResp$`_index`<-NULL;inputResp$`_validation_status`<-NULL; inputResp$`_submission_time`<-NULL; inputResp$`_uuid`<-NULL; inputResp$`_id`<-NULL
inputResp$intropenutup<-NULL; inputResp$intropenutup2<-NULL; inputResp$introSistem<-NULL; inputResp$intropemantauan1<-NULL
inputResp$alasan<-NULL
for (i in 1:9){
eval(parse(text=paste0("inputResp$alasan_00",i,"<-NULL")))
}
for (i in 10:15){
eval(parse(text=paste0("inputResp$alasan_0",i,"<-NULL")))
}
inputResp<-as.data.frame(inputResp)
sistem<- as.data.frame(lapply(inputResp[,5:length(inputResp)], as.numeric))
q9.1<-rowSums(sistem[,1:5]); q9.1<-as.data.frame(q9.1)/5
q9.2<-rowSums(sistem[,6:11]); q9.2<-as.data.frame(q9.2)/6
q9.3<-rowSums(sistem[,12:14]); q9.3<-as.data.frame(q9.3)/3
q9.4<-rowSums(sistem[,15:16]); q9.4<-as.data.frame(q9.4)/2
levelSistem<-cbind(q9.1,q9.2,q9.3,q9.4)
colnames(levelSistem)<-c("q9.1","q9.2","q9.3","q9.4")
write.csv(levelSistem,"Hasil sistem.csv")
# gap_9.1<-5-levelSistem$q9.1; gap_9.2<-5-levelSistem$q9.2; gap_9.3<-5-levelSistem$q9.3; gap_9.4<-5-levelSistem$q9.4
# valGAP<-cbind(gap_9.1,gap_9.2,gap_9.3,gap_9.4)
# val_Sistem<-cbind(levelSistem,valGAP)
# tempSistem<-as.data.frame((val_Sistem))
# tes <- c("9.1 Muatan/Subtansi", "9.2 Pelaksanaan", "9.3 Pelaksana", "9.4 Pemanfaatan")
#
# # Display the results for a single respondent
# #tempSistem<-filter(tempSistem,Provinsi==input$categoryProvince)
#
# # Results per aspect
# Indikator_Penilaian<-c("9. Pemantauan, Evaluasi, dan Pelaporan")
# LevelPEP<-mean(as.numeric(tempSistem[1:4]))
# LevelSistem<-as.data.frame(t(LevelPEP))
# gapPEP<-mean(as.numeric(tempSistem[5:8]))
# GAPSistem<-as.data.frame(t(gapPEP))
# summSistem<-as.data.frame(cbind(Indikator_Penilaian, LevelSistem, GAPSistem))
# colnames(summSistem)<-c("Aspek Penilaian","Level","GAP")
#
# # Results per functional capacity
# tabelKapasitasSistem<-as.data.frame(cbind(tes,t((tempSistem[1:4])),t(tempSistem[5:8])))
# colnames(tabelKapasitasSistem)<-c("Kapasitas Fungsional","Level","GAP")
### INDIVIDUAL LEVEL ###
inputRespInd<-read_excel("data/cdna_ind2.xlsx")
#inputRespInd<-read_excel("data/cdna_individu_sumsel.xlsx")
inputRespInd$logo<-NULL; inputRespInd$intro0<-NULL; inputRespInd$intro0a<-NULL; inputRespInd$intro1a<-NULL; inputRespInd$callid<-NULL
inputRespInd$gender<-NULL; inputRespInd$jabatan<-NULL; inputRespInd$akun <- NULL; inputRespInd$tanggal<-NULL; inputRespInd$callresp<-NULL
inputRespInd$introIndividu<-NULL; inputRespInd$introSDM2<-NULL; inputRespInd$`_index`<-NULL; inputRespInd$`_validation_status`<-NULL
inputRespInd$`_submission_time`<-NULL; inputRespInd$`_uuid`<-NULL; inputRespInd$`_id`<-NULL; inputRespInd$intropenutup<-NULL
inputRespInd$alasan<-NULL
for (i in 1:9){
eval(parse(text=paste0("inputRespInd$alasan_00",i,"<-NULL")))
}
for (i in 10:22){
eval(parse(text=paste0("inputRespInd$alasan_0",i,"<-NULL")))
}
inputRespInd<-as.data.frame(inputRespInd)
valResp<- as.data.frame(lapply(inputRespInd[,6:length(inputRespInd)], as.numeric))
Level6.1<-rowSums(valResp[,1:2]); Level6.1<-as.data.frame(Level6.1)/2
Level6.2<-rowSums(valResp[,3:11]); Level6.2<-as.data.frame(Level6.2)/9
Level6.3<-rowSums(valResp[,12:20]); Level6.3<-as.data.frame(Level6.3)/9
Level6.4<-rowSums(valResp[,21:23]); Level6.4<-as.data.frame(Level6.4)/3
valInd<-cbind(inputRespInd$provinsi,Level6.1,Level6.2,Level6.3,Level6.4)
individu<-as.data.frame(valInd)
write.csv(individu,"hasilindividu_sumsel.csv")
# Indikator <- c("6.1. Kesesuaian Peran dalam Implementasi RAD GRK/PPRKD dengan Tugas dan Fungsi","6.2. Pengetahuan","6.3. Keterampilan","6.4. Pengembangan dan Motivasi")
# Indikator <- as.data.frame(Indikator)
#
# #individu<-filter(individu,inputRespInd$provinsi==input$categoryProvince)
#
# # Results per aspect
# Indikator_Penilaian_Ind<-"6. Sumber Daya Manusia - Individu"
# Level6<-rowSums(individu[,2:5])/length(individu[,2:5])
# Level6<-sum(Level6)/length(individu$`inputRespInd$provinsi`)
# gap6<-5-Level6
# summInd2<-as.data.frame(cbind(Indikator_Penilaian_Ind, Level6, gap6))
# colnames(summInd2)<-c("Aspek Penilaian","Level","GAP")
#
# ## Results per functional capacity
# Ind6.1<-mean(individu$Level6.1); Ind6.2<-mean(individu$Level6.2); Ind6.3<-mean(individu$Level6.3); Ind6.4<-mean(individu$Level6.4)
# tempLevelInd <- as.data.frame(t(cbind(Ind6.1,Ind6.2,Ind6.3,Ind6.4)))
# tempGapInd <- 5 - tempLevelInd
# graphInd2<-cbind(Indikator,tempLevelInd,tempGapInd)
# colnames(graphInd2)<-c("Indikator","Level","GAP")
# graphInd2
|
/_YK/others/kodingan/cleaningSumsel.R
|
no_license
|
alfanugraha/cda
|
R
| false | false | 4,690 |
r
|
library(readxl)
library(dplyr)
### SYSTEM LEVEL (PEP) ###
inputResp<-read_excel("data/cdna_pep.xlsx")
inputResp$logo<-NULL; inputResp$intro0<-NULL; inputResp$intro0a<-NULL; inputResp$url_widget2<-NULL; inputResp$intro1a<-NULL
inputResp$tanggal<-NULL; inputResp$`_index`<-NULL;inputResp$`_validation_status`<-NULL; inputResp$`_submission_time`<-NULL; inputResp$`_uuid`<-NULL; inputResp$`_id`<-NULL
inputResp$intropenutup<-NULL; inputResp$intropenutup2<-NULL; inputResp$introSistem<-NULL; inputResp$intropemantauan1<-NULL
inputResp$alasan<-NULL
for (i in 1:9){
eval(parse(text=paste0("inputResp$alasan_00",i,"<-NULL")))
}
for (i in 10:15){
eval(parse(text=paste0("inputResp$alasan_0",i,"<-NULL")))
}
inputResp<-as.data.frame(inputResp)
sistem<- as.data.frame(lapply(inputResp[,5:length(inputResp)], as.numeric))
q9.1<-rowSums(sistem[,1:5]); q9.1<-as.data.frame(q9.1)/5
q9.2<-rowSums(sistem[,6:11]); q9.2<-as.data.frame(q9.2)/6
q9.3<-rowSums(sistem[,12:14]); q9.3<-as.data.frame(q9.3)/3
q9.4<-rowSums(sistem[,15:16]); q9.4<-as.data.frame(q9.4)/2
levelSistem<-cbind(q9.1,q9.2,q9.3,q9.4)
colnames(levelSistem)<-c("q9.1","q9.2","q9.3","q9.4")
write.csv(levelSistem,"Hasil sistem.csv")
# gap_9.1<-5-levelSistem$q9.1; gap_9.2<-5-levelSistem$q9.2; gap_9.3<-5-levelSistem$q9.3; gap_9.4<-5-levelSistem$q9.4
# valGAP<-cbind(gap_9.1,gap_9.2,gap_9.3,gap_9.4)
# val_Sistem<-cbind(levelSistem,valGAP)
# tempSistem<-as.data.frame((val_Sistem))
# tes <- c("9.1 Muatan/Subtansi", "9.2 Pelaksanaan", "9.3 Pelaksana", "9.4 Pemanfaatan")
#
# # Display the results for a single respondent
# #tempSistem<-filter(tempSistem,Provinsi==input$categoryProvince)
#
# # Results per aspect
# Indikator_Penilaian<-c("9. Pemantauan, Evaluasi, dan Pelaporan")
# LevelPEP<-mean(as.numeric(tempSistem[1:4]))
# LevelSistem<-as.data.frame(t(LevelPEP))
# gapPEP<-mean(as.numeric(tempSistem[5:8]))
# GAPSistem<-as.data.frame(t(gapPEP))
# summSistem<-as.data.frame(cbind(Indikator_Penilaian, LevelSistem, GAPSistem))
# colnames(summSistem)<-c("Aspek Penilaian","Level","GAP")
#
# # Results per functional capacity
# tabelKapasitasSistem<-as.data.frame(cbind(tes,t((tempSistem[1:4])),t(tempSistem[5:8])))
# colnames(tabelKapasitasSistem)<-c("Kapasitas Fungsional","Level","GAP")
### INDIVIDUAL LEVEL ###
inputRespInd<-read_excel("data/cdna_ind2.xlsx")
#inputRespInd<-read_excel("data/cdna_individu_sumsel.xlsx")
inputRespInd$logo<-NULL; inputRespInd$intro0<-NULL; inputRespInd$intro0a<-NULL; inputRespInd$intro1a<-NULL; inputRespInd$callid<-NULL
inputRespInd$gender<-NULL; inputRespInd$jabatan<-NULL; inputRespInd$akun <- NULL; inputRespInd$tanggal<-NULL; inputRespInd$callresp<-NULL
inputRespInd$introIndividu<-NULL; inputRespInd$introSDM2<-NULL; inputRespInd$`_index`<-NULL; inputRespInd$`_validation_status`<-NULL
inputRespInd$`_submission_time`<-NULL; inputRespInd$`_uuid`<-NULL; inputRespInd$`_id`<-NULL; inputRespInd$intropenutup<-NULL
inputRespInd$alasan<-NULL
for (i in 1:9){
eval(parse(text=paste0("inputRespInd$alasan_00",i,"<-NULL")))
}
for (i in 10:22){
eval(parse(text=paste0("inputRespInd$alasan_0",i,"<-NULL")))
}
inputRespInd<-as.data.frame(inputRespInd)
valResp<- as.data.frame(lapply(inputRespInd[,6:length(inputRespInd)], as.numeric))
Level6.1<-rowSums(valResp[,1:2]); Level6.1<-as.data.frame(Level6.1)/2
Level6.2<-rowSums(valResp[,3:11]); Level6.2<-as.data.frame(Level6.2)/9
Level6.3<-rowSums(valResp[,12:20]); Level6.3<-as.data.frame(Level6.3)/9
Level6.4<-rowSums(valResp[,21:23]); Level6.4<-as.data.frame(Level6.4)/3
valInd<-cbind(inputRespInd$provinsi,Level6.1,Level6.2,Level6.3,Level6.4)
individu<-as.data.frame(valInd)
write.csv(individu,"hasilindividu_sumsel.csv")
# Indikator <- c("6.1. Kesesuaian Peran dalam Implementasi RAD GRK/PPRKD dengan Tugas dan Fungsi","6.2. Pengetahuan","6.3. Keterampilan","6.4. Pengembangan dan Motivasi")
# Indikator <- as.data.frame(Indikator)
#
# #individu<-filter(individu,inputRespInd$provinsi==input$categoryProvince)
#
# # Results per aspect
# Indikator_Penilaian_Ind<-"6. Sumber Daya Manusia - Individu"
# Level6<-rowSums(individu[,2:5])/length(individu[,2:5])
# Level6<-sum(Level6)/length(individu$`inputRespInd$provinsi`)
# gap6<-5-Level6
# summInd2<-as.data.frame(cbind(Indikator_Penilaian_Ind, Level6, gap6))
# colnames(summInd2)<-c("Aspek Penilaian","Level","GAP")
#
# ## Results per functional capacity
# Ind6.1<-mean(individu$Level6.1); Ind6.2<-mean(individu$Level6.2); Ind6.3<-mean(individu$Level6.3); Ind6.4<-mean(individu$Level6.4)
# tempLevelInd <- as.data.frame(t(cbind(Ind6.1,Ind6.2,Ind6.3,Ind6.4)))
# tempGapInd <- 5 - tempLevelInd
# graphInd2<-cbind(Indikator,tempLevelInd,tempGapInd)
# colnames(graphInd2)<-c("Indikator","Level","GAP")
# graphInd2
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/upper_aerodigestive_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.65,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_072.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
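# illustrative follow-up (not part of the original pipeline): the predictors kept at the
# cross-validated lambda could be listed with, e.g.:
# cf <- as.matrix(coef(glm, s = "lambda.min"))
# rownames(cf)[cf[, 1] != 0]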
|
/Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_072.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 418 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/upper_aerodigestive_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.65,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_072.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @import sp
#' @importFrom sf as_Spatial
#' @importFrom utils read.csv
#' @import httr
#' @importFrom jsonlite fromJSON
## usethis namespace: end
NULL
|
/R/rnaturalearth-package.R
|
no_license
|
cran/rnaturalearth
|
R
| false | false | 215 |
r
|
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @import sp
#' @importFrom sf as_Spatial
#' @importFrom utils read.csv
#' @import httr
#' @importFrom jsonlite fromJSON
## usethis namespace: end
NULL
|
`etc.diff` <-
function(formula,data,base=1,margin.up=NULL,margin.lo=-margin.up,
method="var.unequal",FWER=0.05) {
if (length(formula) != 3) {
stop("formula mis-specified")
}
mf <- model.frame(formula, data)
if (ncol(mf) != 2) {
stop("Specify one response and only one class variable in the formula")
}
if (is.numeric(mf[, 1]) == FALSE) {
stop("Response variable must be numeric")
}
Response <- mf[, 1]
Treatment <- as.factor(mf[, 2])
tr.names <- levels(Treatment)
comp.names <- paste(tr.names[-base], tr.names[base], sep = "-")
k <- length(comp.names) # number of comparisons
if ( is.numeric(margin.up)==FALSE | (length(margin.up)==k)+(length(margin.up)==1)==0 ) {
stop("margin.up must be a single numeric value or a numeric vector of lenght equal to the number of comparisons")
}
if (length(margin.up)==1) {
margin.up <- rep(margin.up,k)
}
if ( is.numeric(margin.lo)==FALSE | (length(margin.lo)==k)+(length(margin.lo)==1)==0 ) {
stop("margin.lo must be a single numeric value or a numeric vector of lenght equal to the number of comparisons")
}
if (length(margin.lo)==1) {
margin.lo <- rep(margin.lo,k)
}
if (any(margin.up<=0) | any(margin.lo>=0)) {
stop("All components of margin.up (margin.lo) must be positiv (negative)")
}
method <- match.arg(method, choices = c("Bofinger", "var.equal", "var.unequal", "non.par"))
tr.mean <- tapply(Response,Treatment,mean)
tr.sd <- tapply(Response,Treatment,sd)
tr.n <- tapply(Response,Treatment,length)
estimate <- tr.mean[-base]-tr.mean[base] # estimates
test.stat <- numeric(k)
m <- floor(k/2); u <- m+1
p.value <- numeric(k)
if (method=="Bofinger") # due to Bof./Tong, only exact
{ # for balancedness!
if (any(margin.up!=-margin.lo)) {
stop("Method Bofinger works only for margin.up = -margin.lo")
}
if (all(as.numeric(tr.n[-base])==tr.n[-base][1])==FALSE)
{
cat("Warning: Method Bofinger is only correct for equal sample sizes of the test treatments",
"\n")
}
s <- sqrt( sum((tr.n-1)*tr.sd^2)/sum(tr.n-1) ) # pooled standard deviation
degr.fr <- sum(tr.n-1) # degree of freedom
corr.mat <- diag(k) # correl. matrix due to Bof./Tong
if (k>1)
{
for(i in 1:k) { for(j in 1:k) { corr.mat[i,j]=1/sqrt( (1+tr.n[base]/tr.n[-base][i])*
(1+tr.n[base]/tr.n[-base][j]) ) }}
for(i in 1:m) { for(j in u:k) { corr.mat[i,j]=-corr.mat[i,j] }}
for(i in u:k) { for(j in 1:m) { corr.mat[i,j]=-corr.mat[i,j] }}
diag(corr.mat)=rep(1,times=ncol(corr.mat))
}
qu <- qmvt(1-FWER, tail="lower.tail", df=degr.fr, corr=corr.mat)$quantile
test.stat <- ( abs(tr.mean[-base]-tr.mean[base])-margin.up ) / ( s * sqrt(1/tr.n[-base] + 1/tr.n[base]) )
for (i in 1:k) {
p.value[i]=1-pmvt(lower=rep(test.stat[i],times=k),upper=Inf,df=degr.fr,corr=corr.mat)[1]
}
lower <- estimate-qu*( s * sqrt(1/tr.n[-base] + 1/tr.n[base]) ); lower[lower>0]=0
upper <- estimate+qu*( s * sqrt(1/tr.n[-base] + 1/tr.n[base]) ); upper[upper<0]=0
conf.int <- rbind(lower,upper); rownames(conf.int) <- c("lower","upper")
value <- list(comp.names=comp.names,estimate=estimate,degr.fr=degr.fr,test.stat=test.stat,crit.value=-qu,corr.mat=corr.mat,
p.value=p.value,conf.int=conf.int,base=base,margin.lo=margin.lo,margin.up=margin.up,method=method,
FWER=FWER)
}
if(method=="var.equal") # Bonferroni-adjustment
{
s <- sqrt(sum((tr.n-1)*tr.sd^2)/sum(tr.n-1)) # !: pooled standard deviation
degr.fr <- sum(tr.n-1) # degree of freedom
qu <- qt(1-FWER/k, df=degr.fr, lower.tail=TRUE)
test.stat.up <- ( tr.mean[-base]-tr.mean[base]-margin.lo ) /
( s * sqrt(1/tr.n[-base] + 1/tr.n[base]) ) # test "up"
test.stat.do <- ( tr.mean[-base]-tr.mean[base]-margin.up ) /
( s * sqrt(1/tr.n[-base] + 1/tr.n[base]) ) # test "down"
for (i in 1:k) {
test.stat[i]=max(-test.stat.up[i],test.stat.do[i])
p.value[i]=min(pt(q=test.stat[i], df=degr.fr, lower.tail=TRUE)*k, 1)
}
lower <- estimate-qu*( s * sqrt(1/tr.n[-base] + 1/tr.n[base]) ); lower[lower>0]=0
upper <- estimate+qu*( s * sqrt(1/tr.n[-base] + 1/tr.n[base]) ); upper[upper<0]=0
conf.int <- rbind(lower,upper); rownames(conf.int) <- c("lower","upper")
value <- list(comp.names=comp.names,estimate=estimate,degr.fr=degr.fr,test.stat=test.stat,crit.value=-qu,
p.value=p.value,conf.int=conf.int,base=base,margin.lo=margin.lo,margin.up=margin.up,method=method,
FWER=FWER)
}
if(method=="var.unequal") # Bonferroni-adjustment
{
degr.fr <- ( (tr.sd[-base])^2/tr.n[-base]+(tr.sd[base])^2/tr.n[base] )^2 / # degrees of freedom (Welch)
( ((tr.sd[-base])^2/tr.n[-base])^2/(tr.n[-base]-1) + ((tr.sd[base])^2/tr.n[base])^2/(tr.n[base]-1) )
test.stat.up <- ( tr.mean[-base]-tr.mean[base]-margin.lo ) /
( sqrt((tr.sd[-base])^2/tr.n[-base] + (tr.sd[base])^2/tr.n[base]) ) # test "up"
test.stat.do <- ( tr.mean[-base]-tr.mean[base]-margin.up ) /
( sqrt((tr.sd[-base])^2/tr.n[-base] + (tr.sd[base])^2/tr.n[base]) ) # test "down"
qu <- numeric(k)
for (i in 1:k) {
qu[i]=qt(1-FWER/k, df=degr.fr[i], lower.tail=TRUE)
test.stat[i]=max(-test.stat.up[i],test.stat.do[i])
p.value[i]=min(pt(q=test.stat[i], df=degr.fr[i], lower.tail=TRUE)*k, 1)
}
lower <- estimate-qu*( sqrt((tr.sd[-base])^2/tr.n[-base] + (tr.sd[base])^2/tr.n[base]) ); lower[lower>0]=0
upper <- estimate+qu*( sqrt((tr.sd[-base])^2/tr.n[-base] + (tr.sd[base])^2/tr.n[base]) ); upper[upper<0]=0
conf.int <- rbind(lower,upper); rownames(conf.int) <- c("lower","upper")
value <- list(comp.names=comp.names,estimate=estimate,degr.fr=degr.fr,test.stat=test.stat,crit.value=-qu,
p.value=p.value,conf.int=conf.int,base=base,margin.lo=margin.lo,margin.up=margin.up,method=method,
FWER=FWER)
}
if(method=="non.par") # Bonferroni-adjustment
{
test.stat.up <- p.value.up <- lower <- numeric(k)
test.stat.do <- p.value.do <- upper <- numeric(k)
for (i in 1:k) {
test.up <- wilcox.test(x=subset(mf,mf[,2]==tr.names[-base][i])[,1],y=subset(mf,mf[,2]==tr.names[base])[,1],
alternative="greater",mu=margin.lo[i],paired=FALSE,exact=FALSE,correct=TRUE,conf.int=TRUE,conf.level=1-FWER/k)
test.do <- wilcox.test(x=subset(mf,mf[,2]==tr.names[-base][i])[,1],y=subset(mf,mf[,2]==tr.names[base])[,1],
alternative="less",mu=margin.up[i],paired=FALSE,exact=FALSE,correct=TRUE,conf.int=TRUE,conf.level=1-FWER/k)
test.stat.up[i]=test.up$statistic; test.stat.do[i]=test.do$statistic
p.value.up[i]=test.up$p.value; p.value.do[i]=test.do$p.value
p.value[i]=min(max(p.value.up[i],p.value.do[i])*k,1)
lower[i]=test.up$conf.int[1]; upper[i]=test.do$conf.int[2]
}
test.stat <- cbind(test.stat.up,test.stat.do)
lower[lower>0]=0; upper[upper<0]=0
conf.int <- rbind(lower,upper); rownames(conf.int) <- c("lower","upper")
value <- list(comp.names=comp.names,estimate=estimate,test.stat=test.stat,
p.value=p.value,conf.int=conf.int,base=base,margin.lo=margin.lo,margin.up=margin.up,method=method,
FWER=FWER)
}
if(method=="var.unequal") {
names(value$degr.fr) <- comp.names
names(value$crit.value) <- comp.names
}
names(value$estimate) <- comp.names
names(value$test.stat) <- comp.names
names(value$p.value) <- comp.names
colnames(value$conf.int) <- comp.names
class(value) <- "etc.diff"
return(value)
}
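# Illustrative call (assumed data, not part of the package source): equivalence of two
# test treatments to a control with symmetric margins, using the Welch-type method:
# set.seed(1)
# dat <- data.frame(resp = c(rnorm(20, 10), rnorm(20, 10.2), rnorm(20, 9.8)),
#                   trt  = factor(rep(c("Control", "B", "C"), each = 20),
#                                 levels = c("Control", "B", "C")))
# etc.diff(resp ~ trt, data = dat, base = 1, margin.up = 1, margin.lo = -1,
#          method = "var.unequal", FWER = 0.05)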
|
/R/etc.diff.R
|
no_license
|
cran/ETC
|
R
| false | false | 8,558 |
r
|
|
## Designed to create a special object that
## stores a matrix and caches its inverse.
## makeCacheMatrix sets/gets the matrix and
## sets/gets the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(solve) m <<- solve
getinv <- function() m
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Returns a matrix that is the inverse of 'x'
cacheSolve <- function(x, ...) {
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
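## Illustrative usage (hypothetical values, not part of the original file):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(cm)   # first call computes the inverse and caches it
## cacheSolve(cm)   # second call prints "getting cached data" and reuses the cache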
|
/cachematrix.R
|
no_license
|
davegiff/ProgrammingAssignment2
|
R
| false | false | 741 |
r
|
|
\name{midpoints.psp}
\alias{midpoints.psp}
\title{Midpoints of Line Segment Pattern}
\description{
Computes the midpoints of each line segment
in a line segment pattern.
}
\usage{
midpoints.psp(x)
}
\arguments{
\item{x}{
A line segment pattern (object of class \code{"psp"}).
}
}
\value{
Point pattern (object of class \code{"ppp"}).
}
\details{
The midpoint of each line segment is computed.
}
\seealso{
\code{\link{summary.psp}},
\code{\link{lengths.psp}},
\code{\link{angles.psp}}
}
\examples{
a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin())
b <- midpoints.psp(a)
}
\author{
Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
and Rolf Turner
\email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{math}
|
/man/midpoints.psp.Rd
|
no_license
|
cuulee/spatstat
|
R
| false | false | 817 |
rd
|
|
#' distance_mat_gen
#'
#' @description This function aims to generate the distance matrix of taxa based on their path lengths in the phylogenetic tree.
#' @param edges A matrix of dimension N * 2 corresponding to the edge set of the phylogenetic tree (similar to the edge set for a graph).
#' @param n_taxa A scalar corresponding to number of taxa in the dataset.
#' @param tree_height A scalar corresponding to the height of the phylogenetic tree. Any number larger than the height of the phylogenetic tree will also work.
#' The default value is 50, which is usually sufficient, since even a complete binary tree of height 50 contains 2^50 nodes.
#' @return A matrix of dimension n_taxa * n_taxa corresponding to the matrix D in mbImpute function.
#' @export
distance_mat_gen <- function(edges, n_taxa, tree_height = 50){
k = tree_height
m = n_taxa
nd_mat <- matrix(rep(1, k*m), k, m)
l <- rep(1,k)
for(i in 1:n_taxa){
print(i)
l <- rep(1,tree_height+1)
l[1] = i
for(j in 2:(tree_height+1)){
if(sum(edges[,2] %in% l[j-1]) != 0){
l[j] = edges[edges[,2] %in% l[j-1], 1]
}
else{
l[j] = NA
}
}
nd_mat[,i] = l[2:(tree_height+1)]
}
d1_mat <- matrix(0, nrow = n_taxa, ncol = n_taxa)
#records the position of 1:n_taxa in the edges set.
taxa_vec <- match(1:n_taxa, edges[,2])
#generate the distance matrix
for(i in 1:n_taxa){
for(j in 1:n_taxa){
int_sc <- intersect(nd_mat[,i], nd_mat[,j])
leni <- sum(!is.na(int_sc))
len1 <- sum(!is.na(nd_mat[,i]))
len2 <- sum(!is.na(nd_mat[,j]))
d1_mat[i, j] = len1 - leni + 1 + len2 - leni + 1
}
}
diag(d1_mat) = 0
#d1_mat denotes the distance for two taxa
return(d1_mat)
}
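# Illustrative call (hypothetical edge matrix, not part of the original file): a
# 3-taxon tree whose edges are (parent, child) pairs with internal nodes 4 and 5.
# edges <- rbind(c(5, 4), c(4, 1), c(4, 2), c(5, 3))
# D <- distance_mat_gen(edges, n_taxa = 3, tree_height = 5)
# D[1, 2] is 2 (path 1-4-2) and D[1, 3] is 3 (path 1-4-5-3).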
|
/mbImpute R package/R/distance_mat_gen.R
|
permissive
|
lsxmf/mbImpute
|
R
| false | false | 1,748 |
r
|
|
## Tests for random forests for survival analysis
library(ranger)
library(survival)
context("ranger_surv")
## Initialize the random forest for survival analysis
rg.surv <- ranger(Surv(time, status) ~ ., data = veteran, verbose = FALSE,
write.forest = TRUE, num.trees = 10)
## Basic tests (for all random forests equal)
test_that("survival result is of class ranger with 16 elements", {
expect_is(rg.surv, "ranger")
expect_equal(length(rg.surv), 16)
})
test_that("results have right number of trees", {
expect_equal(rg.surv$num.trees, 10)
})
test_that("results have right number of independent variables", {
expect_equal(rg.surv$num.independent.variables, ncol(veteran) - 2)
})
test_that("Alternative interface works for survival", {
rf <- ranger(dependent.variable.name = "time", status.variable.name = "status", data = veteran, num.trees = 10)
expect_equal(rf$treetype, "Survival")
})
test_that("Alternative interface prediction works for survival", {
rf <- ranger(dependent.variable.name = "time", status.variable.name = "status", data = veteran, num.trees = 10)
expect_equal(predict(rf, veteran)$num.independent.variables, ncol(veteran) - 2)
expect_equal(predict(rf, veteran[, setdiff(names(veteran), c("time", "status"))])$num.independent.variables, ncol(veteran) - 2)
})
test_that("Matrix interface works for survival", {
rf <- ranger(dependent.variable.name = "time", status.variable.name = "status", data = data.matrix(veteran), write.forest = TRUE, num.trees = 10)
expect_equal(rf$treetype, "Survival")
expect_equal(rf$forest$independent.variable.names, colnames(veteran)[c(1:2, 5:8)])
})
test_that("Matrix interface prediction works for survival", {
dat <- data.matrix(veteran)
rf <- ranger(dependent.variable.name = "time", status.variable.name = "status", data = dat, write.forest = TRUE, num.trees = 10)
expect_silent(predict(rf, dat))
})
test_that("growing works for single observations, survival", {
rf <- ranger(Surv(time, status) ~ ., veteran[1, ], write.forest = TRUE, num.trees = 10)
expect_is(rf$survival, "matrix")
})
test_that("predict works for single observations, survival", {
rf <- ranger(Surv(time, status) ~ ., veteran, write.forest = TRUE, num.trees = 10)
pred <- predict(rf, head(veteran, 1))
expect_equal(length(pred$survival), length(rf$unique.death.times))
})
## Special tests for random forests for survival analysis
test_that("unique death times in survival result is right", {
expect_equal(rg.surv$unique.death.times, sort(unique(veteran$time)))
})
test_that("C-index splitting works", {
rf <- ranger(Surv(time, status) ~ ., data = veteran, verbose = FALSE,
splitrule = "C", num.trees = 10)
expect_equal(rf$treetype, "Survival")
})
test_that("C-index splitting not working on classification data", {
expect_error(ranger(Species ~ ., iris, splitrule = "C", num.trees = 10))
})
test_that("Logrank splitting not working on classification data", {
expect_error(ranger(Species ~ ., iris, splitrule = "logrank", num.trees = 10))
})
test_that("No error if survival tree without OOB observations", {
dat <- data.frame(time = c(1,2), status = c(0,1), x = c(1,2))
expect_silent(ranger(Surv(time, status) ~ ., dat, num.trees = 1, num.threads = 1))
})
test_that("predict.all for survival returns 3d array of size samples x times x trees", {
rf <- ranger(Surv(time, status) ~ ., veteran, num.trees = 5)
pred <- predict(rf, veteran, predict.all = TRUE)
expect_is(pred$survival, "array")
expect_equal(dim(pred$survival),
c(nrow(veteran), length(pred$unique.death.times), rf$num.trees))
expect_is(pred$chf, "array")
expect_equal(dim(pred$chf),
c(nrow(veteran), length(pred$unique.death.times), rf$num.trees))
})
test_that("Mean of predict.all for survival is equal to forest prediction", {
rf <- ranger(Surv(time, status) ~ ., veteran, num.trees = 5)
pred_forest <- predict(rf, veteran, predict.all = FALSE)
pred_trees <- predict(rf, veteran, predict.all = TRUE)
expect_equal(apply(pred_trees$chf, 1:2, mean), pred_forest$chf)
})
test_that("timepoints() function returns timepoints", {
rf <- ranger(Surv(time, status) ~ ., veteran, num.trees = 5)
expect_equal(timepoints(rf), rf$unique.death.times)
pred <- predict(rf, veteran)
expect_equal(timepoints(pred), rf$unique.death.times)
})
test_that("timepoints() working on survival forest only", {
rf <- ranger(Species ~ ., iris, num.trees = 5)
expect_error(timepoints(rf), "No timepoints found. Object is no Survival forest.")
pred <- predict(rf, iris)
expect_error(timepoints(pred), "No timepoints found. Object is no Survival prediction object.")
})
test_that("Survival error without covariates", {
expect_error(ranger(Surv(time, status) ~ ., veteran[, c("time", "status")], num.trees = 5),
"Error: No covariates found.")
})
|
/tests/testthat/test_survival.R
|
no_license
|
jailGroup/RangerBasediRF
|
R
| false | false | 4,901 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importRDB1.R
\name{importRDB1}
\alias{importRDB1}
\title{Function to return data from the NWIS RDB 1.0 format}
\usage{
importRDB1(obs_url, asDateTime = TRUE, convertType = TRUE, tz = "UTC")
}
\arguments{
\item{obs_url}{character containing the url for the retrieval or a file path to the data file.}
\item{asDateTime}{logical, if \code{TRUE} returns date and time as POSIXct, if \code{FALSE}, Date}
\item{convertType}{logical, defaults to \code{TRUE}. If \code{TRUE}, the
function will convert the data to dates, datetimes,
numerics based on a standard algorithm. If false, everything is returned as a character}
\item{tz}{character to set timezone attribute of datetime. Default converts the datetimes to UTC
(properly accounting for daylight savings times based on the data's provided tz_cd column).
Recommended US values include "UTC", "America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "America/Honolulu", "America/Jamaica", "America/Managua",
"America/Phoenix", and "America/Metlakatla".
For a complete list, see \url{https://en.wikipedia.org/wiki/List_of_tz_database_time_zones}}
}
\value{
A data frame with the following columns:
\tabular{lll}{
Name \tab Type \tab Description \cr
agency_cd \tab character \tab The NWIS code for the agency reporting the data\cr
site_no \tab character \tab The USGS site number \cr
datetime \tab POSIXct \tab The date and time of the value converted to
UTC (if asDateTime = \code{TRUE}), \cr
\tab character \tab or raw character string (if asDateTime = FALSE) \cr
tz_cd \tab character \tab The time zone code for datetime \cr
code \tab character \tab Any codes that qualify the corresponding value\cr
value \tab numeric \tab The numeric value for the parameter \cr
tz_cd_reported \tab character \tab The originally reported time zone \cr
}
Note that code and value are repeated for the parameters requested. The names are of the form
XD_P_S, where X is literal,
D is an optional description of the parameter,
P is the parameter code,
and S is the statistic code (if applicable).
If a date/time (dt) column contained incomplete date and times, a new column
of dates and time was inserted. This could happen
when older data was reported as dates, and newer data was reported as a date/time.
There are also several useful attributes attached to the data frame:
\tabular{lll}{
Name \tab Type \tab Description \cr
url \tab character \tab The url used to generate the data \cr
queryTime \tab POSIXct \tab The time the data was returned \cr
comment \tab character \tab Header comments from the RDB file \cr
}
}
\description{
This function accepts a url parameter that already contains the desired
NWIS site, parameter code, statistic, startdate and enddate. It is not
recommended to use the RDB format for importing multi-site data.
}
\examples{
\dontshow{if (is_dataRetrieval_user()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
site_id <- "02177000"
startDate <- "2012-09-01"
endDate <- "2012-10-01"
offering <- "00003"
property <- "00060"
obs_url <- constructNWISURL(site_id, property,
startDate, endDate, "dv",
format = "tsv"
)
\donttest{
data <- importRDB1(obs_url)
urlMultiPcodes <- constructNWISURL("04085427", c("00060", "00010"),
startDate, endDate, "dv",
statCd = c("00003", "00001"), "tsv"
)
multiData <- importRDB1(urlMultiPcodes)
unitDataURL <- constructNWISURL(site_id, property,
"2020-10-30", "2020-11-01", "uv",
format = "tsv"
) # includes timezone switch
unitData <- importRDB1(unitDataURL, asDateTime = TRUE)
qwURL <- constructNWISURL(c("04024430", "04024000"),
c("34247", "30234", "32104", "34220"),
"2010-11-03", "", "qw",
format = "rdb"
)
qwData <- importRDB1(qwURL, asDateTime = TRUE, tz = "America/Chicago")
iceSite <- "04024000"
start <- "2015-11-09"
end <- "2015-11-24"
urlIce <- constructNWISURL(iceSite, "00060", start, end, "uv", format = "tsv")
ice <- importRDB1(urlIce, asDateTime = TRUE)
iceNoConvert <- importRDB1(urlIce, convertType = FALSE)
}
# User file:
filePath <- system.file("extdata", package = "dataRetrieval")
fileName <- "RDB1Example.txt"
fullPath <- file.path(filePath, fileName)
importUserRDB <- importRDB1(fullPath)
\dontshow{\}) # examplesIf}
}
|
/man/importRDB1.Rd
|
no_license
|
cran/dataRetrieval
|
R
| false | true | 4,409 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qdapDictionaries-package.R
\docType{data}
\name{positive.words}
\alias{positive.words}
\title{Positive Words}
\format{A vector with 2003 elements}
\usage{
data(positive.words)
}
\description{
A dataset containing a vector of positive words.
}
\details{
A sentence containing more negative words would be deemed a negative sentence,
whereas a sentence containing more positive words would be considered positive.
}
\references{
Hu, M., & Liu, B. (2004). Mining opinion features in customer
reviews. National Conference on Artificial Intelligence.
\url{http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html}
}
\keyword{datasets}
|
/man/positive.words.Rd
|
no_license
|
trinker/qdapDictionaries
|
R
| false | true | 710 |
rd
|
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
context("Test ValidationException")
model.instance <- ValidationException$new()
test_that("causingExceptions", {
# tests for the property `causingExceptions` (array[ValidationException])
# An array of sub-exceptions.
# uncomment below to test the property
#expect_equal(model.instance$`causingExceptions`, "EXPECTED_RESULT")
})
test_that("keyword", {
# tests for the property `keyword` (character)
# The JSON schema keyword which was violated.
# uncomment below to test the property
#expect_equal(model.instance$`keyword`, "EXPECTED_RESULT")
})
test_that("message", {
# tests for the property `message` (character)
# The description of the validation failure.
# uncomment below to test the property
#expect_equal(model.instance$`message`, "EXPECTED_RESULT")
})
test_that("pointerToViolation", {
# tests for the property `pointerToViolation` (character)
# A JSON Pointer denoting the path from the input document root to its fragment which caused the validation failure.
# uncomment below to test the property
#expect_equal(model.instance$`pointerToViolation`, "EXPECTED_RESULT")
})
test_that("schemaLocation", {
# tests for the property `schemaLocation` (character)
# A JSON Pointer denoting the path from the schema JSON root to the violated keyword.
# uncomment below to test the property
#expect_equal(model.instance$`schemaLocation`, "EXPECTED_RESULT")
})
|
/tests/testthat/test_validation_exception.R
|
no_license
|
thomasyu888/synr-sdk-client
|
R
| false | false | 1,537 |
r
|
|
DataPreProcessing <- function(types)
{
#-------------------------------------------------------------------------------------
# dsRate Dataset
#-------------------------------------------------------------------------------------
if (types=="dsRate")
{
    dsRate$Year = ifelse(is.na(dsRate$Year), ave(dsRate$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$Year)
    dsRate$TotInsLabour = ifelse(is.na(dsRate$TotInsLabour), ave(dsRate$TotInsLabour,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$TotInsLabour)
    dsRate$InsLabEmp = ifelse(is.na(dsRate$InsLabEmp), ave(dsRate$InsLabEmp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$InsLabEmp)
    dsRate$InsLabUnemp = ifelse(is.na(dsRate$InsLabUnemp), ave(dsRate$InsLabUnemp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$InsLabUnemp)
    dsRate$TotOutLabour = ifelse(is.na(dsRate$TotOutLabour), ave(dsRate$TotOutLabour,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$TotOutLabour)
    dsRate$RateLabEmp = ifelse(is.na(dsRate$RateLabEmp), ave(dsRate$RateLabEmp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$RateLabEmp)
    dsRate$RateLabUnEmp = ifelse(is.na(dsRate$RateLabUnEmp), ave(dsRate$RateLabUnEmp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$RateLabUnEmp)
    return(paste("Data Cleaning Process for Dataset (dsRate) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsGender Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsGender")
{
    dsGender$Year = ifelse(is.na(dsGender$Year), ave(dsGender$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$Year)
    dsGender$TotInsLabourM = ifelse(is.na(dsGender$TotInsLabourM), ave(dsGender$TotInsLabourM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotInsLabourM)
    dsGender$InsLabEmpM = ifelse(is.na(dsGender$InsLabEmpM), ave(dsGender$InsLabEmpM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabEmpM)
    dsGender$InsLabUnempM = ifelse(is.na(dsGender$InsLabUnempM), ave(dsGender$InsLabUnempM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabUnempM)
    dsGender$TotOutLabourM = ifelse(is.na(dsGender$TotOutLabourM), ave(dsGender$TotOutLabourM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotOutLabourM)
    dsGender$RateLabEmpM = ifelse(is.na(dsGender$RateLabEmpM), ave(dsGender$RateLabEmpM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabEmpM)
    dsGender$RateLabUnEmpM = ifelse(is.na(dsGender$RateLabUnEmpM), ave(dsGender$RateLabUnEmpM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabUnEmpM)
    dsGender$TotInsLabourF = ifelse(is.na(dsGender$TotInsLabourF), ave(dsGender$TotInsLabourF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotInsLabourF)
    dsGender$InsLabEmpF = ifelse(is.na(dsGender$InsLabEmpF), ave(dsGender$InsLabEmpF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabEmpF)
    dsGender$InsLabUnempF = ifelse(is.na(dsGender$InsLabUnempF), ave(dsGender$InsLabUnempF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabUnempF)
    dsGender$TotOutLabourF = ifelse(is.na(dsGender$TotOutLabourF), ave(dsGender$TotOutLabourF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotOutLabourF)
    dsGender$RateLabEmpF = ifelse(is.na(dsGender$RateLabEmpF), ave(dsGender$RateLabEmpF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabEmpF)
    dsGender$RateLabUnEmpF = ifelse(is.na(dsGender$RateLabUnEmpF), ave(dsGender$RateLabUnEmpF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabUnEmpF)
    return(paste("Data Cleaning Process for Dataset (dsGender) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsRural Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsRural")
{
    dsRural$Year = ifelse(is.na(dsRural$Year), ave(dsRural$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$Year)
    dsRural$AllLabourInsR = ifelse(is.na(dsRural$AllLabourInsR), ave(dsRural$AllLabourInsR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$AllLabourInsR)
    dsRural$InsEmpR = ifelse(is.na(dsRural$InsEmpR), ave(dsRural$InsEmpR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$InsEmpR)
    dsRural$InsUnempR = ifelse(is.na(dsRural$InsUnempR), ave(dsRural$InsUnempR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$InsUnempR)
    dsRural$AllLabourOutR = ifelse(is.na(dsRural$AllLabourOutR), ave(dsRural$AllLabourOutR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$AllLabourOutR)
    dsRural$LabForceRateR = ifelse(is.na(dsRural$LabForceRateR), ave(dsRural$LabForceRateR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$LabForceRateR)
    dsRural$LabForceUempRateR = ifelse(is.na(dsRural$LabForceUempRateR), ave(dsRural$LabForceUempRateR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$LabForceUempRateR)
    return(paste("Data Cleaning Process for Dataset (dsRural) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsUrban Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsUrban")
{
    dsUrban$Year = ifelse(is.na(dsUrban$Year), ave(dsUrban$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$Year)
    dsUrban$AllLabourInsU = ifelse(is.na(dsUrban$AllLabourInsU), ave(dsUrban$AllLabourInsU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$AllLabourInsU)
    dsUrban$InsEmpU = ifelse(is.na(dsUrban$InsEmpU), ave(dsUrban$InsEmpU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$InsEmpU)
    dsUrban$InsUnempU = ifelse(is.na(dsUrban$InsUnempU), ave(dsUrban$InsUnempU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$InsUnempU)
    dsUrban$AllLabourOutU = ifelse(is.na(dsUrban$AllLabourOutU), ave(dsUrban$AllLabourOutU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$AllLabourOutU)
    dsUrban$LabForceRateU = ifelse(is.na(dsUrban$LabForceRateU), ave(dsUrban$LabForceRateU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$LabForceRateU)
    dsUrban$LabForceUempRateU = ifelse(is.na(dsUrban$LabForceUempRateU), ave(dsUrban$LabForceUempRateU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$LabForceUempRateU)
    return(paste("Data Cleaning Process for Dataset (dsUrban) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsAge Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsAge")
{
    dsAge$Year = ifelse(is.na(dsAge$Year), ave(dsAge$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Year)
    dsAge$AllTotalAge = ifelse(is.na(dsAge$AllTotalAge), ave(dsAge$AllTotalAge,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$AllTotalAge)
    dsAge$Age15to19 = ifelse(is.na(dsAge$Age15to19), ave(dsAge$Age15to19,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age15to19)
    dsAge$Age20to24 = ifelse(is.na(dsAge$Age20to24), ave(dsAge$Age20to24,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age20to24)
    dsAge$Age25to29 = ifelse(is.na(dsAge$Age25to29), ave(dsAge$Age25to29,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age25to29)
    dsAge$Age30to34 = ifelse(is.na(dsAge$Age30to34), ave(dsAge$Age30to34,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age30to34)
    dsAge$Age35to39 = ifelse(is.na(dsAge$Age35to39), ave(dsAge$Age35to39,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age35to39)
    dsAge$Age40to44 = ifelse(is.na(dsAge$Age40to44), ave(dsAge$Age40to44,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age40to44)
    dsAge$Age45to49 = ifelse(is.na(dsAge$Age45to49), ave(dsAge$Age45to49,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age45to49)
    dsAge$Age50to54 = ifelse(is.na(dsAge$Age50to54), ave(dsAge$Age50to54,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age50to54)
    dsAge$Age55to59 = ifelse(is.na(dsAge$Age55to59), ave(dsAge$Age55to59,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age55to59)
    dsAge$Age60to64 = ifelse(is.na(dsAge$Age60to64), ave(dsAge$Age60to64,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age60to64)
    return(paste("Data Cleaning Process for Dataset (dsAge) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsEthnic Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsEthnic")
{
    dsEthnic$Year = ifelse(is.na(dsEthnic$Year), ave(dsEthnic$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$Year)
    dsEthnic$AllTotalWorkForce = ifelse(is.na(dsEthnic$AllTotalWorkForce), ave(dsEthnic$AllTotalWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$AllTotalWorkForce)
    dsEthnic$SubTotWorkForce = ifelse(is.na(dsEthnic$SubTotWorkForce), ave(dsEthnic$SubTotWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$SubTotWorkForce)
    dsEthnic$BumiWorkForce = ifelse(is.na(dsEthnic$BumiWorkForce), ave(dsEthnic$BumiWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$BumiWorkForce)
    dsEthnic$ChineseWorkForce = ifelse(is.na(dsEthnic$ChineseWorkForce), ave(dsEthnic$ChineseWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$ChineseWorkForce)
    dsEthnic$IndianWorkForce = ifelse(is.na(dsEthnic$IndianWorkForce), ave(dsEthnic$IndianWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$IndianWorkForce)
    dsEthnic$OtherWorkForce = ifelse(is.na(dsEthnic$OtherWorkForce), ave(dsEthnic$OtherWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$OtherWorkForce)
    dsEthnic$SubTotForeignWorkForce = ifelse(is.na(dsEthnic$SubTotForeignWorkForce), ave(dsEthnic$SubTotForeignWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$SubTotForeignWorkForce)
    return(paste("Data Cleaning Process for Dataset (dsEthnic) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsEdu Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsEdu")
{
    dsEdu$Year = ifelse(is.na(dsEdu$Year), ave(dsEdu$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Year)
    dsEdu$AllTotalEdu = ifelse(is.na(dsEdu$AllTotalEdu), ave(dsEdu$AllTotalEdu,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$AllTotalEdu)
    dsEdu$NonEduc = ifelse(is.na(dsEdu$NonEduc), ave(dsEdu$NonEduc,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$NonEduc)
    dsEdu$Primary = ifelse(is.na(dsEdu$Primary), ave(dsEdu$Primary,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Primary)
    dsEdu$Secondary = ifelse(is.na(dsEdu$Secondary), ave(dsEdu$Secondary,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Secondary)
    dsEdu$Tertiary = ifelse(is.na(dsEdu$Tertiary), ave(dsEdu$Tertiary,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Tertiary)
    return(paste("Data Cleaning Process for Dataset (dsEdu) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsCert Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsCert")
{
    dsCert$Year = ifelse(is.na(dsCert$Year), ave(dsCert$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Year)
    dsCert$AllTotalCert = ifelse(is.na(dsCert$AllTotalCert), ave(dsCert$AllTotalCert,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$AllTotalCert)
    dsCert$UPSRAEquiv = ifelse(is.na(dsCert$UPSRAEquiv), ave(dsCert$UPSRAEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$UPSRAEquiv)
    dsCert$PMRSRPLCEEquiv = ifelse(is.na(dsCert$PMRSRPLCEEquiv), ave(dsCert$PMRSRPLCEEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$PMRSRPLCEEquiv)
    dsCert$SPMEquiv = ifelse(is.na(dsCert$SPMEquiv), ave(dsCert$SPMEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$SPMEquiv)
    dsCert$STPMEquiv = ifelse(is.na(dsCert$STPMEquiv), ave(dsCert$STPMEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$STPMEquiv)
    dsCert$Certificate = ifelse(is.na(dsCert$Certificate), ave(dsCert$Certificate,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Certificate)
    dsCert$Diploma = ifelse(is.na(dsCert$Diploma), ave(dsCert$Diploma,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Diploma)
    dsCert$Degree = ifelse(is.na(dsCert$Degree), ave(dsCert$Degree,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Degree)
    dsCert$ReligCert = ifelse(is.na(dsCert$ReligCert), ave(dsCert$ReligCert,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$ReligCert)
    dsCert$NoCert = ifelse(is.na(dsCert$NoCert), ave(dsCert$NoCert,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$NoCert)
    dsCert$NoRelevant = ifelse(is.na(dsCert$NoRelevant), ave(dsCert$NoRelevant,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$NoRelevant)
    return(paste("Data Cleaning Process for Dataset (dsCert) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsMarital Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsMarital")
{
    dsMarital$AllTotalMarital = ifelse(is.na(dsMarital$AllTotalMarital), ave(dsMarital$AllTotalMarital,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$AllTotalMarital)
    dsMarital$Year = ifelse(is.na(dsMarital$Year), ave(dsMarital$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$Year)
    dsMarital$NeverMarried = ifelse(is.na(dsMarital$NeverMarried), ave(dsMarital$NeverMarried,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$NeverMarried)
    dsMarital$Married = ifelse(is.na(dsMarital$Married), ave(dsMarital$Married,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$Married)
    dsMarital$Widow = ifelse(is.na(dsMarital$Widow), ave(dsMarital$Widow,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$Widow)
    dsMarital$DivorcePermSeparate = ifelse(is.na(dsMarital$DivorcePermSeparate), ave(dsMarital$DivorcePermSeparate,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$DivorcePermSeparate)
    return(paste("Data Cleaning Process for Dataset (dsMarital) has been completed"))
}
else
{
return(paste("NO Data Cleaning Proses Executed"))
}
}
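# Illustrative call (assumes the corresponding data frame, e.g. dsRate, is already
# loaded in the workspace; not part of the original script). Note that, as written,
# the imputed columns live only in the function's local copy of each data frame.
# DataPreProcessing("dsRate")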
|
/DPDataPreProcessing.R
|
no_license
|
hannazhar/WQD7002-Final-Data-Science-Project
|
R
| false | false | 18,066 |
r
|
DataPreProcessing <- function(types)
{
#-------------------------------------------------------------------------------------
# dsRate Dataset
#-------------------------------------------------------------------------------------
if (types=="dsRate")
{
    dsRate$Year = ifelse(is.na(dsRate$Year), ave(dsRate$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$Year)
    dsRate$TotInsLabour = ifelse(is.na(dsRate$TotInsLabour), ave(dsRate$TotInsLabour,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$TotInsLabour)
    dsRate$InsLabEmp = ifelse(is.na(dsRate$InsLabEmp), ave(dsRate$InsLabEmp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$InsLabEmp)
    dsRate$InsLabUnemp = ifelse(is.na(dsRate$InsLabUnemp), ave(dsRate$InsLabUnemp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$InsLabUnemp)
    dsRate$TotOutLabour = ifelse(is.na(dsRate$TotOutLabour), ave(dsRate$TotOutLabour,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$TotOutLabour)
    dsRate$RateLabEmp = ifelse(is.na(dsRate$RateLabEmp), ave(dsRate$RateLabEmp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$RateLabEmp)
    dsRate$RateLabUnEmp = ifelse(is.na(dsRate$RateLabUnEmp), ave(dsRate$RateLabUnEmp,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRate$RateLabUnEmp)
    return(paste("Data Cleaning Process for Dataset (dsRate) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsGender Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsGender")
{
    dsGender$Year = ifelse(is.na(dsGender$Year), ave(dsGender$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$Year)
    dsGender$TotInsLabourM = ifelse(is.na(dsGender$TotInsLabourM), ave(dsGender$TotInsLabourM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotInsLabourM)
    dsGender$InsLabEmpM = ifelse(is.na(dsGender$InsLabEmpM), ave(dsGender$InsLabEmpM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabEmpM)
    dsGender$InsLabUnempM = ifelse(is.na(dsGender$InsLabUnempM), ave(dsGender$InsLabUnempM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabUnempM)
    dsGender$TotOutLabourM = ifelse(is.na(dsGender$TotOutLabourM), ave(dsGender$TotOutLabourM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotOutLabourM)
    dsGender$RateLabEmpM = ifelse(is.na(dsGender$RateLabEmpM), ave(dsGender$RateLabEmpM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabEmpM)
    dsGender$RateLabUnEmpM = ifelse(is.na(dsGender$RateLabUnEmpM), ave(dsGender$RateLabUnEmpM,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabUnEmpM)
    dsGender$TotInsLabourF = ifelse(is.na(dsGender$TotInsLabourF), ave(dsGender$TotInsLabourF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotInsLabourF)
    dsGender$InsLabEmpF = ifelse(is.na(dsGender$InsLabEmpF), ave(dsGender$InsLabEmpF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabEmpF)
    dsGender$InsLabUnempF = ifelse(is.na(dsGender$InsLabUnempF), ave(dsGender$InsLabUnempF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$InsLabUnempF)
    dsGender$TotOutLabourF = ifelse(is.na(dsGender$TotOutLabourF), ave(dsGender$TotOutLabourF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$TotOutLabourF)
    dsGender$RateLabEmpF = ifelse(is.na(dsGender$RateLabEmpF), ave(dsGender$RateLabEmpF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabEmpF)
    dsGender$RateLabUnEmpF = ifelse(is.na(dsGender$RateLabUnEmpF), ave(dsGender$RateLabUnEmpF,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsGender$RateLabUnEmpF)
    return(paste("Data Cleaning Process for Dataset (dsGender) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsRural Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsRural")
{
    dsRural$Year = ifelse(is.na(dsRural$Year), ave(dsRural$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$Year)
    dsRural$AllLabourInsR = ifelse(is.na(dsRural$AllLabourInsR), ave(dsRural$AllLabourInsR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$AllLabourInsR)
    dsRural$InsEmpR = ifelse(is.na(dsRural$InsEmpR), ave(dsRural$InsEmpR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$InsEmpR)
    dsRural$InsUnempR = ifelse(is.na(dsRural$InsUnempR), ave(dsRural$InsUnempR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$InsUnempR)
    dsRural$AllLabourOutR = ifelse(is.na(dsRural$AllLabourOutR), ave(dsRural$AllLabourOutR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$AllLabourOutR)
    dsRural$LabForceRateR = ifelse(is.na(dsRural$LabForceRateR), ave(dsRural$LabForceRateR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$LabForceRateR)
    dsRural$LabForceUempRateR = ifelse(is.na(dsRural$LabForceUempRateR), ave(dsRural$LabForceUempRateR,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsRural$LabForceUempRateR)
    return(paste("Data Cleaning Process for Dataset (dsRural) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsUrban Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsUrban")
{
    dsUrban$Year = ifelse(is.na(dsUrban$Year), ave(dsUrban$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$Year)
    dsUrban$AllLabourInsU = ifelse(is.na(dsUrban$AllLabourInsU), ave(dsUrban$AllLabourInsU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$AllLabourInsU)
    dsUrban$InsEmpU = ifelse(is.na(dsUrban$InsEmpU), ave(dsUrban$InsEmpU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$InsEmpU)
    dsUrban$InsUnempU = ifelse(is.na(dsUrban$InsUnempU), ave(dsUrban$InsUnempU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$InsUnempU)
    dsUrban$AllLabourOutU = ifelse(is.na(dsUrban$AllLabourOutU), ave(dsUrban$AllLabourOutU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$AllLabourOutU)
    dsUrban$LabForceRateU = ifelse(is.na(dsUrban$LabForceRateU), ave(dsUrban$LabForceRateU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$LabForceRateU)
    dsUrban$LabForceUempRateU = ifelse(is.na(dsUrban$LabForceUempRateU), ave(dsUrban$LabForceUempRateU,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsUrban$LabForceUempRateU)
    return(paste("Data Cleaning Process for Dataset (dsUrban) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsAge Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsAge")
{
    dsAge$Year = ifelse(is.na(dsAge$Year), ave(dsAge$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Year)
    dsAge$AllTotalAge = ifelse(is.na(dsAge$AllTotalAge), ave(dsAge$AllTotalAge,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$AllTotalAge)
    dsAge$Age15to19 = ifelse(is.na(dsAge$Age15to19), ave(dsAge$Age15to19,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age15to19)
    dsAge$Age20to24 = ifelse(is.na(dsAge$Age20to24), ave(dsAge$Age20to24,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age20to24)
    dsAge$Age25to29 = ifelse(is.na(dsAge$Age25to29), ave(dsAge$Age25to29,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age25to29)
    dsAge$Age30to34 = ifelse(is.na(dsAge$Age30to34), ave(dsAge$Age30to34,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age30to34)
    dsAge$Age35to39 = ifelse(is.na(dsAge$Age35to39), ave(dsAge$Age35to39,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age35to39)
    dsAge$Age40to44 = ifelse(is.na(dsAge$Age40to44), ave(dsAge$Age40to44,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age40to44)
    dsAge$Age45to49 = ifelse(is.na(dsAge$Age45to49), ave(dsAge$Age45to49,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age45to49)
    dsAge$Age50to54 = ifelse(is.na(dsAge$Age50to54), ave(dsAge$Age50to54,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age50to54)
    dsAge$Age55to59 = ifelse(is.na(dsAge$Age55to59), ave(dsAge$Age55to59,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age55to59)
    dsAge$Age60to64 = ifelse(is.na(dsAge$Age60to64), ave(dsAge$Age60to64,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsAge$Age60to64)
    return(paste("Data Cleaning Process for Dataset (dsAge) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsEthnic Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsEthnic")
{
    # Replace missing values in each column with that column's mean (na.rm=TRUE inside mean())
    dsEthnic$Year = ifelse(is.na(dsEthnic$Year), ave(dsEthnic$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$Year)
    dsEthnic$AllTotalWorkForce = ifelse(is.na(dsEthnic$AllTotalWorkForce), ave(dsEthnic$AllTotalWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$AllTotalWorkForce)
    dsEthnic$SubTotWorkForce = ifelse(is.na(dsEthnic$SubTotWorkForce), ave(dsEthnic$SubTotWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$SubTotWorkForce)
    dsEthnic$BumiWorkForce = ifelse(is.na(dsEthnic$BumiWorkForce), ave(dsEthnic$BumiWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$BumiWorkForce)
    dsEthnic$ChineseWorkForce = ifelse(is.na(dsEthnic$ChineseWorkForce), ave(dsEthnic$ChineseWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$ChineseWorkForce)
    dsEthnic$IndianWorkForce = ifelse(is.na(dsEthnic$IndianWorkForce), ave(dsEthnic$IndianWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$IndianWorkForce)
    dsEthnic$OtherWorkForce = ifelse(is.na(dsEthnic$OtherWorkForce), ave(dsEthnic$OtherWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$OtherWorkForce)
    dsEthnic$SubTotForeignWorkForce = ifelse(is.na(dsEthnic$SubTotForeignWorkForce), ave(dsEthnic$SubTotForeignWorkForce,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEthnic$SubTotForeignWorkForce)
    return(paste("Data Cleaning Process for Dataset (dsEthnic) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsEdu Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsEdu")
{
    # Replace missing values in each column with that column's mean (na.rm=TRUE inside mean())
    dsEdu$Year = ifelse(is.na(dsEdu$Year), ave(dsEdu$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Year)
    dsEdu$AllTotalEdu = ifelse(is.na(dsEdu$AllTotalEdu), ave(dsEdu$AllTotalEdu,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$AllTotalEdu)
    dsEdu$NonEduc = ifelse(is.na(dsEdu$NonEduc), ave(dsEdu$NonEduc,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$NonEduc)
    dsEdu$Primary = ifelse(is.na(dsEdu$Primary), ave(dsEdu$Primary,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Primary)
    dsEdu$Secondary = ifelse(is.na(dsEdu$Secondary), ave(dsEdu$Secondary,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Secondary)
    dsEdu$Tertiary = ifelse(is.na(dsEdu$Tertiary), ave(dsEdu$Tertiary,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsEdu$Tertiary)
    return(paste("Data Cleaning Process for Dataset (dsEdu) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsCert Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsCert")
{
    # Replace missing values in each column with that column's mean (na.rm=TRUE inside mean())
    dsCert$Year = ifelse(is.na(dsCert$Year), ave(dsCert$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Year)
    dsCert$AllTotalCert = ifelse(is.na(dsCert$AllTotalCert), ave(dsCert$AllTotalCert,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$AllTotalCert)
    dsCert$UPSRAEquiv = ifelse(is.na(dsCert$UPSRAEquiv), ave(dsCert$UPSRAEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$UPSRAEquiv)
    dsCert$PMRSRPLCEEquiv = ifelse(is.na(dsCert$PMRSRPLCEEquiv), ave(dsCert$PMRSRPLCEEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$PMRSRPLCEEquiv)
    dsCert$SPMEquiv = ifelse(is.na(dsCert$SPMEquiv), ave(dsCert$SPMEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$SPMEquiv)
    dsCert$STPMEquiv = ifelse(is.na(dsCert$STPMEquiv), ave(dsCert$STPMEquiv,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$STPMEquiv)
    dsCert$Certificate = ifelse(is.na(dsCert$Certificate), ave(dsCert$Certificate,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Certificate)
    dsCert$Diploma = ifelse(is.na(dsCert$Diploma), ave(dsCert$Diploma,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Diploma)
    dsCert$Degree = ifelse(is.na(dsCert$Degree), ave(dsCert$Degree,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$Degree)
    dsCert$ReligCert = ifelse(is.na(dsCert$ReligCert), ave(dsCert$ReligCert,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$ReligCert)
    dsCert$NoCert = ifelse(is.na(dsCert$NoCert), ave(dsCert$NoCert,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$NoCert)
    dsCert$NoRelevant = ifelse(is.na(dsCert$NoRelevant), ave(dsCert$NoRelevant,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsCert$NoRelevant)
    return(paste("Data Cleaning Process for Dataset (dsCert) has been completed"))
}
#-------------------------------------------------------------------------------------
# dsMarital Dataset
#-------------------------------------------------------------------------------------
else if (types=="dsMarital")
{
    # Replace missing values in each column with that column's mean (na.rm=TRUE inside mean())
    dsMarital$AllTotalMarital = ifelse(is.na(dsMarital$AllTotalMarital), ave(dsMarital$AllTotalMarital,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$AllTotalMarital)
    dsMarital$Year = ifelse(is.na(dsMarital$Year), ave(dsMarital$Year,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$Year)
    dsMarital$NeverMarried = ifelse(is.na(dsMarital$NeverMarried), ave(dsMarital$NeverMarried,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$NeverMarried)
    dsMarital$Married = ifelse(is.na(dsMarital$Married), ave(dsMarital$Married,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$Married)
    dsMarital$Widow = ifelse(is.na(dsMarital$Widow), ave(dsMarital$Widow,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$Widow)
    dsMarital$DivorcePermSeparate = ifelse(is.na(dsMarital$DivorcePermSeparate), ave(dsMarital$DivorcePermSeparate,
                         FUN=function(x) mean(x, na.rm=TRUE)), dsMarital$DivorcePermSeparate)
    return(paste("Data Cleaning Process for Dataset (dsMarital) has been completed"))
}
else
{
return(paste("NO Data Cleaning Proses Executed"))
}
}
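#-------------------------------------------------------------------------------------
# Illustrative helper (sketch, not part of the original script): the repeated
# column-by-column imputation above can be written once as a small function.
# The function name and the example call below are hypothetical.
#-------------------------------------------------------------------------------------
imputeMeanCols <- function(df, cols) {
  for (col in cols) {
    col_mean <- mean(df[[col]], na.rm = TRUE)      # column mean, ignoring NAs
    df[[col]][is.na(df[[col]])] <- col_mean        # fill missing entries with that mean
  }
  df
}
# Example usage (hypothetical column list):
# dsEdu <- imputeMeanCols(dsEdu, c("Year", "AllTotalEdu", "NonEduc",
#                                  "Primary", "Secondary", "Tertiary"))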