content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension
---|---|---|---|---|---|---|---|---
library(dplyr, warn.conflicts = F, quietly = T)
source('dt_sim.R')
#weights related functions
source('funs/weight_define_each.R')
source('funs/fun_a.R')
source('funs/fun_b.R')
source('funs/assign_weights.R')
source('funs/pvf_apply.R')
source('funs/pvf.R')
source('funs/mi_weights.R')
#scenario: four weights - BCVA, ocular AEs, non-ocular AEs and CST
#BCVA is defined as a function of BCVA at BL
#AEs are defined as a function of sex
#CST is defined as a function of CST at BL
#Scenario 2: patients care more about PE and ocular AEs than non-ocular AEs or CST
#################
#define weights #
#################
#assume that PE weights are affected only by BCVA at BL
#patients who have lower BCVA at BL would have higher weights on average than patients who have higher
#BCVA values at BL
v1_w1_mu <- c(90, 60, 30)
v1_w1_sd <- rep(7, 3)
#assume that AE weights are affected by sex, and that women would have lower weights than men
v1_w2_mu <- c(70, 80)
v1_w2_sd <- rep(7, 2)
v1_w3_mu <- c(30, 40)
v1_w3_sd <- rep(7, 2)
#assume that CST weights are affected by CST at BL; patients with higher CST at BL will give higher
#weights for the CST outcome
v1_w4_mu <- c(15, 30)
v1_w4_sd <- rep(7, 2)
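#probability that a patient's weights are missing (the miss flag is drawn below with this probability)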
p_miss <- 0.6
x1 <- parallel::mclapply(X = 1:1000,
mc.cores = 24,
FUN = function(i){
#generate simulated data to be used with weights
set.seed(888*i)
dt_out <- dt_sim()
#weights specification
w1_spec <- weight_define_each(data = dt_out, name_weight = 'bcva_48w', br_spec = 'benefit', 'bcvac_bl', w_mu = v1_w1_mu, w_sd = v1_w1_sd)
w2_spec <- weight_define_each(data = dt_out, name_weight = 'ae_oc', br_spec = 'risk', 'sex', w_mu = v1_w2_mu, w_sd = v1_w2_sd)
w3_spec <- weight_define_each(data = dt_out, name_weight = 'ae_noc', br_spec = 'risk', 'sex', w_mu = v1_w3_mu, w_sd = v1_w3_sd)
w4_spec <- weight_define_each(data = dt_out, name_weight = 'cst_16w', br_spec = 'risk', 'cstc_bl', w_mu = v1_w4_mu, w_sd = v1_w4_sd)
#combine weights into one list
l <- list(w1_spec, w2_spec, w3_spec, w4_spec)
#assign weights based on the mean/sd specification provided by the user
#for each patient, the highest weight will be assigned 100
dt_w <- assign_weights(data = dt_out, w_spec = l)
#standardize weights and apply the value function (pvf) that calculates MCDA scores for each patient
dt_final <- pvf_apply(data = dt_w, w_spec = l)
#treatment arm comparisons using all weights, only the observed (non-missing) weights, and multiply imputed weights
dt_final[, 'miss'] <- stats::rbinom(n = nrow(dt_final), 1, prob = p_miss)
mcda_test_all <- stats::t.test(dt_final$mcda[dt_final$trt=='c'], dt_final$mcda[dt_final$trt=='t'])
mcda_test_obs <- stats::t.test(dt_final$mcda[dt_final$trt=='c' & dt_final$miss == 0],
dt_final$mcda[dt_final$trt=='t' & dt_final$miss == 0])
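#multiply impute the missing weights from baseline covariates (10 imputations, 'norm' method);
#qbar/ubar/b/t used below are the pooled estimate and Rubin's-rules variance components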
mcda_test_mi <- mi_weights(data = dt_final,
vars_bl = c('bcva_bl', 'age_bl', 'sex', 'cst_bl', 'srf', 'irf', 'rpe'),
w_spec = l, num_m = 10, mi_method = 'norm',
trunc_range = FALSE)
###########################
#summarise the br results #
###########################
br_comp <- tibble::tibble(meth = 'all',
mean_diff = mcda_test_all$estimate[1] - mcda_test_all$estimate[2],
se_diff = mean_diff/mcda_test_all$statistic)
br_comp[2, 'meth'] <- 'obs'
br_comp[2, 'mean_diff'] <- mcda_test_obs$estimate[1] - mcda_test_obs$estimate[2]
br_comp[2, 'se_diff'] <- (mcda_test_obs$estimate[1] - mcda_test_obs$estimate[2])/
mcda_test_obs$statistic
br_comp[3, 'meth'] <- 'mi'
br_comp[3, 'mean_diff'] <- mcda_test_mi$qbar
br_comp[3, 'se_diff'] <- sqrt(mcda_test_mi$t)
br_comp[3, 'ubar'] <- mcda_test_mi$ubar
br_comp[3, 'b'] <- mcda_test_mi$b
br_result <- tibble::tibble(res = ifelse(mcda_test_all$conf.int[2] < 0, 'benefit', 'no benefit'),
meth = 'all')
br_result[2, 'res'] <- ifelse(mcda_test_obs$conf.int[2] < 0, 'benefit', 'no benefit')
br_result[2, 'meth'] <- 'obs'
br_result[3, 'res'] <- ifelse(mcda_test_mi$qbar + qt(0.975, df = mcda_test_mi$v)*
sqrt(mcda_test_mi$t) < 0, 'benefit', 'no benefit')
br_result[3, 'meth'] <- 'mi'
br_result[, 'sim_id'] <- i
out <- list(br_comp, br_result)%>%purrr::set_names('br_comp', 'br_result')
return(out)
})
saveRDS(x1, sprintf('mcda_results/mcda_c4_sc2_pmiss%d_%s%s.rds', 100*0.6, 'norm', FALSE))
|
/pgms_sim/mcda_c4_sc2_pmiss60_normFALSE.R
|
no_license
|
yuliasidi/ch3sim
|
R
| false | false | 4,453 |
r
|
context("Test output from glmsmurf function")
test_that("Test output class", {
# Check if class of output object is "glmsmurf"
expect_equal(class(munich.fit)[1],
"glmsmurf")
# Check if class of output object inherits from list, glm and lm classes
expect_is(munich.fit,
"list")
expect_is(munich.fit,
"glm")
expect_is(munich.fit,
"lm")
})
test_that("Test coefficients in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$coefficients))
# Check length
expect_length(munich.fit$coefficients,
63L)
})
test_that("Test residuals in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$residuals))
# Check length
expect_length(munich.fit$residuals,
nrow(rent))
})
test_that("Test fitted values in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$fitted.values))
# Check length
expect_length(munich.fit$fitted.values,
nrow(rent))
})
test_that("Test rank in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$rank))
# Check length
expect_length(munich.fit$rank,
1L)
# Check if strictly positive
expect_true(munich.fit$rank > 0)
# Check if integer
expect_true(.is.wholenumber(munich.fit$rank))
})
test_that("Test family in output", {
# Check class
expect_true(class(munich.fit$family) == "family")
})
test_that("Test linear predictors in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$linear.predictors))
# Check length
expect_length(munich.fit$linear.predictors,
nrow(rent))
# Check if can be transformed to fitted values using link function
expect_equal(munich.fit$family$linkfun(munich.fit$linear.predictors),
munich.fit$fitted.values)
})
test_that("Test deviance in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$deviance))
# Check length
expect_length(munich.fit$deviance,
1L)
})
test_that("Test AIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$aic))
# Check length
expect_length(munich.fit$aic,
1L)
})
test_that("Test BIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$bic))
# Check length
expect_length(munich.fit$bic,
1L)
# Check if BIC can be obtained from AIC
expect_equal(munich.fit$aic + (log(sum(munich.fit$weights != 0)) - 2) * munich.fit$rank,
munich.fit$bic)
})
test_that("Test GCV in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$gcv))
# Check length
expect_length(munich.fit$gcv,
1L)
# Check if GCV can be obtained from deviance
n2 <- sum(munich.fit$weights != 0)
expect_equal(munich.fit$deviance / (n2 * (1 - munich.fit$rank / n2) ^ 2),
munich.fit$gcv)
})
test_that("Test null deviance in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$null.deviance))
# Check length
expect_length(munich.fit$null.deviance,
1L)
})
test_that("Test residual DoF in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$df.residual))
# Check length
expect_length(munich.fit$df.residual,
1L)
# Check if can be obtained using weights and rank
expect_equal(sum(munich.fit$weights != 0) - munich.fit$rank,
munich.fit$df.residual)
})
test_that("Test null DoF in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$df.null))
# Check length
expect_length(munich.fit$df.null,
1L)
# Check if can be obtained using weights and rank of null model (1)
expect_equal(sum(munich.fit$weights != 0) - 1,
munich.fit$df.null)
})
test_that("Test objective function in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$obj.fun))
# Check length
expect_length(munich.fit$obj.fun,
1L)
})
test_that("Test weights in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$weights))
# Check length
expect_length(munich.fit$weights,
nrow(rent))
# Check if positive
expect_true(all(munich.fit$weights >= 0))
})
test_that("Test offset in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$offset))
# Check length
expect_length(munich.fit$offset,
nrow(rent))
})
test_that("Test lambda in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$lambda))
# Check length
expect_length(munich.fit$lambda,
1L)
# Check if positive
expect_true(munich.fit$lambda >= 0)
})
test_that("Test lambda1 in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$lambda1))
# Check length
expect_length(munich.fit$lambda1,
1L)
# Check if positive
expect_true(munich.fit$lambda1 >= 0)
})
test_that("Test lambda2 in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$lambda2))
# Check length
expect_length(munich.fit$lambda2,
1L)
# Check if positive
expect_true(munich.fit$lambda2 >= 0)
})
test_that("Test iter in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$iter))
# Check length
expect_length(munich.fit$iter,
1L)
# Check if strictly positive
expect_true(munich.fit$iter > 0)
# Check if integer
expect_true(.is.wholenumber(munich.fit$iter))
})
test_that("Test converged in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$converged))
# Check length
expect_length(munich.fit$converged,
1L)
# Check if 0, 1, 2 or 3
expect_true(munich.fit$converged %in% 0:3)
})
test_that("Test final stepsize in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$final.stepsize))
# Check length
expect_length(munich.fit$final.stepsize,
1L)
# Check if larger than minimum stepsize
expect_true(munich.fit$final.stepsize >= 1e-14)
})
test_that("Test n.par.cov in output", {
# Check if list
expect_true(is.list(munich.fit$n.par.cov))
# Check length
expect_length(munich.fit$n.par.cov,
11L)
# Check if all numeric
expect_true(all(sapply(munich.fit$n.par.cov, is.numeric)))
# Check lengths
expect_true(all(sapply(munich.fit$n.par.cov, length) == 1L))
# Check if all strictly positive
expect_true(all(unlist(munich.fit$n.par.cov) > 0))
# Check if all integers
expect_true(all(sapply(munich.fit$n.par.cov, .is.wholenumber)))
})
test_that("Test pen.cov in output", {
# Check if list
expect_true(is.list(munich.fit$pen.cov))
# Check length
expect_length(munich.fit$pen.cov,
11L)
# Check if all character
expect_true(all(sapply(munich.fit$pen.cov, is.character)))
# Check lengths
expect_true(all(sapply(munich.fit$pen.cov, length) == 1L))
# Check if all correct penalty types
expect_true(all(sapply(munich.fit$pen.cov, function(x) x %in% c("none", "lasso", "grouplasso",
"flasso", "gflasso", "2dflasso", "ggflasso"))))
})
test_that("Test group.cov in output", {
# Check if list
expect_true(is.list(munich.fit$group.cov))
# Check length
expect_length(munich.fit$group.cov,
11L)
# Check lengths
expect_true(all(sapply(munich.fit$group.cov, length) == 1L))
# Check if all numeric
expect_true(all(sapply(munich.fit$group.cov, is.numeric)))
# Check if all positive
expect_true(all(unlist(munich.fit$group.cov) >= 0))
# Check if all integers
expect_true(all(sapply(munich.fit$group.cov, .is.wholenumber)))
})
test_that("Test refcat.cov in output", {
# Check if list
expect_true(is.list(munich.fit$refcat.cov))
# Check length
expect_length(munich.fit$refcat.cov,
11L)
# Check lengths
expect_true(all(sapply(munich.fit$refcat.cov, length) == 1L))
# Check if all numeric
expect_true(all(sapply(munich.fit$refcat.cov, is.numeric)))
# Check if all positive
expect_true(all(unlist(munich.fit$refcat.cov) >= 0))
# Check if all integers
expect_true(all(sapply(munich.fit$refcat.cov, .is.wholenumber)))
})
test_that("Test control in output", {
# Check if list
expect_true(is.list(munich.fit$control))
# Check length
expect_length(munich.fit$control,
16L)
# Check if no error
expect_error(do.call("glmsmurf.control", munich.fit$control), NA)
})
test_that("Test lambda.method in output", {
# Check if character
expect_true(is.character(munich.fit.is$lambda.method))
expect_true(is.character(munich.fit.oos$lambda.method))
expect_true(is.character(munich.fit.cv$lambda.method))
expect_true(is.character(munich.fit.cv1se$lambda.method))
# Check name
expect_equal(munich.fit.is$lambda.method, "is.aic")
expect_equal(munich.fit.oos$lambda.method, "oos.dev")
expect_equal(munich.fit.cv$lambda.method, "cv.mse")
expect_equal(munich.fit.cv1se$lambda.method, "cv1se.mse")
})
test_that("Test lambda.vector in output", {
# Check if numeric
expect_true(is.numeric(munich.fit.is$lambda.vector))
expect_true(is.numeric(munich.fit.oos$lambda.vector))
expect_true(is.numeric(munich.fit.cv$lambda.vector))
expect_true(is.numeric(munich.fit.cv1se$lambda.vector))
# Check length
expect_length(munich.fit.is$lambda.vector,
3L)
expect_length(munich.fit.oos$lambda.vector,
3L)
expect_length(munich.fit.cv$lambda.vector,
3L)
expect_length(munich.fit.cv1se$lambda.vector,
3L)
})
test_that("Test lambda.measures in output", {
# Check if list
expect_true(is.list(munich.fit.is$lambda.measures))
expect_true(is.list(munich.fit.oos$lambda.measures))
expect_true(is.list(munich.fit.cv$lambda.measures))
expect_true(is.list(munich.fit.cv1se$lambda.measures))
# Check length
expect_length(munich.fit.is$lambda.measures,
3L)
expect_length(munich.fit.oos$lambda.measures,
3L)
expect_length(munich.fit.cv$lambda.measures,
3L)
expect_length(munich.fit.cv1se$lambda.measures,
3L)
# Check names
expect_equal(names(munich.fit.is$lambda.measures),
c("aic", "bic", "gcv"))
expect_equal(names(munich.fit.oos$lambda.measures),
c("dev", "mse", "dss"))
expect_equal(names(munich.fit.cv$lambda.measures),
c("dev", "mse", "dss"))
expect_equal(names(munich.fit.cv1se$lambda.measures),
c("dev", "mse", "dss"))
# Check dimensions
expect_equal(as.numeric(sapply(munich.fit.is$lambda.measures, dim)),
rep(c(3, 1), 3L))
expect_equal(as.numeric(sapply(munich.fit.oos$lambda.measures, dim)),
rep(c(3, 1), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv$lambda.measures, dim)),
rep(c(3, 5), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv1se$lambda.measures, dim)),
rep(c(3, 5), 3L))
# Check column names
expect_equal(as.character(sapply(munich.fit.is$lambda.measures, colnames)),
rep("In-sample", 3L))
expect_equal(as.character(sapply(munich.fit.oos$lambda.measures, colnames)),
rep("Out-of-sample", 3L))
expect_equal(as.character(sapply(munich.fit.cv$lambda.measures, colnames)),
rep(paste("Fold", 1:5), 3L))
expect_equal(as.character(sapply(munich.fit.cv1se$lambda.measures, colnames)),
rep(paste("Fold", 1:5), 3L))
# Check row names
expect_equal(as.numeric(sapply(munich.fit.is$lambda.measures, rownames)),
rep(round(munich.fit.is$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.oos$lambda.measures, rownames)),
rep(round(munich.fit.oos$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv$lambda.measures, rownames)),
rep(round(munich.fit.cv$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv1se$lambda.measures, rownames)),
rep(round(munich.fit.cv1se$lambda.vector, 4), 3L))
})
test_that("Test lambda.coefficients in output", {
# Check if matrix
expect_true(is.matrix(munich.fit.is$lambda.coefficients))
expect_true(is.matrix(munich.fit.oos$lambda.coefficients))
# Check if NULL
expect_true(is.null(munich.fit.cv$lambda.coefficients))
expect_true(is.null(munich.fit.cv1se$lambda.coefficients))
# Check dimensions
expect_equal(dim(munich.fit.is$lambda.coefficients),
c(length(munich.fit.is$lambda.vector), length(coef(munich.fit.is))))
expect_equal(dim(munich.fit.oos$lambda.coefficients),
c(length(munich.fit.oos$lambda.vector), length(coef(munich.fit.is))))
# Check column names
expect_equal(colnames(munich.fit.is$lambda.coefficients),
names(coef(munich.fit.is)))
expect_equal(colnames(munich.fit.oos$lambda.coefficients),
names(coef(munich.fit.oos)))
# Check row names
expect_equal(as.numeric(sapply(munich.fit.is$lambda.measures, rownames)),
rep(round(munich.fit.is$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.oos$lambda.measures, rownames)),
rep(round(munich.fit.oos$lambda.vector, 4), 3L))
})
test_that("Test X in output", {
# Check if matrix
expect_true((class(munich.fit$X)[1] %in% c("Matrix", "dgeMatrix", "dgCMatrix")) |
(is.matrix(munich.fit$X) & is.numeric(munich.fit$X)))
# Check dimension
expect_equal(dim(munich.fit$X),
c(nrow(rent), 63L))
# Check if null (not present)
expect_null(munich.fit2$X)
})
test_that("Test re-estimated coefficients in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$coefficients.reest))
# Check length
expect_length(munich.fit$coefficients.reest,
length(munich.fit$coefficients))
# Check if NULL (not present)
expect_null(munich.fit2$coefficients.reest)
})
test_that("Test re-estimated residuals in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$residuals.reest))
# Check length
expect_length(munich.fit$residuals.reest,
length(munich.fit$residuals))
# Check if NULL (not present)
expect_null(munich.fit2$residuals.reest)
})
test_that("Test re-estimated fitted values in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$fitted.values.reest))
# Check length
expect_length(munich.fit$fitted.values.reest,
length(munich.fit$fitted.values))
# Check if NULL (not present)
expect_null(munich.fit2$fitted.values.reest)
})
test_that("Test re-estimated rank in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$rank.reest))
# Check length
expect_length(munich.fit$rank.reest,
1L)
# Check if strictly positive
expect_true(munich.fit$rank.reest > 0)
# Check if integer
expect_true(.is.wholenumber(munich.fit$rank.reest))
# Check if NULL (not present)
expect_null(munich.fit2$rank.reest)
})
test_that("Test re-estimated linear predictors in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$linear.predictors.reest))
# Check length
expect_length(munich.fit$linear.predictors.reest,
length(munich.fit$linear.predictors))
# Check if can be transformed to fitted values using link function
expect_equal(munich.fit$family$linkfun(munich.fit$linear.predictors.reest),
munich.fit$fitted.values.reest)
# Check if NULL (not present)
expect_null(munich.fit2$linear.predictors.reest)
})
test_that("Test re-estimated deviance in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$deviance.reest))
# Check length
expect_length(munich.fit$deviance.reest,
1L)
# Check if NULL (not present)
expect_null(munich.fit2$deviance.reest)
})
test_that("Test re-estimated AIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$aic.reest))
# Check length
expect_length(munich.fit$aic.reest,
1L)
# Check if NULL (not present)
expect_null(munich.fit2$aic.reest)
})
test_that("Test re-estimated BIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$bic.reest))
# Check length
expect_length(munich.fit$bic.reest,
1L)
# Check if BIC can be obtained from AIC
expect_equal(munich.fit$aic.reest + (log(sum(munich.fit$weights != 0)) - 2) * munich.fit$rank.reest,
munich.fit$bic.reest)
# Check if NULL (not present)
expect_null(munich.fit2$bic.reest)
})
test_that("Test re-estimated GCV in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$gcv.reest))
# Check length
expect_length(munich.fit$gcv.reest,
1L)
# Check if GCV can be obtained from deviance
n2 <- sum(munich.fit$weights != 0)
expect_equal(munich.fit$deviance.reest / (n2 * (1 - munich.fit$rank.reest / n2) ^ 2),
munich.fit$gcv.reest)
# Check if NULL (not present)
expect_null(munich.fit2$gcv.reest)
})
test_that("Test re-estimated residual DoF in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$df.residual.reest))
# Check length
expect_length(munich.fit$df.residual.reest,
1L)
# Check if can be obtained using weights and rank
expect_equal(sum(munich.fit$weights != 0) - munich.fit$rank.reest,
munich.fit$df.residual.reest)
# Check if NULL (not present)
expect_null(munich.fit2$df.residual.reest)
})
test_that("Test re-estimated objective function in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$obj.fun.reest))
# Check length
expect_length(munich.fit$obj.fun.reest,
1L)
# Check if NULL (not present)
expect_null(munich.fit2$obj.fun.reest)
})
test_that("Test X.reest in output", {
# Check if matrix
expect_true((class(munich.fit$X.reest)[1] %in% c("Matrix", "dgeMatrix", "dgCMatrix")) |
(is.matrix(munich.fit$X.reest) & is.numeric(munich.fit$X.reest)))
# Check dimension
expect_equal(dim(munich.fit$X.reest),
c(nrow(rent), munich.fit$rank.reest))
# Check if null (not present)
expect_null(munich.fit2$X.reest)
})
test_that("Test call in output", {
# Check class
expect_true(is.call(munich.fit$call))
})
test_that("Test formula in output", {
# Check class
expect_true(class(munich.fit$formula) == "formula")
})
test_that("Test terms in output", {
# Check class
expect_true(class(munich.fit$terms)[1] == "terms")
})
test_that("Test contrasts in output", {
# Check class
expect_true(is.list(munich.fit$contrasts))
# Check length
expect_length(munich.fit$contrasts,
5L)
})
test_that("Test xlevels in output", {
# Check class
expect_true(is.list(munich.fit$xlevels))
# Check length
expect_length(munich.fit$xlevels,
10L)
})
|
/fuzzedpackages/smurf/tests/testthat/test_output.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 20,144 |
r
|
context("Test output from glmsmurf function")
test_that("Test output class", {
# Check if class of output object is "glmsmurf"
expect_equal(class(munich.fit)[1],
"glmsmurf")
# Check if class of output object inherits from list, glm and lm classes
expect_is(munich.fit,
"list")
expect_is(munich.fit,
"glm")
expect_is(munich.fit,
"lm")
})
test_that("Test coefficients in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$coefficients))
# Check length
expect_length(munich.fit$coefficients,
63L)
})
test_that("Test residuals in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$residuals))
# Check length
expect_length(munich.fit$residuals,
nrow(rent))
})
test_that("Test fitted values in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$fitted.values))
# Check length
expect_length(munich.fit$fitted.values,
nrow(rent))
})
test_that("Test rank in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$rank))
# Check length
expect_length(munich.fit$rank,
1L)
# Check if strictly positive
expect_true(munich.fit$rank > 0)
# Check if integer
expect_true(.is.wholenumber(munich.fit$rank))
})
test_that("Test family in output", {
# Check class
expect_true(class(munich.fit$family) == "family")
})
test_that("Test linear predictors in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$linear.predictors))
# Check length
expect_length(munich.fit$linear.predictors,
nrow(rent))
# Check if can be transformed to fitted values using link function
expect_equal(munich.fit$family$linkfun(munich.fit$linear.predictors),
munich.fit$fitted.values)
})
test_that("Test deviance in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$deviance))
# Check length
expect_length(munich.fit$deviance,
1L)
})
test_that("Test AIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$aic))
# Check length
expect_length(munich.fit$aic,
1L)
})
test_that("Test BIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$bic))
# Check length
expect_length(munich.fit$bic,
1L)
# Check if BIC can be obtained from AIC
expect_equal(munich.fit$aic + (log(sum(munich.fit$weights != 0)) - 2) * munich.fit$rank,
munich.fit$bic)
})
test_that("Test GCV in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$gcv))
# Check length
expect_length(munich.fit$gcv,
1L)
# Check if GCV can be obtained from deviance
n2 <- sum(munich.fit$weights != 0)
expect_equal(munich.fit$deviance / (n2 * (1 - munich.fit$rank / n2) ^ 2),
munich.fit$gcv)
})
test_that("Test null deviance in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$null.deviance))
# Check length
expect_length(munich.fit$null.deviance,
1L)
})
test_that("Test residual DoF in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$df.residual))
# Check length
expect_length(munich.fit$df.residual,
1L)
# Check if can be obtained using weights and rank
expect_equal(sum(munich.fit$weights != 0) - munich.fit$rank,
munich.fit$df.residual)
})
test_that("Test null DoF in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$df.null))
# Check length
expect_length(munich.fit$df.null,
1L)
# Check if can be obtained using weights and rank of null model (1)
expect_equal(sum(munich.fit$weights != 0) - 1,
munich.fit$df.null)
})
test_that("Test objective function in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$obj.fun))
# Check length
expect_length(munich.fit$obj.fun,
1L)
})
test_that("Test weights in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$weights))
# Check length
expect_length(munich.fit$weights,
nrow(rent))
# Check if positive
expect_true(all(munich.fit$weights >= 0))
})
test_that("Test offset in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$offset))
# Check length
expect_length(munich.fit$offset,
nrow(rent))
})
test_that("Test lambda in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$lambda))
# Check length
expect_length(munich.fit$lambda,
1L)
# Check if positive
expect_true(munich.fit$lambda >= 0)
})
test_that("Test lambda1 in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$lambda1))
# Check length
expect_length(munich.fit$lambda1,
1L)
# Check if positive
expect_true(munich.fit$lambda1 >= 0)
})
test_that("Test lambda2 in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$lambda2))
# Check length
expect_length(munich.fit$lambda2,
1L)
# Check if positive
expect_true(munich.fit$lambda2 >= 0)
})
test_that("Test iter in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$iter))
# Check length
expect_length(munich.fit$iter,
1L)
# Check if strictly positive
expect_true(munich.fit$iter > 0)
# Check if integer
expect_true(.is.wholenumber(munich.fit$iter))
})
test_that("Test converged in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$converged))
# Check length
expect_length(munich.fit$converged,
1L)
# Check if 0, 1, 2 or 3
expect_true(munich.fit$converged %in% 0:3)
})
test_that("Test final stepsize in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$final.stepsize))
# Check length
expect_length(munich.fit$final.stepsize,
1L)
# Check if larger than minimum stepsize
expect_true(munich.fit$final.stepsize >= 1e-14)
})
test_that("Test n.par.cov in output", {
# Check if list
expect_true(is.list(munich.fit$n.par.cov))
# Check length
expect_length(munich.fit$n.par.cov,
11L)
# Check if all numeric
expect_true(all(sapply(munich.fit$n.par.cov, is.numeric)))
# Check lengths
expect_true(all(sapply(munich.fit$n.par.cov, length) == 1L))
# Check if all strictly positive
expect_true(all(unlist(munich.fit$n.par.cov, length) > 0))
# Check if all integers
expect_true(all(sapply(munich.fit$n.par.cov, .is.wholenumber)))
})
test_that("Test pen.cov in output", {
# Check if list
expect_true(is.list(munich.fit$pen.cov))
# Check length
expect_length(munich.fit$pen.cov,
11L)
# Check if all character
expect_true(all(sapply(munich.fit$pen.cov, is.character)))
# Check lengths
expect_true(all(sapply(munich.fit$pen.cov, length) == 1L))
# Check if all correct penalty types
expect_true(all(sapply(munich.fit$pen.cov, function(x) x %in% c("none", "lasso", "grouplasso",
"flasso", "gflasso", "2dflasso", "ggflasso"))))
})
test_that("Test group.cov in output", {
# Check if list
expect_true(is.list(munich.fit$group.cov))
# Check length
expect_length(munich.fit$group.cov,
11L)
# Check lengths
expect_true(all(sapply(munich.fit$group.cov, length) == 1L))
# Check if all numeric
expect_true(all(sapply(munich.fit$group.cov, is.numeric)))
# Check if all positive
expect_true(all(unlist(munich.fit$group.cov) >= 0))
# Check if all integers
expect_true(all(sapply(munich.fit$group.cov, .is.wholenumber)))
})
test_that("Test refcat.cov in output", {
# Check if list
expect_true(is.list(munich.fit$refcat.cov))
# Check length
expect_length(munich.fit$refcat.cov,
11L)
# Check lengths
expect_true(all(sapply(munich.fit$refcat.cov, length) == 1L))
# Check if all numeric
expect_true(all(sapply(munich.fit$refcat.cov, is.numeric)))
# Check if all positive
expect_true(all(unlist(munich.fit$refcat.cov) >= 0))
# Check if all integers
expect_true(all(sapply(munich.fit$refcat.cov, .is.wholenumber)))
})
test_that("Test control in output", {
# Check if list
expect_true(is.list(munich.fit$control))
# Check length
expect_length(munich.fit$control,
16L)
# Check if no error
expect_error(do.call("glmsmurf.control", munich.fit$control), NA)
})
test_that("Test lambda.method in output", {
# Check if character
expect_true(is.character(munich.fit.is$lambda.method))
expect_true(is.character(munich.fit.oos$lambda.method))
expect_true(is.character(munich.fit.cv$lambda.method))
expect_true(is.character(munich.fit.cv1se$lambda.method))
# Check name
expect_equal(munich.fit.is$lambda.method, "is.aic")
expect_equal(munich.fit.oos$lambda.method, "oos.dev")
expect_equal(munich.fit.cv$lambda.method, "cv.mse")
expect_equal(munich.fit.cv1se$lambda.method, "cv1se.mse")
})
test_that("Test lambda.vector in output", {
# Check if numeric
expect_true(is.numeric(munich.fit.is$lambda.vector))
expect_true(is.numeric(munich.fit.oos$lambda.vector))
expect_true(is.numeric(munich.fit.cv$lambda.vector))
expect_true(is.numeric(munich.fit.cv1se$lambda.vector))
# Check length
expect_length(munich.fit.is$lambda.vector,
3L)
expect_length(munich.fit.oos$lambda.vector,
3L)
expect_length(munich.fit.cv$lambda.vector,
3L)
expect_length(munich.fit.cv1se$lambda.vector,
3L)
})
test_that("Test lambda.measures in output", {
# Check if list
expect_true(is.list(munich.fit.is$lambda.measures))
expect_true(is.list(munich.fit.oos$lambda.measures))
expect_true(is.list(munich.fit.cv$lambda.measures))
expect_true(is.list(munich.fit.cv1se$lambda.measures))
# Check length
expect_length(munich.fit.is$lambda.measures,
3L)
expect_length(munich.fit.oos$lambda.measures,
3L)
expect_length(munich.fit.cv$lambda.measures,
3L)
expect_length(munich.fit.cv1se$lambda.measures,
3L)
# Check names
expect_equal(names(munich.fit.is$lambda.measures),
c("aic", "bic", "gcv"))
expect_equal(names(munich.fit.oos$lambda.measures),
c("dev", "mse", "dss"))
expect_equal(names(munich.fit.cv$lambda.measures),
c("dev", "mse", "dss"))
expect_equal(names(munich.fit.cv1se$lambda.measures),
c("dev", "mse", "dss"))
# Check dimensions
expect_equal(as.numeric(sapply(munich.fit.is$lambda.measures, dim)),
rep(c(3, 1), 3L))
expect_equal(as.numeric(sapply(munich.fit.oos$lambda.measures, dim)),
rep(c(3, 1), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv$lambda.measures, dim)),
rep(c(3, 5), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv1se$lambda.measures, dim)),
rep(c(3, 5), 3L))
# Check column names
expect_equal(as.character(sapply(munich.fit.is$lambda.measures, colnames)),
rep("In-sample", 3L))
expect_equal(as.character(sapply(munich.fit.oos$lambda.measures, colnames)),
rep("Out-of-sample", 3L))
expect_equal(as.character(sapply(munich.fit.cv$lambda.measures, colnames)),
rep(paste("Fold", 1:5), 3L))
expect_equal(as.character(sapply(munich.fit.cv1se$lambda.measures, colnames)),
rep(paste("Fold", 1:5), 3L))
# Check row names
expect_equal(as.numeric(sapply(munich.fit.is$lambda.measures, rownames)),
rep(round(munich.fit.is$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.oos$lambda.measures, rownames)),
rep(round(munich.fit.oos$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv$lambda.measures, rownames)),
rep(round(munich.fit.cv$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.cv1se$lambda.measures, rownames)),
rep(round(munich.fit.cv1se$lambda.vector, 4), 3L))
})
test_that("Test lambda.coefficients in output", {
# Check if matrix
expect_true(is.matrix(munich.fit.is$lambda.coefficients))
expect_true(is.matrix(munich.fit.oos$lambda.coefficients))
# Check if NULL
expect_true(is.null(munich.fit.cv$lambda.coefficients))
expect_true(is.null(munich.fit.cv1se$lambda.coefficients))
# Check dimensions
expect_equal(dim(munich.fit.is$lambda.coefficients),
c(length(munich.fit.is$lambda.vector), length(coef(munich.fit.is))))
expect_equal(dim(munich.fit.oos$lambda.coefficients),
c(length(munich.fit.oos$lambda.vector), length(coef(munich.fit.is))))
# Check column names
expect_equal(colnames(munich.fit.is$lambda.coefficients),
names(coef(munich.fit.is)))
expect_equal(colnames(munich.fit.oos$lambda.coefficients),
names(coef(munich.fit.oos)))
# Check row names
expect_equal(as.numeric(sapply(munich.fit.is$lambda.measures, rownames)),
rep(round(munich.fit.is$lambda.vector, 4), 3L))
expect_equal(as.numeric(sapply(munich.fit.oos$lambda.measures, rownames)),
rep(round(munich.fit.oos$lambda.vector, 4), 3L))
})
test_that("Test X in output", {
# Check if matrix
expect_true((class(munich.fit$X)[1] %in% c("Matrix", "dgeMatrix", "dgCMatrix")) |
(is.matrix(munich.fit$X) & is.numeric(munich.fit$X)))
# Check dimension
expect_equal(dim(munich.fit$X),
c(nrow(rent), 63L))
# Check if null (not present)
expect_null(munich.fit2$X)
})
test_that("Test re-estimated coefficients in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$coefficients.reest))
# Check length
expect_length(munich.fit$coefficients.reest,
length(munich.fit$coefficients))
# Check if NULL (not present)
expect_null(munich.fit2$coefficients.reest)
})
test_that("Test re-estimated residuals in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$residuals.reest))
# Check length
expect_length(munich.fit$residuals.reest,
length(munich.fit$residuals))
# Check if NULL (not present)
expect_null(munich.fit2$residuals.reest)
})
test_that("Test re-estimated fitted values in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$fitted.values.reest))
# Check length
expect_length(munich.fit$fitted.values.reest,
length(munich.fit$fitted.values))
# Check if NULL (not present)
expect_null(munich.fit2$fitted.values.reest)
})
test_that("Test re-estimated rank in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$rank.reest))
# Check length
expect_length(munich.fit$rank.reest,
1L)
# Check if strictly positive
expect_true(munich.fit$rank.reest > 0)
# Check if integer
expect_true(.is.wholenumber(munich.fit$rank.reest))
# Check if NULL (not present)
expect_null(munich.fit2$rank.reest)
})
test_that("Test re-estimated linear predictors in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$linear.predictors.reest))
# Check length
expect_length(munich.fit$linear.predictors.reest,
length(munich.fit$linear.predictors))
# Check if can be transformed to fitted values using link function
expect_equal(munich.fit$family$linkfun(munich.fit$linear.predictors.reest),
munich.fit$fitted.values.reest)
# Check if NULL (not present)
expect_null(munich.fit2$linear.predictors.reest)
})
test_that("Test re-estimated deviance in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$deviance.reest))
# Check length
expect_length(munich.fit$deviance.reest,
1L)
# Check if NULL (not present)
expect_null(munich.fit2$deviance.reest)
})
test_that("Test re-estimated AIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$aic.reest))
# Check length
expect_length(munich.fit$aic.reest,
1L)
# Check if NULL (not present)
expect_null(munich.fit2$aic.reest)
})
test_that("Test re-estimated BIC in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$bic.reest))
# Check length
expect_length(munich.fit$bic.reest,
1L)
# Check if BIC can be obtained from AIC
expect_equal(munich.fit$aic.reest + (log(sum(munich.fit$weights != 0)) - 2) * munich.fit$rank.reest,
munich.fit$bic.reest)
# Check if NULL (not present)
expect_null(munich.fit2$bic.reest)
})
test_that("Test re-estimated GCV in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$gcv.reest))
# Check length
expect_length(munich.fit$gcv,
1L)
# Check if GCV can be obtained from deviance
n2 <- sum(munich.fit$weights != 0)
expect_equal(munich.fit$deviance.reest / (n2 * (1 - munich.fit$rank.reest / n2) ^ 2),
munich.fit$gcv.reest)
# Check if NULL (not present)
expect_null(munich.fit2$gcv.reest)
})
test_that("Test re-estimated residual DoF in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$df.residual.reest))
# Check length
expect_length(munich.fit$df.residual.reest,
1L)
# Check if can be obtained using weights and rank
expect_equal(sum(munich.fit$weights != 0) - munich.fit$rank.reest,
munich.fit$df.residual.reest)
# Check if NULL (not present)
expect_null(munich.fit2$df.residual.reest)
})
test_that("Test re-estimated objective function in output", {
# Check if numeric
expect_true(is.numeric(munich.fit$obj.fun.reest))
# Check length
expect_length(munich.fit$obj.fun.reest,
1L)
# Check if NULL (not present)
expect_null(munich.fit2$obj.fun.reest)
})
test_that("Test X.reest in output", {
# Check if matrix
expect_true((class(munich.fit$X.reest)[1] %in% c("Matrix", "dgeMatrix", "dgCMatrix")) |
(is.matrix(munich.fit$X.reest) & is.numeric(munich.fit$X.reest)))
# Check dimension
expect_equal(dim(munich.fit$X.reest),
c(nrow(rent), munich.fit$rank.reest))
# Check if null (not present)
expect_null(munich.fit2$X.reest)
})
test_that("Test call in output", {
# Check class
expect_true(is.call(munich.fit$call))
})
test_that("Test formula in output", {
# Check class
expect_true(class(munich.fit$formula) == "formula")
})
test_that("Test terms in output", {
# Check class
expect_true(class(munich.fit$terms)[1] == "terms")
})
test_that("Test contrasts in output", {
# Check class
expect_true(is.list(munich.fit$contrasts))
# Check length
expect_length(munich.fit$contrasts,
5L)
})
test_that("Test xlevels in output", {
# Check class
expect_true(is.list(munich.fit$xlevels))
# Check length
expect_length(munich.fit$xlevels,
10L)
})
|
\name{random.function}
\alias{random.function}
\title{
Random Draw Generator
}
\description{
This function generates random draws of a continuous random variable
given either its density or its cumulative distribution function.
}
\usage{
random.function(n = 1, f, lower = -Inf, upper = Inf, kind = "density")
}
\arguments{
\item{n}{number of draws, default 1.}
\item{f}{either a density (default) or cumulative distribution function of the random variable.}
\item{lower}{lower limit of the support of the random variable, default -Inf.}
\item{upper}{upper limit of the support of the random variable, default Inf.}
\item{kind}{character string indicating which kind of function is supplied in \code{f}:
either "density" (default) or "cumulative".}
}
\details{
\code{random.function} uses the method of the inverse of the cdf to generate random draws from \code{f}.
}
\value{
A vector of length \code{n} with \code{n} draws from a random variable with density (or
cumulative distribution) function given by \code{f}.
}
\author{
Jose M. Pavia
}
\note{
\code{random.function} is called by \code{dgeometric.test} when the corresponding r-
function (random generator of \code{f}) is not available in the environment. \code{random.function}
generates random samples from the null hypothesis density function specified in \code{dgeometric.test}.
}
\seealso{
\code{\link{dgeometric.test}}, \code{\link{integrate}}, \code{\link{inverse}} and \code{\link{support.facto}}.
}
\examples{
f0 <- function(x) ifelse(x>=0 & x<=1, 2-2*x, 0)
random.function(10, f0, lower=0, upper=1, kind="density")
}
|
/man/random.function.Rd
|
no_license
|
cran/GoFKernel
|
R
| false | false | 1,658 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trim.R
\name{trim}
\alias{trim}
\title{Trim the whitespace from front and back of the words in a vector.}
\usage{
trim(x)
}
\arguments{
\item{x}{a character vector}
}
\value{
a character vector of trimmed text
}
\description{
Trim the whitespace from front and back of the words in a vector.
}
\examples{
trim(" ABC")
# "ABC"
trim("DEF ")
# "DEF"
trim(" ABC ")
# "ABC"
}
\author{
Mark Cowley, 2009-08-19
}
|
/man/trim.Rd
|
no_license
|
drmjc/mjcbase
|
R
| false | true | 491 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_processing.R
\name{deg_to_dec}
\alias{deg_to_dec}
\title{Helper function for cleaning Columbus P-1 datasets.
Given lat or long coords in degrees and a direction, convert to decimal.}
\usage{
deg_to_dec(x, direction)
}
\arguments{
\item{x}{lat or long coords in degrees}
\item{direction}{direction of lat/long}
}
\value{
converted x
}
\description{
Helper function for cleaning Columbus P-1 datasets.
Given lat or long coords in degrees and a direction, convert to decimal.
}
|
/man/deg_to_dec.Rd
|
no_license
|
cran/animaltracker
|
R
| false | true | 559 |
rd
|
# Extract the residual covariance matrix from an lme object
.extractR.lme <- function(lme.fit) {
n <- length( nlme::getResponse(lme.fit) )
if (length(lme.fit$group) > 1) {
stop("not implemented for multiple levels of nesting")
}
else{
ugroups <- unique(lme.fit$groups[[1]])
if (!is.null(lme.fit$modelStruct$corStruct)) {
V <- Matrix( nlme::corMatrix(lme.fit$modelStruct$corStruct) )
}
else V <- Diagonal(n)
}
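# per-observation residual SD scale: inverse of the variance-function weights if a varStruct is present, else 1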
if (!is.null(lme.fit$modelStruct$varStruct))
sds <- 1/nlme::varWeights(lme.fit$modelStruct$varStruct)
else sds <- rep(1, n)
sds <- lme.fit$sigma * sds
cond.var <- t(V * sds) * sds
return(cond.var / lme.fit$sigma^2)
}
# Extract the ranef covariance matrix from an lme object
.extractD.lme <- function(lme.fit) {
mod.mats <- RLRsim::extract.lmeDesign(lme.fit)
D <- Matrix( mod.mats$Vr )
return(D)
}
# Extract the Z matrix from a model
.extractZ.lme <- function(model){
Z.lme <- RLRsim::extract.lmeDesign(model)$Z
one.Z <- matrix(1, ncol = ncol(Z.lme)/2, nrow = nrow(Z.lme))
two.Z <- matrix(2, ncol = ncol(Z.lme)/2, nrow = nrow(Z.lme))
my.counter <- 1
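# columns of Z alternate between the two random effects: odd columns go to the first block, even columns to the second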
for(i in 1:ncol(Z.lme)){
if(i%%2==0){
two.Z[,my.counter] <- Z.lme[,i]
my.counter <- my.counter+1
}else{
one.Z[,my.counter] <- Z.lme[,i]}
}
one.Z <- t(one.Z)
two.Z <- t(two.Z)
Z <- structure(list(one = one.Z, two = two.Z))
return(Z)
}
# Refit the model
updated.model<- function(model, new.y = NULL, new.data = NULL){
# Extract formulas and data
mod.fixd <- as.formula(model$call$fixed)
mod.rand <- as.formula(model$call$random)
if(is.null(new.data)){
# Place ystars in data
mod.data <- model$data
mod.data[,as.character(mod.fixd[[2]])] <- unname(new.y)
} else{
mod.data <- new.data
}
# create new lme
ctrl <- nlme::lmeControl(opt = 'optim')
out.lme <- nlme::lme(fixed = mod.fixd, data = mod.data, random = mod.rand, control = ctrl)
return(out.lme)
}
|
/R/lme_utilities.R
|
no_license
|
baeyc/lmeresampler
|
R
| false | false | 1,965 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MRIO_tools.R
\name{SPA_footprint_sector}
\alias{SPA_footprint_sector}
\title{Title}
\usage{
SPA_footprint_sector(n = 8, L_mat, A_mat, y_vec, S_mat, index)
}
\arguments{
\item{n}{number of layers. recommendation >= 8}
\item{L_mat}{}
\item{A_mat}{}
\item{y_vec}{}
\item{S_mat}{}
\item{index}{see ?calc_footprint_sector}
}
\value{
}
\description{
Title
}
|
/man/SPA_footprint_sector.Rd
|
no_license
|
simschul/my.utils
|
R
| false | true | 436 |
rd
|
#======================== Non-Parametric test - Mann-Whitney test====================
#Also known as : Wilcoxon Rank-Sum test
LungCapData = read.table('LungCapData.txt', header = T, sep = "\t")
View(LungCapData)
attach(LungCapData)
names(LungCapData)
class(LungCap)
class(Smoke)
levels(Smoke)
boxplot(LungCap ~ Smoke)
#Ho: Median Lung Capacity of smokers = Median Lung Capacity of non-smokers
#two-sided test
wilcox.test(LungCap ~ Smoke, mu=0, alt='two.sided', conf.int=T, conf.level=0.95, paired=F, exact=T, correct=T)
#This gives a warning because we asked for exact p-values; set exact=F and the warning will be gone
wilcox.test(LungCap ~ Smoke, mu=0, alt='two.sided', conf.int=T, conf.level=0.95, paired=F, exact=F, correct=T)
#If the p-value is greater than or equal to 0.05, we fail to reject the null hypothesis
#Hence no evidence of a difference in lung capacity between smokers and non-smokers
|
/Stats/011_ANOVA/3 Non-parametric-test.r
|
no_license
|
roushanprasad/DataScience_ML
|
R
| false | false | 873 |
r
|
\encoding{UTF8}
\name{granulo}
\alias{granulo}
\docType{data}
\title{
Data frame for G2Sd package
}
\description{
\kbd{granulo} is a data frame of 29 observations and 21 variables. The first column corresponds to the aperture sizes of AFNOR sieves, in micrometers (25000, 20000, 16000, 12500, 10000, 8000, 6300, 5000, 4000, 2500, 2000, 1600, 1250, 1000, 800, 630, 500, 400, 315, 250, 200, 160, 125, 100, 80, 63, 50, 40, 0).
Warning! The last sieve, 0, corresponds to the material retained in the < 40 micrometer pan after sieving.
The other columns correspond to the weight of the samples in each size class
}
\usage{data(granulo)}
\format{
A data frame with 29 rows corresponding to the aperture sizes at the 21 stations sampled
}
\details{
This example provides a data frame of sedimentary data obtained with AFNOR sieves (in micrometers)
}
\source{
\cite{Godet, L., Fournier, J., Toupoint, N., Olivier, F. 2009. Mapping and monitoring intertidal benthic habitats: a review of techniques and proposal of a new visual methodology for the European coasts. \emph{Progress in Physical Geography} \strong{33}, 378-402}
}
\references{
\cite{Fournier, J., Godet, L., Bonnot-Courtois, C., Baltzer, A., Caline, B. 2009. Distribution des formations superficielles de l'archipel de Chausey (Manche). \emph{Geologie de la France} \strong{1}, 5-17}
}
\examples{
data(granulo)
}
|
/man/granulo.Rd
|
no_license
|
gallonr/G2Sd
|
R
| false | false | 1,381 |
rd
|
#' Add expected walking neighborhoods.
#'
#' @param pump.subset Numeric. Vector of numeric pump IDs to subset from the neighborhoods defined by \code{pump.select}. Negative selection possible. \code{NULL} uses all pumps in \code{pump.select}.
#' @param pump.select Numeric. Numeric vector of pump IDs that define which pump neighborhoods to consider (i.e., specify the "population"). Negative selection possible. \code{NULL} selects all pumps.
#' @param vestry Logical. \code{TRUE} uses the 14 pumps from the Vestry Report. \code{FALSE} uses the 13 in the original map.
#' @param weighted Logical. \code{TRUE} computes shortest path weighted by road length. \code{FALSE} computes shortest path in terms of the number of nodes.
#' @param path Character. "expected" or "observed".
#' @param path.color Character. Use a single color for all paths. \code{NULL} uses neighborhood colors defined by \code{snowColors()}.
#' @param path.width Numeric. Set width of paths.
#' @param alpha.level Numeric. Alpha level transparency for area plot: a value in [0, 1].
#' @param polygon.type Character. "perimeter" or "solid".
#' @param polygon.col Character.
#' @param polygon.lwd Numeric.
#' @param multi.core Logical or Numeric. \code{TRUE} uses \code{parallel::detectCores()}. \code{FALSE} uses a single core. You can also specify the number of logical cores. See \code{vignette("Parallelization")} for details.
#' @param dev.mode Logical. Development mode uses parallel::parLapply().
#' @param latlong Logical. Use estimated longitude and latitude.
#' @import graphics
#' @export
#' @examples
#' \dontrun{
#' streetNameLocator("marshall street", zoom = 0.5)
#' addNeighborhoodWalking()
#' }
addNeighborhoodWalking <- function(pump.subset = NULL, pump.select = NULL,
vestry = FALSE, weighted = TRUE, path = NULL, path.color = NULL,
path.width = 3, alpha.level = 0.25, polygon.type = "solid",
polygon.col = NULL, polygon.lwd = 2, multi.core = TRUE, dev.mode = FALSE,
latlong = FALSE) {
cores <- multiCore(multi.core)
if (latlong) {
w <- latlongNeighborhoodWalking(pump.select = pump.select, vestry = vestry,
multi.core = cores)
dat <- w$neigh.data
edges <- dat$edges
paths <- w$paths
vars <- c("lon", "lat")
obs.edges <- lapply(paths, function(p) {
oe <- lapply(p, function(x) {
nodes.tmp <- names(unlist(unname(x)))
identifyEdgesB(nodes.tmp, edges)
})
unique(unlist(oe))
})
if (is.null(path.color)) {
invisible(lapply(names(obs.edges), function(nm) {
n.edges <- edges[obs.edges[[nm]], ]
segments(n.edges$lon1, n.edges$lat1, n.edges$lon2, n.edges$lat2,
lwd = path.width, col = w$snow.colors[paste0("p", nm)])
}))
invisible(lapply(names(w$cases), function(nm) {
sel <- cholera::fatalities.address$anchor %in% w$cases[[nm]]
points(cholera::fatalities.address[sel, vars], pch = 20, cex = 0.75,
col = w$snow.colors[nm])
}))
} else {
invisible(lapply(names(obs.edges), function(nm) {
n.edges <- edges[obs.edges[[nm]], ]
segments(n.edges$lon1, n.edges$lat1, n.edges$lon2, n.edges$lat2,
lwd = path.width, col = path.color)
}))
invisible(lapply(names(w$cases), function(nm) {
sel <- cholera::fatalities.address$anchor %in% w$cases[[nm]]
points(cholera::fatalities.address[sel, vars], pch = 20, cex = 0.75,
col = path.color)
}))
}
if (vestry) {
p.data <- cholera::pumps.vestry
} else {
p.data <- cholera::pumps
}
if (is.null(pump.select)) {
points(p.data[, vars], col = w$snow.colors, lwd = 2, pch = 24)
text(p.data[, vars], labels = paste0("p", p.data$id), cex = 0.9, pos = 1)
} else {
pump.id <- selectPump(p.data, pump.select = w$pump.select,
vestry = w$vestry)
sel <- p.data$id %in% pump.id
unsel <- setdiff(p.data$id, pump.id)
points(p.data[sel, vars], col = w$snow.colors[sel], lwd = 2, pch = 24)
text(p.data[sel, vars], labels = paste0("p", p.data$id[sel]), cex = 0.9,
pos = 1)
points(p.data[unsel, vars], col = "gray", lwd = 2, pch = 24)
text(p.data[unsel, vars], labels = paste0("p", p.data$id[unsel]),
cex = 0.9, pos = 1, col = "gray")
}
if (is.null(w$pump.select)) {
title(main = "Pump Neighborhoods: Walking")
} else {
title(main = paste0("Pump Neighborhoods: Walking", "\n", "Pumps ",
paste(sort(w$pump.select), collapse = ", ")))
}
} else {
if (is.null(path) == FALSE) {
if (path %in% c("expected", "observed") == FALSE) {
stop('If specified, path must be "expected" or "observed".')
}
}
if (vestry) {
p.count <- nrow(cholera::pumps.vestry)
} else {
p.count <- nrow(cholera::pumps)
}
p.ID <- seq_len(p.count)
if (is.null(pump.select) == FALSE) {
if (any(abs(pump.select) %in% p.ID == FALSE)) {
stop('If specified, 1 <= |pump.select| <= ', p.count, " when vestry = ",
vestry, ".")
}
}
if (is.null(pump.select) & is.null(pump.subset) == FALSE) {
if (any(abs(pump.subset) %in% p.ID == FALSE)) {
stop('If specified, 1 <= |pump.subset| <= ', p.count, " when vestry = ",
vestry, ".")
}
}
if (is.null(pump.subset) == FALSE & is.null(pump.select) == FALSE) {
if (all(pump.select > 0)) {
if (any(pump.subset %in% pump.select == FALSE)) {
stop('pump.subset should be a subset of pump.select.')
}
} else if (all(pump.select < 0)) {
if (any(pump.subset %in% p.ID[pump.select])) {
stop('pump.subset should be a subset of pump.select.')
}
}
}
nearest.data <- nearestPump(pump.select = pump.select,
vestry = vestry,
weighted = weighted,
case.set = "observed",
multi.core = cores,
dev.mode = dev.mode)
nearest.dist <- nearest.data$distance
nearest.path <- nearest.data$path
if (vestry) {
nearest.pump <- vapply(nearest.path, function(paths) {
sel <- cholera::ortho.proj.pump.vestry$node %in% paths[length(paths)]
cholera::ortho.proj.pump.vestry[sel, "pump.id"]
}, numeric(1L))
} else {
nearest.pump <- vapply(nearest.path, function(paths) {
sel <- cholera::ortho.proj.pump$node %in% paths[length(paths)]
cholera::ortho.proj.pump[sel, "pump.id"]
}, numeric(1L))
}
nearest.pump <- data.frame(case = cholera::fatalities.address$anchor,
pump = nearest.dist$pump)
pumpID <- sort(unique(nearest.dist$pump))
neighborhood.cases <- lapply(pumpID, function(p) {
nearest.pump[nearest.pump$pump == p, "case"]
})
names(neighborhood.cases) <- pumpID
neighborhood.paths <- lapply(pumpID, function(p) {
n.case <- neighborhood.cases[[paste(p)]]
nearest.path[which(nearest.pump$case %in% n.case)]
})
names(neighborhood.paths) <- pumpID
x <- list(paths = neighborhood.paths,
cases = neighborhood.cases,
vestry = vestry,
weighted = weighted,
case.set = "observed",
pump.select = pump.select,
cores = cores,
metric = 1 / unitMeter(1),
dev.mode = dev.mode)
snow.colors <- snowColors(x$vestry)
if (!is.null(path.color)) {
snow.colors <- stats::setNames(rep(path.color, length(snow.colors)),
names(snow.colors))
}
n.walk <- neighborhoodWalking(pump.select = x$pump.select,
vestry = x$vestry, case.set = x$case.set, multi.core = x$cores)
n.data <- neighborhoodPathData(n.walk)
dat <- n.data$dat
edges <- n.data$edges
n.path.edges <- n.data$neighborhood.path.edges
p.node <- n.data$p.node
p.name <- n.data$p.name
obs.segment.count <- lapply(n.path.edges, function(x) {
table(edges[unique(unlist(x)), "id"])
})
edge.count <- table(edges$id)
segment.audit <- lapply(obs.segment.count, function(neighborhood) {
whole.id <- vapply(names(neighborhood), function(nm) {
identical(neighborhood[nm], edge.count[nm])
}, logical(1L))
list(whole = names(neighborhood[whole.id]),
partial = names(neighborhood[!whole.id]))
})
## ------------ Observed ------------ ##
# list of whole traversed segments
obs.whole <- lapply(segment.audit, function(x) x$`whole`)
# list of partially traversed segments
obs.partial <- lapply(segment.audit, function(x) x$`partial`)
partial.segs <- unname(unlist(obs.partial))
obs.partial.whole <- wholeSegments(partial.segs, dat, edges, p.name, p.node,
x)
# list of split segments (lead to different pumps)
# the cutpoint is found using approx. 1 meter increments via cutpointValues()
obs.partial.segments <- setdiff(partial.segs, unlist(obs.partial.whole))
if (length(obs.partial.segments) > 0) {
if ((.Platform$OS.type == "windows" & x$cores > 1) | x$dev.mode) {
cl <- parallel::makeCluster(x$cores)
parallel::clusterExport(cl = cl, envir = environment(),
varlist = c("edges", "p.name", "p.node", "x"))
obs.partial.split.data <- parallel::parLapply(cl, obs.partial.segments,
splitSegments, edges, p.name, p.node, x)
parallel::stopCluster(cl)
} else {
obs.partial.split.data <- parallel::mclapply(obs.partial.segments,
splitSegments, edges, p.name, p.node, x, mc.cores = x$cores)
}
cutpoints <- cutpointValues(obs.partial.split.data, x)
obs.partial.split.pump <- lapply(obs.partial.split.data, function(x)
unique(x$pump))
obs.partial.split <- splitData(obs.partial.segments, cutpoints, edges)
}
## ------------ Unobserved ------------ ##
# list of edges that are wholly or partially traversed
obs.segments <- lapply(n.path.edges, function(x) {
unique(edges[unique(unlist(x)), "id"])
})
# list of edges that are untouched by any path
unobs.segments <- setdiff(cholera::road.segments$id, unlist(obs.segments))
falconberg.ct.mews <- c("40-1", "41-1", "41-2", "63-1")
unobs.segments <- unobs.segments[unobs.segments %in%
falconberg.ct.mews == FALSE]
# Exclude segment if A&E pump is not among selected.
if (is.null(x$pump.select) == FALSE) {
sel <- "Adam and Eve Court"
AE.pump <- cholera::pumps[cholera::pumps$street == sel, "id"]
AE <- cholera::road.segments[cholera::road.segments$name == sel, "id"]
if (all(x$pump.select > 0)) {
if (AE.pump %in% x$pump.select == FALSE) {
unobs.segments <- unobs.segments[unobs.segments %in% AE == FALSE]
}
} else if (all(x$pump.select < 0)) {
if (AE.pump %in% abs(x$pump.select)) {
unobs.segments <- unobs.segments[unobs.segments %in% AE == FALSE]
}
}
}
unobs.whole <- wholeSegments(unobs.segments, dat, edges, p.name, p.node, x)
unobs.split.segments <- setdiff(unobs.segments, unlist(unobs.whole))
if (length(unobs.split.segments) > 0) {
unobs.split.data <- parallel::mclapply(unobs.split.segments,
splitSegments, edges, p.name, p.node, x, mc.cores = x$cores)
cutpoints <- cutpointValues(unobs.split.data, x)
unobs.split.pump <- lapply(unobs.split.data, function(x) unique(x$pump))
unobs.split <- splitData(unobs.split.segments, cutpoints, edges)
}
## ------------ Data Assembly ------------ ##
wholes <- lapply(paste(p.ID), function(nm) {
c(obs.whole[[nm]],
unobs.whole[[nm]],
obs.partial.whole[[nm]])
})
names(wholes) <- p.ID
# split segments #
split.test1 <- length(obs.partial.segments)
split.test2 <- length(unobs.split.segments)
if (split.test1 > 0 & split.test2 == 0) {
splits <- obs.partial.split
splits.pump <- obs.partial.split.pump
splits.segs <- obs.partial.segments
} else if (split.test1 == 0 & split.test2 > 0) {
splits <- unobs.split
splits.pump <- unobs.split.pump
splits.segs <- unobs.split.segments
} else if (split.test1 > 0 & split.test2 > 0) {
splits <- c(obs.partial.split, unobs.split)
splits.pump <- c(obs.partial.split.pump, unobs.split.pump)
splits.segs <- c(obs.partial.segments, unobs.split.segments)
}
sim.proj <- cholera::sim.ortho.proj
sim.proj.segs <- unique(sim.proj$road.segment)
sim.proj.segs <- sim.proj.segs[!is.na(sim.proj.segs)]
if (split.test1 > 0 | split.test2 > 0) {
split.outcome <- splitOutcomes(x, splits.segs, sim.proj, splits,
splits.pump)
split.outcome <- do.call(rbind, split.outcome)
split.outcome <- split.outcome[!is.na(split.outcome$pump), ]
split.cases <- lapply(sort(unique(split.outcome$pump)), function(p) {
split.outcome[split.outcome$pump == p, "case"]
})
names(split.cases) <- sort(unique(split.outcome$pump))
}
whole.cases <- lapply(names(wholes), function(nm) {
sel <- sim.proj$road.segment %in% wholes[[nm]]
cases <- sim.proj[sel, "case"]
as.numeric(row.names(cholera::regular.cases[cases, ]))
})
names(whole.cases) <- names(wholes)
pearl.neighborhood <- vapply(whole.cases, length, integer(1L))
pearl.neighborhood <- names(pearl.neighborhood[pearl.neighborhood != 0])
if (split.test1 | split.test2) {
neighborhood.cases <- lapply(pearl.neighborhood, function(nm) {
c(whole.cases[[nm]], split.cases[[nm]])
})
} else {
neighborhood.cases <- lapply(pearl.neighborhood, function(nm) {
whole.cases[[nm]]
})
}
names(neighborhood.cases) <- pearl.neighborhood
periphery.cases <- lapply(neighborhood.cases, peripheryCases)
pearl.string <- lapply(periphery.cases, travelingSalesman)
if (is.null(pump.subset)) {
invisible(lapply(names(pearl.string), function(nm) {
sel <- paste0("p", nm)
if (is.null(polygon.col)) {
polygon.col <- grDevices::adjustcolor(snow.colors[sel],
alpha.f = alpha.level)
} else {
polygon.col <- grDevices::adjustcolor(polygon.col,
alpha.f = alpha.level)
}
if (polygon.type == "perimeter") {
polygon(cholera::regular.cases[pearl.string[[nm]], ],
border = polygon.col, lwd = polygon.lwd)
} else if (polygon.type == "solid") {
polygon(cholera::regular.cases[pearl.string[[nm]], ],
col = polygon.col)
} else stop('polygon.type must be "perimeter" or "solid".')
}))
} else {
n.subset <- pearl.string[pump.subset]
invisible(lapply(names(n.subset), function(nm) {
sel <- paste0("p", nm)
if (is.null(polygon.col)) {
polygon.col <- grDevices::adjustcolor(snow.colors[sel],
alpha.f = alpha.level)
} else {
polygon.col <- grDevices::adjustcolor(polygon.col,
alpha.f = alpha.level)
}
if (polygon.type == "perimeter") {
polygon(cholera::regular.cases[pearl.string[[nm]], ],
border = polygon.col, lwd = polygon.lwd)
} else if (polygon.type == "solid") {
polygon(cholera::regular.cases[pearl.string[[nm]], ],
col = polygon.col)
} else stop('polygon.type must be "perimeter" or "solid".')
}))
}
if (is.null(path) == FALSE) {
if (path == "expected") {
if (is.null(pump.subset)) {
invisible(lapply(names(wholes), function(nm) {
n.edges <- edges[edges$id %in% wholes[[nm]], ]
segments(n.edges$x1, n.edges$y1, n.edges$x2, n.edges$y2,
lwd = path.width, col = snow.colors[paste0("p", nm)])
}))
if (split.test1 | split.test2) {
invisible(lapply(seq_along(splits), function(i) {
dat <- splits[[i]]
ps <- splits.pump[[i]]
ps.col <- snow.colors[paste0("p", ps)]
segments(dat[1, "x"], dat[1, "y"], dat[2, "x"], dat[2, "y"],
lwd = path.width, col = ps.col[1])
segments(dat[3, "x"], dat[3, "y"], dat[4, "x"], dat[4, "y"],
lwd = path.width, col = ps.col[2])
}))
}
} else {
if (all(pump.subset > 0)) {
invisible(lapply(paste(pump.subset), function(nm) {
n.edges <- edges[edges$id %in% wholes[[nm]], ]
segments(n.edges$x1, n.edges$y1, n.edges$x2, n.edges$y2,
lwd = path.width, col = snow.colors[paste0("p", nm)])
}))
if (split.test1 | split.test2) {
p.subset <- vapply(splits.pump, function(x) {
any(pump.subset %in% x)
}, logical(1L))
splits.pump.subset <- splits.pump[p.subset]
splits.subset <- splits[p.subset]
split.select <- vapply(splits.pump.subset, function(x) {
which(x %in% pump.subset)
}, integer(1L))
invisible(lapply(seq_along(splits.subset), function(i) {
dat <- splits.subset[[i]]
ps <- splits.pump.subset[[i]]
ps.col <- snow.colors[paste0("p", ps)]
if (split.select[i] == 1) {
segments(dat[1, "x"], dat[1, "y"], dat[2, "x"], dat[2, "y"],
lwd = path.width, col = ps.col[1])
} else if (split.select[i] == 2) {
segments(dat[3, "x"], dat[3, "y"], dat[4, "x"], dat[4, "y"],
lwd = path.width, col = ps.col[2])
}
}))
}
} else if (all(pump.subset < 0)) {
select <- p.ID[p.ID %in% abs(pump.subset) == FALSE]
invisible(lapply(paste(select), function(nm) {
n.edges <- edges[edges$id %in% wholes[[nm]], ]
segments(n.edges$x1, n.edges$y1, n.edges$x2, n.edges$y2,
lwd = path.width, col = snow.colors[paste0("p", nm)])
}))
if (split.test1 | split.test2) {
p.subset <- vapply(splits.pump, function(x) {
any(select %in% x)
}, logical(1L))
splits.pump.subset <- splits.pump[p.subset]
splits.subset <- splits[p.subset]
split.select <- lapply(splits.pump.subset, function(x) {
which(x %in% select)
})
singles <- vapply(split.select, function(x) {
length(x) == 1
}, logical(1L))
invisible(lapply(seq_along(splits.subset[singles]), function(i) {
dat <- splits.subset[singles][[i]]
ps <- splits.pump.subset[singles][[i]]
ps.col <- snow.colors[paste0("p", ps)]
if (split.select[singles][i] == 1) {
segments(dat[1, "x"], dat[1, "y"], dat[2, "x"], dat[2, "y"],
lwd = path.width, col = ps.col[1])
} else if (split.select[singles][i] == 2) {
segments(dat[3, "x"], dat[3, "y"], dat[4, "x"], dat[4, "y"],
lwd = path.width, col = ps.col[2])
}
}))
invisible(lapply(seq_along(splits.subset[!singles]), function(i) {
dat <- splits.subset[!singles][[i]]
ps <- splits.pump.subset[!singles][[i]]
ps.col <- snow.colors[paste0("p", ps)]
segments(dat[1, "x"], dat[1, "y"], dat[2, "x"], dat[2, "y"],
lwd = path.width, col = ps.col[1])
segments(dat[3, "x"], dat[3, "y"], dat[4, "x"], dat[4, "y"],
lwd = path.width, col = ps.col[2])
}))
}
} else {
stop("Use all positive or all negative numbers for pump.subset.")
}
}
} else if (path == "observed") {
if (is.null(pump.subset)) {
edge.data <- lapply(n.path.edges, function(x) unique(unlist(x)))
invisible(lapply(names(edge.data), function(nm) {
n.edges <- edges[edge.data[[nm]], ]
segments(n.edges$x1, n.edges$y1, n.edges$x2, n.edges$y2,
lwd = path.width, col = snow.colors[paste0("p", nm)])
}))
} else {
if (all(pump.subset > 0)) {
sel <- names(n.path.edges) %in% pump.subset
} else if (all(pump.subset < 0)) {
sel <- names(n.path.edges) %in% abs(pump.subset) == FALSE
} else {
stop("Use all positive or all negative numbers for pump.subset.")
}
edge.data <- lapply(n.path.edges[sel], function(x) unique(unlist(x)))
invisible(lapply(names(edge.data), function(nm) {
n.edges <- edges[edge.data[[nm]], ]
segments(n.edges$x1, n.edges$y1, n.edges$x2, n.edges$y2,
lwd = path.width, col = snow.colors[paste0("p", nm)])
}))
}
}
}
}
}
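## Illustrative usage sketch (commented out; pump IDs and settings here are
## hypothetical, the arguments are those documented in the roxygen block above):
## streetNameLocator("marshall street", zoom = 0.5)
## addNeighborhoodWalking(pump.select = c(6, 7), path = "expected", alpha.level = 0.5)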
|
/R/addNeighborhoodWalking.R
|
no_license
|
cran/cholera
|
R
| false | false | 21,202 |
r
|
#install.packages('lubridate')
library(dplyr)
library(lubridate)
info <- read.csv(url("https://raw.githubusercontent.com/beduExpert/Programacion-con-R-Santander/master/Sesion-06/Postwork/match.data.csv"), header = TRUE, sep = ',')
info2 <- data.frame(info)
info2 <- info2 %>%
mutate(sumagoles = select(., 3,5) %>% rowSums())
info2$date <- as.Date(info2$date , format = "%Y-%m-%d")
head(info2)
str(info2)
info3 <- info2 %>% group_by(month=floor_date(date, "month")) %>%
summarize(
goalspermonth=sum(sumagoles),
games = n(),
averagegoals = sum(sumagoles)/n()
)
info2
str(info3)
head(info3)
goles.ts <- ts(info3[, 4], start = c(2010, 8), frequency = 12)
print(goles.ts)
plot(goles.ts,
main = "Promedio de goles por partido",
xlab = "Tiempo",
sub = "Agosto de 2010 - Diciembre de 2018")
|
/Session-06/postwork.R
|
no_license
|
eliassevilla/Modulo-2-BEDU-Equipo-15
|
R
| false | false | 855 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lakes.R
\docType{data}
\name{inputLM}
\alias{inputLM}
\title{lakemorpho class data for \code{lakemorpho} examples}
\format{
lakemorpho class
}
\description{
This example lakemorpho class was generated using lakeSurroundTopo with the
included exampleElev and exampleLake data.
}
\keyword{datasets}
|
/man/inputLM.Rd
|
no_license
|
cran/lakemorpho
|
R
| false | true | 390 |
rd
|
###################################################################
### Load relevant libraries
library(tximport)
library(DRIMSeq)
library(BiocParallel)
###################################################################
### Get arguments passed from command line
args <- commandArgs(TRUE)
dir.in = args[1]
dir.out = args[2]
sample_annot = args[3]
cond = args[4]
###################################################################
### Function to reformat transcript annotation
splitTxNames <- function(txNames){
txSp = strsplit(txNames, split = "\\|")
## Extract information
tx_id = lapply(txSp, function(x){x[1]})
gene_id = lapply(txSp, function(x){x[2]})
gene_name = lapply(txSp, function(x){x[6]})
gene_type = lapply(txSp, function(x){x[8]})
entrez_id = lapply(txSp, function(x){x[7]})
## Create DF
df = data.frame(transcript_id = unlist(tx_id),
gene_id = unlist(gene_id),
gene_name = unlist(gene_name),
gene_type = unlist(gene_type),
entrez_id = unlist(entrez_id))
return(df)
}
###################################################################
### Read quantification and counts from Salmon
sample.annot = read.csv(sample_annot, stringsAsFactors = F)
files <- file.path(dir.in, sample.annot$Sample, "quant.sf/quant.sf")
names(files) <- sample.annot$Sample
tx.quant <- tximport(files, type = "salmon", txOut = TRUE)
names(tx.quant)
tx.abundance = tx.quant$abundance
nonZero = !(rowSums(tx.abundance) == 0)
tx.abundance.noZero = tx.abundance[nonZero,]
tx.names = rownames(tx.abundance.noZero)
###################################################################
### Reformat transcript names
df.tx = splitTxNames(tx.names)
gc()
print("Formatting transcripts")
tx.abundance.df = data.frame(tx.abundance.noZero)
tx.abundance.df$feature_id = df.tx$transcript_id
tx.abundance.df$gene_id = df.tx$gene_id
rownames(tx.abundance.df) <- df.tx$transcript_id
head(tx.abundance.df)
gc()
###################################################################
### Format sample annotations
print("Formatting sample annotations")
drimseq_samples = data.frame(sample_id = sample.annot$Sample,
group = sample.annot$Group)
head(drimseq_samples)
gc()
###################################################################
### Create DRIMSeq object and filter
print("Create object")
d <- DRIMSeq::dmDSdata(counts = tx.abundance.df, samples = drimseq_samples)
d <- dmFilter(d,
min_samps_gene_expr = 3,
min_samps_feature_expr = 3)
###################################################################
### DRIMSeq analysis
print("Estimate precisions")
design_full <- model.matrix(~ group, data = samples(d))
set.seed(123)
multicoreParam <- MulticoreParam(workers = 23)
d <- dmPrecision(d, design = design_full,
BPPARAM = multicoreParam,
prec_grid_range = c(-15, 15))
gc()
print("Fitting")
multicoreParam <- MulticoreParam(workers = 23)
d <- dmFit(d, design = design_full, verbose = 1,
BPPARAM = multicoreParam)
gc()
print("Testing")
multicoreParam <- MulticoreParam(workers = 23)
d.coef <- dmTest(d, coef = "groupkidney", verbose = 1,
BPPARAM = multicoreParam)
## Save output
coef.out = paste0(dir.out, cond, ".drimseq_coef.salmon.RData")
print(coef.out)
print("Done")
save(d.coef,
file = coef.out)
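###################################################################
### Optional follow-up (illustrative sketch, commented out): gene- and
### feature-level result tables could be extracted with DRIMSeq::results()
## res.gene <- DRIMSeq::results(d.coef)
## res.feature <- DRIMSeq::results(d.coef, level = "feature")
## head(res.gene[order(res.gene$pvalue), ])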
|
/drimseq/DRIMseq_quant_DRIMseq.R
|
no_license
|
chbtchris/Khk_quantifications
|
R
| false | false | 3,425 |
r
|
# Sven Reulen
# Code from wikipedia
#if year is divisible by 400 then
#is_leap_year
#else if year is divisible by 100 then
#not_leap_year
#else if year is divisible by 4 then
#is_leap_year
#else
#not_leap_year
# Function
is.leap <- function(year){
if(is.numeric(year) == FALSE){
x = 'wrong input'
}else if(year < 1582){
x = 'Years below 1582 are not returned'
}else if(year %% 400 ==0){
x = TRUE
}else if(year %% 100 == 0){
x = FALSE
}else if(year %% 4 ==0) {
x = TRUE
}else{
x = FALSE
}
return(x)
}
# Testing
# Year 2000 is leap year
is.leap(2000)
# Year 1999 is not a leap
is.leap(1999)
# year 1804 is leap year
is.leap(1804)
# year 1805 is not a leap year
is.leap(1805)
# year 1500 is before 1582, so a message is returned instead of TRUE/FALSE
is.leap(1500)
# characters should not work
is.leap('appletree')
|
/Exercise_2_calculate_leap_year_Sven_Reulen.R
|
no_license
|
SvenReulen/Exercise_2_Sven_Reulen
|
R
| false | false | 822 |
r
|
\name{ENMevaluate }
\alias{ENMevaluate}
\alias{tuning}
\title{ Tuning and evaluation of ENMs with Maxent }
\description{ \code{ENMevaluate} automatically executes Maxent (Phillips \emph{et al}. 2006; Phillips and Dudik 2008) across a range of settings, returning a \code{data.frame} of evaluation metrics to aid in identifying settings that balance model fit and predictive ability. The function calls Maxent using the \code{maxent} function in the \pkg{dismo} package (Hijmans \emph{et al.} 2011). Users should consult \code{\link{ENMeval-package}} and help documentation of the \pkg{dismo} package for guidelines on how to run Maxent in R. }
\usage{
ENMevaluate(occ, env, bg.coords = NULL, occ.grp = NULL,
bg.grp = NULL, RMvalues = seq(0.5, 4, 0.5),
fc = c("L", "LQ", "H", "LQH", "LQHP", "LQHPT"),
categoricals = NULL, n.bg = 10000, method = NULL,
overlap = FALSE, aggregation.factor = c(2, 2),
kfolds = NA, bin.output = FALSE, clamp = TRUE,
rasterPreds = TRUE, parallel = FALSE, numCores = NULL,
progbar = TRUE, updateProgress = FALSE, ...)
tuning(occ, env, bg.coords, occ.grp, bg.grp, method,
maxent.args, args.lab, categoricals, aggregation.factor,
kfolds, bin.output, clamp, rasterPreds, parallel, numCores,
progbar, updateProgress, userArgs)
}
\arguments{
\item{occ}{ Two-column matrix or data.frame of longitude and latitude (in that order) of occurrence localities. }
\item{env}{ RasterStack of model predictor variables (environmental layers). }
\item{bg.coords}{ Two-column matrix or data.frame of longitude and latitude (in that order) of background localities (required for '\code{user}' method). }
\item{occ.grp}{ Vector of bins of occurrence localities (required for '\code{user}' method). }
\item{bg.grp}{ Vector of bins of background localities (required for '\code{user}' method). }
\item{RMvalues}{ Vector of (non-negative) values to use for the regularization multiplier. }
\item{fc}{ Character vector of feature class combinations to be included in analysis. }
\item{categoricals}{ Vector indicating which (if any) of the input environmental layers are categorical. }
\item{n.bg}{ The number of random background localities to draw from the study extent. }
\item{method}{ Character string designating the method used for data partitioning. Choices are: \code{"jackknife", "randomkfold", "user", "block", "checkerboard1", "checkerboard2"}. See details and \code{\link{get.evaluation.bins}} for more information. }
\item{overlap}{ logical; If \code{TRUE}, provides pairwise metric of niche overlap (see details and \code{\link{calc.niche.overlap}}). }
\item{aggregation.factor}{ List giving the factor by which the original input grid should be aggregated for checkerboard partitioning methods (see details and \code{\link{get.evaluation.bins}}). }
\item{kfolds}{ Number of bins to use in the \emph{k}-fold random method of data partitioning. }
\item{bin.output}{ logical; If \code{TRUE}, appends evaluations metrics for each evaluation bin to results table (i.e., in addition to the average values across bins). }
\item{maxent.args}{ Arguments to pass to Maxent that are generated by the \code{make.args} function }
\item{args.lab}{ Character labels describing feature classes and regularization multiplier values for Maxent runs provided by the \code{make.args} function. }
\item{clamp}{ logical; If \code{TRUE}, 'clamping' is used (see Maxent documentation and tutorial for more details). }
\item{rasterPreds}{ logical; If \code{TRUE}, the \code{predict} function from \code{dismo} is used to predict each full model across the extent of the input environmental variables. Note that AICc (and associated values) are \emph{NOT} calculated if \code{rasterPreds=FALSE} because these calculations require the predicted surfaces. However, setting to \code{FALSE} can significantly reduce run time.}
\item{parallel}{ logical; If \code{TRUE}, parallel processing is used to execute tuning function. }
\item{numCores}{ numeric; indicates the number of cores to use if running in parallel. If \code{parallel=TRUE} and this is not specified, the total number of available cores are used.}
\item{progbar}{ logical; used internally. }
\item{updateProgress}{ logical; used internally. }
\item{...}{ character vector; use this to pass other arguments (e.g., prevalence) to the `maxent` call. Note that not all options are functional or relevant.}
\item{userArgs}{ character vector; use this to pass other arguments (e.g., prevalence) to the `maxent` call. Note that not all options are functional or relevant.}
}
\details{
\code{ENMevaluate} is the primary function for general use in the \pkg{ENMeval} package; the \code{tuning} function is used internally.
\emph{Maxent settings:} In the current default implementation of Maxent, the combination of feature classes (\code{fc}s) allowed depends on the number of occurrence localities, and the value for the regularization multiplier (\code{RM}) is 1.0. \code{ENMevaluate} provides an automated way to execute ecological niche models in Maxent across a user-specified range of (\code{RM}) values and (\code{fc}) combinations, regardless of sample size. Acceptable values for the \code{fc} argument include: L=linear, Q=quadratic, P=product, T=threshold, and H=hinge (see Maxent help documentation, Phillips \emph{et al.} (2006), Phillips and Dudik (2008), Elith \emph{et al.} (2011), and Merow \emph{et al.} (2013) for additional details on \code{RM} and \code{fc}s). Categorical feature classes (C) are specified by the \code{categoricals} argument.
\emph{Methods for partitioning data:} \code{ENMevaluate} includes six methods to partition occurrence and background localities into bins for training and testing (\code{'jackknife', 'randomkfold', 'user', 'block',} \code{'checkerboard1', 'checkerboard2'}). The \code{jackknife} method is a special case of \emph{k}-fold cross validation where the number of folds (\emph{k}) is equal to the number of occurrence localities (\emph{n}) in the dataset. The \code{randomkfold} method partitions occurrence localities randomly into a user-specified number of (\emph{k}) bins - this is equivalent to the method of \emph{k}-fold cross validation currently provided by Maxent. The \code{user} method enables users to define bins \emph{a priori}. For this method, the user is required to provide background coordinates (\code{bg.coords}) and bin designations for both occurrence localities (\code{occ.grp}) and background localities (\code{bg.grp}). The \code{block} method partitions the data into four bins according to the lines of latitude and longitude that divide the occurrence localities into bins of as equal number as possible. The \code{checkerboard1} (and \code{checkerboard2}) methods partition data into two (or four) bins based on one (or two) checkerboard patterns with grain size defined as one (or two) aggregation factor(s) of the original environmental layers. Although the \code{checkerboard1} (and \code{checkerboard2}) methods are designed to partition occurrence localities into two (and four) evaluation bins, they may give fewer bins depending on the location of occurrence localities with respect to the checkerboard grid(s) (e.g., all records happen to fall in the "black" squares). A warning is given if the number of bins is < 4 for the \code{checkerboard2} method, and an error is given if all localities fall in a single evaluation bin. Additional details can be found in \code{\link{get.evaluation.bins}}.
\emph{Evaluation metrics:} Four evaluation metrics are calculated using the partitioned dataset, and one additional metric is provided based on the full dataset. \code{ENMevaluate} uses the same background localities and evaluation bin designations for each of the \emph{k} iterations (for each unique combination of \code{RM} and \code{fc}) to facilitate valid comparisons among model settings.
\code{Mean.AUC} is the area under the curve of the receiver operating characteristic plot made based on the testing data (i.e., AUCtest), averaged across \emph{k} bins. In each iteration, as currently implemented, the AUCtest value is calculated with respect to the full set of background localities to enable comparisons across the \emph{k} iterations (Radosavljevic and Anderson 2014). As a relative measure for a given study species and region, high values of \code{Mean.AUC} are associated with the degree to which a model can successfully discriminate occurrence from background localities. This rank-based non-parametric metric, however, does not reveal the model goodness-of-fit (Lobo \emph{et al.} 2008; Peterson \emph{et al.} 2011).
To quantify the degree of overfitting, \code{ENMevaluate} calculates three metrics. The first is the difference between training and testing AUC, averaged across \emph{k} bins (\code{Mean.AUC.DIFF}) (Warren and Seifert 2011). \code{Mean.AUC.DIFF} is expected to be high for models overfit to the training data. \code{ENMevaluate} also calculates two threshold-dependent omission rates that quantify overfitting when compared with the omission rate expected by the threshold employed: the proportion of testing localities with Maxent output values lower than the value associated with (1) the training locality with the lowest value (i.e., the minimum training presence, MTP; = 0 percent training omission) (\code{Mean.ORmin}) and (2) the value that excludes the 10 percent of training localities with the lowest predicted suitability (\code{Mean.OR10}) (Pearson \emph{et al.} 2007). \code{ENMevaluate} uses \code{\link{corrected.var}} to calculate the variance for each of these metrics across \emph{k} bins (i.e., variances are corrected for non-independence of cross-validation iterations; see Shcheglovitova and Anderson 2013). The value of these metrics for each of the individual \emph{k} bins is returned if \code{bin.output = TRUE}.
Based on the unpartitioned (full) dataset, \code{ENMevaluate} uses \code{\link{calc.aicc}} to calculate the AICc value for each model run and provides delta.AIC, AICc weights, as well as the number of parameters for each model (Warren and Seifert 2011). Note that AICc (and associated values) are \emph{NOT} calculated if \code{rasterPreds=FALSE} because these calculations require the predicted surfaces. The AUCtrain value for the full model is also returned (\code{full.AUC}).
To quantify how resulting predictions differ in geographic space depending on the settings used, \code{ENMevaluate} includes an option to compute pairwise niche overlap between all pairs of full models (i.e., using the unpartitioned dataset) with Schoener's \emph{D} statistic (Schoener 1968; Warren \emph{et al.} 2009).
}
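% Illustrative usage sketch (object names 'env' and 'occs' are hypothetical):
%   res <- ENMevaluate(occ = occs, env = env, method = "block",
%                      RMvalues = c(1, 2), fc = c("L", "LQ"), n.bg = 5000)
%   res@results   # averaged evaluation metrics, one row per settings combination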
\value{
An object of class \code{ENMevaluation} with named slots:
\code{@results} data.frame of evaluation metrics. If \code{bin.output=TRUE}, evaluation metrics calculated separately for each evaluation bin are included in addition to the averages and corrected variances (see \code{\link{corrected.var}}) across \emph{k} bins.
\code{@predictions} RasterStack of full model predictions with each layer named as: \code{fc_RM} (e.g., \code{L_1}). This will be an empty RasterStack if the \code{rasterPreds} argument is set to \code{FALSE}.
\code{@models} List of objects of class \code{"MaxEnt"} from the \pkg{dismo} package. Each of these entries include slots for lambda values and the original Maxent results table. See Maxent documentation for more information.
\code{@partition.method} character vector with the method used for data partitioning.
\code{@occ.pts} data.frame of the latitude/longitude of input occurrence localities.
\code{@occ.grp} vector identifying the bin for each occurrence locality.
\code{@bg.pts} data.frame of the latitude/longitude of input background localities.
\code{@bg.grp} vector identifying the bin for each background locality.
\code{@overlap} matrix of pairwise niche overlap (blank if \code{overlap = FALSE}).
}
\references{
Elith, J., Phillips, S. J., Hastie, T., Dudik, M., Chee, Y. E., and Yates, C. J. (2011) A statistical explanation of MaxEnt for ecologists. \emph{Diversity and Distributions}, \bold{17}: 43-57.
Hijmans, R. J., Phillips, S., Leathwick, J. and Elith, J. (2011) dismo package for R. Available online at: \url{https://cran.r-project.org/package=dismo}.
Lobo, J. M., Jimenez-Valverde, A., and Real, R. (2008) AUC: A misleading measure of the performance of predictive distribution models. \emph{Global Ecology and Biogeography}, \bold{17}: 145-151.
Muscarella, R., Galante, P.J., Soley-Guardia, M., Boria, R.A., Kass, J., Uriarte, M. and Anderson, R.P. (2014) ENMeval: An R package for conducting spatially independent evaluations and estimating optimal model complexity for ecological niche models. \emph{Methods in Ecology and Evolution}, \bold{5}: 1198-1205.
Pearson, R. G., Raxworthy, C. J., Nakamura, M. and Peterson, A. T. (2007) Predicting species distributions from small numbers of occurrence records: a test case using cryptic geckos in Madagascar. \emph{Journal of Biogeography}, \bold{34}: 102-117.
Peterson, A. T., Soberon, J., Pearson, R. G., Anderson, R. P., Martinez-Meyer, E., Nakamura, M. and Araujo, M. B. (2011) \emph{Ecological Niches and Geographic Distributions}. Monographs in Population Biology, 49. Princeton University Press, Princeton, NJ.
Phillips, S. J., Anderson, R. P., and Schapire, R. E. (2006) Maximum entropy modeling of species geographic distributions. \emph{Ecological Modelling}, \bold{190}: 231-259.
Phillips, S. J. and Dudik, M. (2008) Modeling of species distributions with Maxent: new extensions and a comprehensive evaluation. \emph{Ecography}, \bold{31}: 161-175.
Merow, C., Smith, M., and Silander, J. A. (2013) A practical guide to Maxent: what it does, and why inputs and settings matter. \emph{Ecography}, \bold{36}: 1-12.
Radosavljevic, A. and Anderson, R. P. (2014) Making better Maxent models of species distributions: complexity, overfitting and evaluation. \emph{Journal of Biogeography}, \bold{41}: 629-643.
Schoener, T. W. (1968) The \emph{Anolis} lizards of Bimini: resource partitioning in a complex fauna. \emph{Ecology}, \bold{49}: 704-726.
Shcheglovitova, M. and Anderson, R. P. (2013) Estimating optimal complexity for ecological niche models: A jackknife approach for species with small sample sizes. \emph{Ecological Modelling}, \bold{269}: 9-17.
Warren, D. L., Glor, R. E., Turelli, M. and Funk, D. (2009) Environmental niche equivalency versus conservatism: quantitative approaches to niche evolution. \emph{Evolution}, \bold{62}: 2868-2883; \emph{Erratum: Evolution}, \bold{65}: 1215.
Warren, D.L. and Seifert, S.N. (2011) Ecological niche modeling in Maxent: the importance of model complexity and the performance of model selection criteria. \emph{Ecological Applications}, \bold{21}: 335-342.
}
\author{ Uses the \code{maxent} function in the \pkg{dismo} package (Hijmans \emph{et al.} 2011, Phillips \emph{et al.} 2006)
Robert Muscarella <bob.muscarella@gmail.com> and Jamie M. Kass <jkass@gc.cuny.edu>}
\seealso{
\code{maxent} in the \pkg{dismo} package
}
\examples{
### Simulate environmental covariate data
set.seed(1)
r1 <- raster(matrix(nrow=50, ncol=50, data=runif(10000, 0, 25)))
r2 <- raster(matrix(nrow=50, ncol=50, data=rep(1:100, each=100), byrow=TRUE))
r3 <- raster(matrix(nrow=50, ncol=50, data=rep(1:100, each=100)))
r4 <- raster(matrix(nrow=50, ncol=50, data=c(rep(1,1000),rep(2,500)),byrow=TRUE))
values(r4) <- as.factor(values(r4))
env <- stack(r1,r2,r3,r4)
### Simulate occurrence localities
nocc <- 50
x <- (rpois(nocc, 2) + abs(rnorm(nocc)))/11
y <- runif(nocc, 0, .99)
occ <- cbind(x,y)
\dontrun{
### This call gives the results loaded below
enmeval_results <- ENMevaluate(occ, env, method="block", n.bg=500)
}
data(enmeval_results)
enmeval_results
### See table of evaluation metrics
enmeval_results@results
### Plot prediction with lowest AICc
plot(enmeval_results@predictions[[which(enmeval_results@results$delta.AICc == 0)]])
points(enmeval_results@occ.pts, pch=21, bg=enmeval_results@occ.grp)
### Niche overlap statistics between model predictions
enmeval_results@overlap
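### Illustrative addition (not from the original documentation): extract the
### row(s) of the results table with delta.AICc == 0, i.e. the settings
### selected under the AICc criterion used in the plot above.
enmeval_results@results[enmeval_results@results$delta.AICc == 0, ]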
}
/man/ENMevaluate.Rd | darcyj/ENMeval | no_license | R (Rd)
## DBSCAN
###################################################
# Step 1: load fpc package
#install.packages("fpc")
library(fpc)
# Remove label from iris dataset
iris2 <- iris[-5] # remove class tags
# Step 2: Apply DbScan clustering
ds_model <- dbscan(iris2, eps=0.45, MinPts=5)
# Interpretation of Model
ds_model
# 1 to 3 : identified clusters
# 0: noises or outliers, objects that are not assigned to any clusters
# Check the cluster
ds_model$cluster
# compare clusters with original class labels
table(ds_model$cluster, iris$Species)
# Plot Cluster
plot(ds_model, iris2, main = "DBSCAN")
plot(ds_model, iris2[c(1,4)], main = "Petal Width vs Sepal Length")
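# --- Illustrative addition (not part of the original script) ---
# DBSCAN is sensitive to eps; this small sweep (the variable names are new
# here) reuses fpc::dbscan on iris2 to show how the number of non-noise
# clusters changes as eps varies.
eps_grid <- seq(0.3, 0.8, by = 0.1)
n_clusters <- sapply(eps_grid, function(e) {
  m <- dbscan(iris2, eps = e, MinPts = 5)
  length(setdiff(unique(m$cluster), 0))  # cluster 0 is noise, so exclude it
})
data.frame(eps = eps_grid, clusters = n_clusters)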
############################################################################################
#############################################################################################
#Install and load mlbench and fpc package
#install.packages("mlbench")
library(mlbench)
#install.packages("fpc")
library(fpc)
#Use the mlbench library to draw a Cassini problem graph
set.seed(2)
dataset = mlbench.cassini(500)
plot(dataset$x)
?dbscan
ds = dbscan(dist(dataset$x), eps = 0.2, MinPts = 2, countmode = NULL, method = "dist")
ds
ds$cluster
plot(ds, dataset$x)
y = matrix(0, nrow = 3, ncol = 2)
y[1,] = c(0,0)
y[2,] = c(0,-1.5)
y[3,] = c(1,1)
y
#Use predict() on the fitted DBSCAN model to assign the new points in y to clusters
predict(ds, dataset$x, y)
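# --- Illustrative addition (not part of the original script) ---
# mlbench generators such as mlbench.cassini() also return the true class
# labels (assumed here to be in dataset$classes); cross-tabulating them
# against the DBSCAN clusters gives a quick check of how well the three
# Cassini structures are recovered.
table(cluster = ds$cluster, truth = dataset$classes)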
/R_Files/Chapter12_DBScan.R | djdhiraj/Data_Science_In_R | permissive | R
## Exploratory XPath queries over the XSD schemas under inst/xsd.
## Assumes `collate` (a character vector of xsd file names) and `ns` (an XML
## namespace mapping that includes the "xs" prefix) are defined by the caller.
library(xml2)      # read_xml(), xml_find_all()
library(purrr)     # assumed source of map() used below
library(magrittr)  # %>%
shrink <- function(l){ l[sapply(l, length) > 0] }  # drop files with no matches
all <- paste0("inst/xsd/", collate)
## All xs:sequence elements that carry a maxOccurs attribute:
all %>% map(function(x){ xsd = read_xml(x); xml_find_all(xsd, "//xs:sequence[@maxOccurs]", ns) }) %>% shrink()
## Prove xs:all is never used:
all %>% map(function(x){ xsd = read_xml(x); xml_find_all(xsd, "//xs:complexType/xs:all", ns) }) %>% shrink()
star <- "[child::xs:choice | child::xs:sequence]"
## All complex types that have a child either a choice or sequence
all %>% map(function(x){
xsd = read_xml(x);
xml_find_all(xsd, paste0("//xs:complexType", star), ns) }) %>%
shrink()
## complexType/choice
## all xs:choice or xs:sequence with parent who is xs:complexType/xs:choice
all %>% map(function(x){
xsd = read_xml(x);
xml_find_all(xsd, paste0("//xs:complexType", star, "/*", star), ns) }) %>%
shrink()
## complexType/choice/choice
## Depth 2. all xs:choice or xs:sequence whose parent is also xs:choice/seq whose parent is complexType
all %>% map(function(x){
xsd = read_xml(x);
xml_find_all(xsd, paste0("//xs:complexType", star, "/*", star, "/*", star), ns) }) %>%
shrink()
## complexType/choice/choice/choice
all %>% map(function(x){
xsd = read_xml(x);
xml_find_all(xsd, paste0("//xs:complexType", star, "/*", star, "/*", star, "/*", star), ns) }) %>%
shrink()
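## --- Illustrative helper (not in the original file) ---
## The read/find/shrink pattern above is repeated for every XPath query; a
## small wrapper (hypothetical name) keeps further queries readable.  Same
## assumptions as above (`ns` defined by the caller).
xsd_find <- function(files, xpath){
  files %>%
    map(function(x){ xml_find_all(read_xml(x), xpath, ns) }) %>%
    shrink()
}
## e.g. the depth-1 query from above:
## xsd_find(all, paste0("//xs:complexType", star))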
/inst/create-package/explore.R | mbjones/reml | no_license | R
## This file is designed to be called by "log_number_maker.R".
## It takes about 10 seconds to run.
## Makes R variables 'table_main' and 'table_Delta' (both matrices),
## which are the main and Delta parts of the log table. Also makes R
## variables 'simple_main' and 'simple_Delta' which are the same but
## without the split rows of 'table_main'.
## These four matrices are numeric, not string, so need to be
## processed to (eg) turn integer 86 into "0086" which would appear in
## the finished table.
## I am assuming that we know how log tables are used.
## The documentation of this file includes some overlap with that in
## antilogtable.R, but the two files are sufficiently different to be
## considered separately.
showdebug <- FALSE
log <- function(...){stop("do not use log(), use log10() here")}
func <- log10
tableentry <- function(x,numerical=TRUE){
## Example: x=1.32, we have log10(x)=0.1205739 from R; table entry is
## "1206" (that is, the actual table entry, as it actually appears on
## the table; notionally an integer)
out <- round(func(x)*10000)
if(numerical){
return(out)
} else {
return(noquote(sprintf("%04i",out)))
}
}
tablevalue <- function(x){
## Example: x=1.31, we have log10(x)=0.1172713 from R; table entry
## of "1173" would be interpreted as 0.1173 [that is, the numerical
## equivalent of the table entry as given by tableentry()]
tableentry(x)/10000
}
tablevalue_delta <- function(x, Delta){
## This function evaluates the effect of a suggested Delta value in
## the differences part of the table. The idea is that we will try
## different values of Delta and see which value is the "best", as
## measured by badness(). For example, tablevalue_delta(x=1.31,
## Delta=8) returns the table's value at the point x=1.31, if the
## value of Delta used is 8. The table gives "1173" for x=1.31, and
## if Delta=8 this is 1173+8=1181, which would mean that 0.1181 is
## given by the table.
tablevalue(x) + Delta/10000
}
tableerror <- function(x, third_digit, Delta){
## This function calculates the error induced by using the table for
## a particular value of x and third_digit, when using a particular
## value of Delta. We will try different values of Delta and see
## which one is the "best".
## For example, consider
## tableerror(x=1.32, third_digit=4, Delta=19)
  ## This means we are considering log10(1.324) = 0.121888.  The
  ## table would give "1206" which, with a Delta of 19, corresponds
  ## to 1206+19=1225, i.e. a numerical value of 0.1225.
  ## So the error returned (table value minus true value) would be
  ## 0.1225-0.121888 = +0.000612
true_value <- func(x+third_digit/1000)
table_value <- tablevalue_delta(x,Delta=Delta)
return(table_value-true_value)
}
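## Illustrative check (not in the original file): reproduce the worked example
## from the comment above whenever debugging output is requested.
if(showdebug){
  print(tableerror(x=1.32, third_digit=4, Delta=19))  # approximately +6e-04
}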
error <- function(x,third_digit,Delta){
## Returns the error from each of a series of x values: given a
## particular value of Delta, error() returns the difference between
  ## the true value of log10(x) and the value given by the table.
## For example, consider:
## error(x=seq(from=1.3,by=0.01,to=1.34),third_digit=5,Delta=16)
## and compare this with
## error(x=seq(from=1.3,by=0.01,to=1.34),third_digit=5,Delta=17)
## so we can see whether Delta=16 is better or worse than Delta=17
## for the "5" entry of the differences on the "1.3" row of the
## table.
## error(x=seq(from=1.3,by=0.01,to=1.34),third_digit=5,Delta=16)
## [1] -1.1051e-04 -2.5752e-05 -1.5878e-05 1.8734e-05 -2.2284e-05
## > error(x=seq(from=1.3,by=0.01,to=1.34),third_digit=5,Delta=17)
## [1] -1.0511e-05 7.4247e-05 8.4121e-05 1.1873e-04 7.7715e-05
## (above cut-n-pasted but slightly edited for clarity). More
## explicitly, we would have, for Delta=16, the following:
## 1.30 -> 1.305: 1139+16=1155; 0.1155-log10(1.305) = -0.0001105117
## 1.31 -> 1.315: 1173+16=1189; 0.1189-log10(1.315) = -0.0000257528
## 1.32 -> 1.325: 1206+16=1222; 0.1222-log10(1.325) = -0.0000158782
## 1.33 -> 1.335: 1239+16=1255; 0.1255-log10(1.335) = +0.0001873430
## 1.34 -> 1.345: 1271+16=1287; 0.1287-log10(1.345) = -0.0000228434
## See how a *single* Delta value (here we are using 16) gives
## rise to 5 different errors when using a third digit of 5, one
## for each the five cases it would be used. In this case have
## four negative errors and one positive. From the R session
## above, with Delta=17 we have one negative and four positive
## errors.
sapply(x,function(x){tableerror(x, third_digit=third_digit, Delta=Delta)})
}
badness <- function(x,third_digit,Delta,measure){
## As per the comments in error(), any Delta value [we were
## comparing Delta=16 and Delta=17 above, for the '5' entry on the
## '1.3' row] has associated with it 5 or 10 distinct errors, one
## for each column of its row. To choose a particular value of
## Delta, for example to choose whether 16 is preferable to 17, we
## need to summarize *all* the error values associated with the
## different values of Delta. We can do this either by returning
## the maximum absolute error ('max'), the root mean square error
## ('mse'), or the mean absolute deviation ('mad'). Function
## badness() returns either the max, mse, or mad as required.
## Note that these three different summary methods give different
## measures of badness, and this means that the value of Delta
## might differ between max,mse, and mad.
error <- error(x,third_digit,Delta)
switch(measure,
max=max(abs(error)), # max = Maximum error
mse=sqrt(mean(error^2)), # mse = Mean Square Error
mad=mean(abs(error)) # mad = Mean Absolute Deviation
)
}
differences <- function(x,show=FALSE){
## Given a particular value of x, which specifies a row of the
## table, function differences() finds the "best" values to use
## for the entry in the differences section of the table [it tries
## everything from Delta=0 to Delta=40] with respect to the three
## different badness measures above. Here "best" is defined as
## "the value of Delta that minimizes the badness".
## For example, suppose we are wondering what differences to use
## in the 1.05-1.09 (half) line of the log table. The Deltas need
## to be good (ie low badness()) for all five numbers 1.05-1.09:
  ## R> differences(seq(from=1.05,to=1.09,by=0.01))
## 1 2 3 4 5 6 7 8 9
## max 4 8 12 16 20 25 29 33 37
## mse 4 8 12 16 20 24 28 32 36
## mad 4 8 12 16 20 24 28 32 36
## range 0 0 0 0 0 1 1 1 1
## If we want to use 'max' as a measure of badness, we use the
## first row of the output in the table, which would be
## 4 8 12 16 20 25 29 33 37
## We see that the different badness measures give slightly
## different results, with disagreement of one unit for 6-9.
## Passing show=TRUE gives a little more information:
  ## R> differences(seq(from=1.05,to=1.09,by=0.01),show=TRUE)
## 1 2 3 4 5 6 7 8 9
## max 4 8 12 16 20 25 29 33 37
## mse 4 8 12 16 20 24 28 32 36
## mad 4 8 12 16 20 24 28 32 36
## range 0 0 0 0 0 1 1 1 1
## max_bad 6 6 7 9 10 11 10 10 10
## mse_bad 3 3 3 4 4 5 5 6 6
## mad_bad 2 3 3 3 3 4 4 4 5
## In the above, the last three lines show the worst (ie highest)
## badness score across the five numbers x <-
  ## seq(from=1.05,to=1.09,by=0.01) so we can get some insight
## into how the badnesses are distributed across x
third_digit <- 1:9
max <- sapply(third_digit,function(d){which.min(sapply(0:40,function(Delta){badness(x,d,Delta,'max')}))-1})
mse <- sapply(third_digit,function(d){which.min(sapply(0:40,function(Delta){badness(x,d,Delta,'mse')}))-1})
mad <- sapply(third_digit,function(d){which.min(sapply(0:40,function(Delta){badness(x,d,Delta,'mad')}))-1})
## NB: in the above three lines, "0:40" is the values of Delta that
## we are looking at. NB: the "-1" is because we start at zero
## [i.e. "0:40"], not one [which would be "1:40"]. This is because
## it is possible for the optimal Delta to be zero, and indeed this
## is the case for third_digit=1 if x\geqapprox 8.9
## Take max as an example. 'max' is a vector of 9 entries showing
## the optimal value of Delta for third_digit = 1,2,...,9 [here,
  ## 'optimal' means 'value of Delta that minimizes the max() of the
## absolute error values'].
out <- rbind(max,mse,mad)
colnames(out) <- as.character(third_digit)
out <- rbind(out,range=apply(out,2,function(x){max(x)-min(x)}))
jj <- function(x){round(x*1e5)}
if(show){
out <- rbind(out,
max_bad = jj(sapply(third_digit,function(i){badness(x,i,max[i],'max')})),
mse_bad = jj(sapply(third_digit,function(i){badness(x,i,mse[i],'mse')})),
mad_bad = jj(sapply(third_digit,function(i){badness(x,i,mad[i],'mad')}))
)
}
return(out)
}
di <- function(x,l,give=FALSE,norm_choice=1){
## Function di() is a cut-down version of differences() which
## returns a list of length two, the first element of which is the
## main table entry for x, the second is the Delta entries.
## Argument 'l' is the length of the sequence; l=10 for the full
## lines but l=5 for the split entries at the top.
## Argument 'norm_choice' specifies which norm to use in badness().
## It specifies which row of the output of differences() to use, so
## currently norm_choice=1 gives max, 2 gives mse, and 3 gives mad.
## To reproduce the 1.1 line (which is split) of the log table:
## R> di(1.1,5)
## $main_table
## 1.1 1.11 1.12 1.13 1.14
## 414 453 492 531 569
##
## $Delta
## 1 2 3 4 5 6 7 8 9
## 4 8 12 15 19 23 27 31 35
## So the above gives the main body of the table ("0414 0453 0492
## 0531 0569") together with the differences ("4 8 12...35"). The
## norm_choice argument of the di() function specifies which row of
## the output of differences() to use. The rows are max, mse, mad;
## these are defined in badness(). So the default of norm_choice=1
## corresponds to the first row, which is max.
x <- seq(from=x,by=0.01,len=l)
main_table <- tableentry(x)
names(main_table) <- x
if(give){
Delta <- differences(x)
} else {
Delta <- differences(x)[norm_choice,] ## Choose the max() badness measure
}
list(
main_table = main_table,
Delta = Delta
)
}
process_rownames <- function(x){
## Function process_rownames() makes the rownames suitable for
  ## passing to LaTeX.  It turns "1" into "1.0" and "1.3" into "1.3",
  ## and blanks the labels of intermediate values such as "1.05", whose
  ## rows hold the continuation of the preceding display row in the
  ## split-row table.
out <- as.character(x)
odd <- round(x*100)%%10 != 0
out[odd] <- " "
out[!odd] <- sprintf("%1.1f",x[!odd])
return(out)
}
if(showdebug){
x <- seq(from=2.4,by=0.01,len=10)
dd <- differences(x)
print(dd)
print(di(1.3,5))
print(di(3.4,10))
}
## Now make the table, variable 'maintable', differences is 'Delta'
xsplit <- seq(from=1.0,to=1.35,by=0.05) # split rows
xfull <- seq(from=1.4,to=9.9,by=0.1) # full rows
table_main <- matrix(NA,length(xsplit),10)
table_Delta <- matrix(NA,0,9) # sic
## First, do the split rows:
for(i in seq_along(xsplit)){
jj <- di(xsplit[i],5)
if(i%%2==1){
indices <- 1:5
} else {
indices <- 6:10
}
table_main[i,indices] <- jj$main_table
table_Delta <- rbind(table_Delta,jj$Delta)
}
## Now the full rows:
for(i in seq_along(xfull)){
jj <- di(xfull[i],10)
table_main <- rbind(table_main,jj$main_table)
table_Delta <- rbind(table_Delta,jj$Delta)
}
rownames(table_main) <- process_rownames(c(xsplit,xfull))
rownames(table_Delta) <- rownames(table_main)
colnames(table_main) <- 0:9
colnames(table_Delta) <- 1:9
## Now make the simple table:
x <- seq(from=1,to=9.9,by=0.1)
simple_main <- matrix(NA,length(x),10)
simple_Delta <- matrix(NA,length(x),9)
## For simple_main, all rows are full:
for(i in seq_along(x)){
jj <- di(x[i],10)
simple_main[i,] <- jj$main_table
simple_Delta[i,] <- jj$Delta
}
rownames(simple_main) <- process_rownames(x)
rownames(simple_Delta) <- rownames(simple_main)
colnames(simple_main) <- 0:9
colnames(simple_Delta) <- 1:9
save(table_main,table_Delta, simple_main,simple_Delta, file="log.Rdata")
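## --- Illustrative sketch (not part of the original file) ---
## The matrices saved above are numeric; as the header comments note, they
## still have to be turned into fixed-width strings (e.g. 86 -> "0086") before
## typesetting, a step assumed to live in "log_number_maker.R".  A minimal
## version of that formatting step:
if(showdebug){
  print(apply(simple_main[1:3, 1:4], c(1, 2), function(v) sprintf("%04i", v)))
}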
/logtable.R | RobinHankin/tables | no_license | R
setClass("PEARSON",
contains = "GeneralTest"
)
setValidity("PEARSON", function(object){
if(object@p.opt == "table")
stop('No "table" option for PEARSON, please use "MC" or "dist".')
})
setMethod("test", signature(object = "PEARSON"), function(object){
  p = object@pdata                   # container holding the input data (retrieved below via its single name)
  r = cor(p[[ls(p)]])[1,2]           # sample Pearson correlation between the two columns
  n = nrow(p[[ls(p)]])               # sample size
  t = r*sqrt((n-2)/(1-r^2))          # Pearson t statistic with n-2 degrees of freedom
  #dist: two-sided p-value from the t distribution with n-2 degrees of freedom
if(object@p.opt == "dist"){
pv = 2*min(pt(t, n-2), 1-pt(t, n-2))
}
  #MC: Monte Carlo p-value, simulating the statistic under independence (bivariate standard normal)
else{
sn = object@num.MC
ts = c()
for(i in 1:sn){
if(object@set.seed){set.seed(i)}
sim = cbind(rnorm(n,0,1), rnorm(n,0,1))
r = cor(sim)[1,2]
ts[i] = r*sqrt((n-2)/(1-r^2))
}
NGE = length(which(ts>t))
NLE = sn-NGE
pv = 2*min(NGE/(sn+1), NLE/(sn+1))
}
  #BS: optional bootstrap confidence interval for the observed test statistic
if(object@BS.CI != 0){
times = 1000
ts = c()
for(i in 1:times){
if(object@set.seed){set.seed(i)}
index = sample(1:n, n, replace = TRUE)
r = cor(p[[ls(p)]][index,])[1,2]
ts[i] = r*sqrt((n-2)/(1-r^2))
}
CI = getCI(t, ts, object@BS.CI)
return(new("testforDEP_result", TS = t, p_value = pv, CI = CI))
}
else{
return(new("testforDEP_result", TS = t, p_value = pv))
}
})
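## --- Illustrative usage sketch (not part of the package source) ---
## The same Pearson t statistic and "dist" p-value computed directly on a plain
## two-column matrix, outside the S4 machinery; the function name is hypothetical.
pearson_t_test <- function(xy){
  r <- cor(xy)[1, 2]
  n <- nrow(xy)
  t <- r * sqrt((n - 2) / (1 - r^2))
  list(TS = t, p_value = 2 * min(pt(t, n - 2), 1 - pt(t, n - 2)))
}
## Example: set.seed(1); pearson_t_test(cbind(rnorm(30), rnorm(30)))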
/testforDEP/R/PEARSON.R | akhikolla/TestedPackages-NoIssues | no_license | R
setClass("PEARSON",
contains = "GeneralTest"
)
setValidity("PEARSON", function(object){
if(object@p.opt == "table")
stop('No "table" option for PEARSON, please use "MC" or "dist".')
})
setMethod("test", signature(object = "PEARSON"), function(object){
p = object@pdata
r = cor(p[[ls(p)]])[1,2]
n = nrow(p[[ls(p)]])
t = r*sqrt((n-2)/(1-r^2))
#dist
if(object@p.opt == "dist"){
pv = 2*min(pt(t, n-2), 1-pt(t, n-2))
}
#MC
else{
sn = object@num.MC
ts = c()
for(i in 1:sn){
if(object@set.seed){set.seed(i)}
sim = cbind(rnorm(n,0,1), rnorm(n,0,1))
r = cor(sim)[1,2]
ts[i] = r*sqrt((n-2)/(1-r^2))
}
NGE = length(which(ts>t))
NLE = sn-NGE
pv = 2*min(NGE/(sn+1), NLE/(sn+1))
}
#BS
if(object@BS.CI != 0){
times = 1000
ts = c()
for(i in 1:times){
if(object@set.seed){set.seed(i)}
index = sample(1:n, n, replace = TRUE)
r = cor(p[[ls(p)]][index,])[1,2]
ts[i] = r*sqrt((n-2)/(1-r^2))
}
CI = getCI(t, ts, object@BS.CI)
return(new("testforDEP_result", TS = t, p_value = pv, CI = CI))
}
else{
return(new("testforDEP_result", TS = t, p_value = pv))
}
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tx-scoreFACT_GOG_Ntx11.R
\name{scoreFACTGOG_Ntx11}
\alias{scoreFACTGOG_Ntx11}
\title{Score the FACT/GOG-Ntx-11}
\usage{
scoreFACTGOG_Ntx11(
df,
id = NULL,
AConly = FALSE,
updateItems = FALSE,
keepNvalid = FALSE
)
}
\arguments{
\item{df}{A data frame with the questionnaire items, appropriately-named.}
\item{id}{(optional) The quoted name of a variable in \code{df} with a unique
value for each row of \code{df}. If an \code{id} variable is provided
here, it will also be included with the scale scores in the output data
frame. This can facilitate accurate merging of the scale scores back into
the input \code{df}.}
\item{AConly}{(optional) Logical, if omitted or set to \code{FALSE} (the
default) then the function will expect \code{df} to contain the
FACT-General items as well as the more specific "Additional Concerns" (AC)
items. If \code{TRUE}, then the function will only find the AC items in
\code{df}, and will only score the subscale(s) produced by the AC items.}
\item{updateItems}{(optional) Logical, if \code{TRUE} then updated versions
of the items (i.e., re-coded for score calculation) will be returned in the
output data frame with the scale scores. The default, \code{FALSE}, does
not save any updated versions of the items in the resulting data frame.
Most users will want to omit this argument or, equivalently, set it to
\code{FALSE}.}
\item{keepNvalid}{(optional) Logical, if \code{TRUE} then the output data
frame will have additional variables containing the number of valid,
non-missing responses from each respondent to the items on a given scale
(see Details). If \code{FALSE} (the default), these variables will not be
in the returned data frame. Most users will want to omit this argument
or, equivalently, set it to \code{FALSE}.}
}
\value{
A data frame with the following scale scores is returned:
\itemize{
\item \strong{PWB} - Physical Well-Being subscale
\item \strong{SWB} - Social/Family Well-Being subscale
\item \strong{EWB} - Emotional Well-Being subscale
\item \strong{FWB} - Functional Well-Being subscale
\item \strong{FACTG} - FACT-G Total Score (PWB+SWB+EWB+FWB)
\item \strong{NtxS11} - FACT/GOG-Ntx-11 subscale
\item \strong{FACTGOG_Ntx11_TOTAL} - FACT/GOG-Ntx-11 Total Score (PWB+SWB+EWB+FWB+NtxS11)
\item \strong{FACTGOG_Ntx11_TOI} - FACT/GOG-Ntx-11 Trial Outcome Index (PWB+FWB+NtxS11)
}
If \code{AConly = TRUE}, the only scale score returned is \strong{NtxS11}.
If a variable was given to the \code{id} argument, then that variable will
also be in the returned data frame. Additional, relatively unimportant,
variables will be returned if \code{updateItems = TRUE} or \code{keepNvalid =
TRUE}.
}
\description{
Generates all of the scores of the FACT/GOG-Ntx-11 (version 4) from
item responses.
}
\details{
Given a data frame that includes all of the FACT/GOG-Ntx-11 (Version
4) items as variables, appropriately named, this function generates all of
the FACT/GOG-Ntx-11 scale scores. It is crucial that the item variables in
the supplied data frame are named according to FACT conventions. For
example, the first physical well-being item should be named GP1, the second
GP2, and so on. Please refer to the materials provided by
\url{http://www.facit.org} for the particular questionnaire you are using.
In particular, refer to the left margin of the official questionnaire
(i.e., from facit.org) for the appropriate item variable names.
This questionnaire consists of two components: (1) FACT-G items and (2)
"Additional Concerns" items. The FACT-G items (G for General) measure
general aspects of QoL common to all cancer patients. The "Additional
Concerns" items measure issues relevant for a specific cancer type,
treatment, or symptom. These two questionnaire components are typically
administered together. In some studies, however, ONLY the "Additional
Concerns" items are administered. The \code{AConly} argument is provided to
accommodate such cases, and should be set to \code{AConly = TRUE} if ONLY
the "Additional Concerns" items were administered.
For more details on the \code{updateItems} and \code{keepNvalid} arguments,
see the documentation entry for \code{\link{scoreFACTG}} and
\code{\link{FACTscorer}}.
}
\section{Note}{
Keep in mind that this function (and R in general) is
case-sensitive.
All items in \code{df} should be \code{numeric} (i.e., of type
\code{integer} or \code{double}).
This function expects missing item responses to be coded as \code{NA},
\code{8}, or \code{9}, and valid item responses to be coded as \code{0},
\code{1}, \code{2}, \code{3}, or \code{4}. Any other value for any of the
items will result in an error message and no scores.
}
\examples{
\dontshow{
## FIRST creating a df with fake item data to score
itemNames <- c('Ntx1', 'Ntx2', 'Ntx3', 'Ntx4', 'Ntx5', 'HI12',
'Ntx6', 'Ntx7', 'Ntx8', 'Ntx9', 'An6')
exampleDat <- make_FACTdata(namesAC = itemNames)
## NOW scoring the items in exampleDat
## Returns data frame with ONLY scale scores
(scoredDat <- scoreFACTGOG_Ntx11(exampleDat))
## Using the id argument (makes merging with original data more fool-proof):
(scoredDat <- scoreFACTGOG_Ntx11(exampleDat, id = "ID"))
## Merge back with original data, exampleDat:
mergeDat <- merge(exampleDat, scoredDat, by = "ID")
names(mergeDat)
## If ONLY the "Additional Concerns" items are in df, use AConly = TRUE
(scoredDat <- scoreFACTGOG_Ntx11(exampleDat, AConly = TRUE))
## AConly = TRUE with an id variable
(scoredDat <- scoreFACTGOG_Ntx11(exampleDat, id = "ID", AConly = TRUE))
## Returns scale scores, plus recoded items (updateItems = TRUE)
## Also illustrates effect of setting keepNvalid = TRUE.
scoredDat <- scoreFACTGOG_Ntx11(exampleDat, updateItems = TRUE, keepNvalid = TRUE)
names(scoredDat)
## Descriptives of scored scales
summary(scoredDat[, c('PWB', 'SWB', 'EWB', 'FWB', 'FACTG',
'NtxS11', 'FACTGOG_Ntx11_TOTAL', 'FACTGOG_Ntx11_TOI')])
}
}
\references{
FACT/GOG-Ntx-11 Scoring Guidelines, available at
\url{http://www.facit.org}
}
\seealso{
This function is very similar to the \code{\link{scoreFACT_B}}
function. See the documentation for \code{\link{scoreFACT_B}} for more
details on the arguments and for examples. Also see the documentation
entry for the \code{\link{FACTscorer}} package. For brevity, examples are
omitted below, but can be accessed by running
\code{example(scoreFACTGOG_Ntx11)}.
}
|
/man/scoreFACTGOG_Ntx11.Rd
|
permissive
|
raybaser/FACTscorer
|
R
| false | true | 6,450 |
rd
|
.pkg_env <- new.env()
.pkg_env$python_cmd <- "python"
set_python_cmd <- function(python){
.pkg_env$python_cmd <- python
}
get_python_cmd <- function(){
.pkg_env$python_cmd
}
get_python_version <- function(){
system2(get_python_cmd(), "--version")
}
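## Example usage (hypothetical session; only the functions defined above are assumed):
# set_python_cmd("python3")   # point the package at a specific interpreter
# get_python_cmd()            # returns "python3"
# get_python_version()        # runs system2("python3", "--version")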
|
/R/settings.R
|
no_license
|
crazycapivara/imapr
|
R
| false | false | 258 |
r
|
## plot4.R: Across the United States, how have emissions from coal
## combustion-related sources changed from 1999-2008?
# Get data & load R libraries
source("get.data.R")
library(dplyr)
library(ggplot2)
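## NOTE: get.data.R (not shown here) is assumed to create the `emissions` data frame
## used below, with at least the columns Emissions, year and EI.Sector.
## A minimal hypothetical sketch of what it might contain:
# NEI <- readRDS("summarySCC_PM25.rds")
# SCC <- readRDS("Source_Classification_Code.rds")
# emissions <- merge(NEI, SCC, by = "SCC")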
# Initialize png device
png(file = "plot4.png")
# Calculate total coal emissions by year and plot using ggplot2 plotting system
emissions.coal <- emissions %>% filter(grepl("[Cc]oal",EI.Sector)) %>%
group_by(year) %>%
summarise(emissions.total = sum(Emissions))
ggplot(emissions.coal, aes(year, emissions.total)) +
geom_line() +
ggtitle("Total Coal Combustion Emissions in the US (1999-2008)")
# Close png device
dev.off()
|
/plot4.R
|
permissive
|
bbrewington/Coursera-EDA-CourseProject2
|
R
| false | false | 648 |
r
|
## YSA Stochastic Model of Population Growth
# With changes in immature stage duration of 1:5 years.
# With adult stage duration of 8:4 years (stage durations sum to 10 years).
# With imputed survival rates.
# Density independent.
#### Libraries ----
library(popbio)
library(tidyverse)
library(patchwork)
#### Functions ----
## Matrix model function
source("R/make_projection_matrix.R")
## Stochastic population growth function
source("R/stochastic_proj.R")
#### YSA Data ----
## YSA breeding biology data 2006-2014 from Bonaire
source("R/YSA_life_history_data.R")
# Mean fecundity
fecundity <- c(0, 0, 1.6*total_summary$mean_hatch[1]*total_summary$mean_nestling_surv[1])
# Mean survival (0.73 is from Salinas-Melgoza & Renton 2007, 0.838 is survival from imputation)
survival <- c(0.73, 0.838, 0.838)
# Current population is estimated around 1000 individuals. 1:1 sex ratio means female population is 500
Nc <- 500
# Time to project to
time <- 100
#### YSA Simulated Vital Rates for LSA ----
set.seed(2021)
# Number of simulations
n_sim <- 1000
# Fledgling survival
s1 <- sapply(1:n_sim, function(x) betaval(0.73, 0.2))
# Immature survival
s2 <- sapply(1:n_sim, function(x) betaval(0.838, 0.051))
# Adult survival
s3 <- sapply(1:n_sim, function(x) betaval(0.838, 0.051))
# Fecundity
m3 <- rlnorm(n = n_sim,
log(1.6*total_summary$mean_hatch[1]*total_summary$mean_nestling_surv[1]),
log(1.01)) #replaced sd with small value for log
## Create lists of survival and fecundity
# Survival
survival_df <- data.frame(s1, s2, s3)
colnames(survival_df)<- c()
survival_list <- asplit(survival_df, 1)
# Fecundity
fecundity_df <- data.frame(0, 0, m3)
colnames(fecundity_df)<- c()
fecundity_list <- asplit(fecundity_df, 1)
#### LSA for Immature Duration of 1 and Adult Duration of 8 ----
## Stage duration
duration <- c(1, 1, 8)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D1A8_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D1A8_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D1A8_matrices[[i]] <- mpm
}
head(D1A8_matrices)
## Repeat Stochastic Population Growth
D1A8_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D1A8_matrices, n = D1A8_n0, time = time)
D1A8_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D1A8_total_pop <- lapply(D1A8_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D1A8_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D1A8_total_pop[[i]])
D1A8_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D1A8_plot_data <- bind_rows(D1A8_df_plots, .id = "id")
# Plot projection
D1A8_plot <- ggplot(D1A8_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D1A8_mean_plot_data <- D1A8_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D1A8_plot_pred <- D1A8_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D1A8_mean_plot <- ggplot(D1A8_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D1A8_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D1A8_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D1A8_total_pop[[i]][time]
D1A8_pop_sizes[i] <- ms
}
#mean pop size
D1A8_pop_mean <- mean(D1A8_pop_sizes)
# standard deviation pop size
D1A8_pop_sd <- sd(D1A8_pop_sizes)
# standard error pop size
D1A8_pop_se <- sd(D1A8_pop_sizes)/sqrt(length(D1A8_pop_sizes))
#### Calculate Stochastic Growth Rate
D1A8_lambda_s <- stoch.growth.rate(D1A8_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D1A8_lambda_s$approx <- exp(D1A8_lambda_s$approx)
D1A8_lambda_s$sim <- exp(D1A8_lambda_s$sim)
D1A8_lambda_s$sim.CI <- exp(D1A8_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D1A8_quasi <- stoch.quasi.ext(D1A8_matrices, n0= D1A8_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D1A8_quasi_df <- data.frame(D1A8_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D1A8_quasi_plot <- ggplot(D1A8_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D1A8_sens <- stoch.sens(D1A8_matrices, tlimit=time)
D1A8_elas <- D1A8_sens$elasticities
D1A8_elas_v <- c(D1A8_elas[1,1], D1A8_elas[1,2], D1A8_elas[1,3], D1A8_elas[2,1], D1A8_elas[2,2], D1A8_elas[2,3], D1A8_elas[3,1], D1A8_elas[3,2], D1A8_elas[3,3])
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")
D1A8_elas_df <- data.frame(D1A8_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D1A8_elas_plot <- ggplot(D1A8_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 2 and Adult Duration of 7 ----
## Stage duration
duration <- c(1, 2, 7)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D2A7_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D2A7_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D2A7_matrices[[i]] <- mpm
}
head(D2A7_matrices)
## Repeat Stochastic Population Growth
D2A7_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D2A7_matrices, n = D2A7_n0, time = time)
D2A7_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D2A7_total_pop <- lapply(D2A7_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D2A7_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D2A7_total_pop[[i]])
D2A7_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D2A7_plot_data <- bind_rows(D2A7_df_plots, .id = "id")
# Plot projection
D2A7_plot <- ggplot(D2A7_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D2A7_mean_plot_data <- D2A7_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D2A7_plot_pred <- D2A7_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D2A7_mean_plot <- ggplot(D2A7_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D2A7_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D2A7_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D2A7_total_pop[[i]][time]
D2A7_pop_sizes[i] <- ms
}
#mean pop size
D2A7_pop_mean <- mean(D2A7_pop_sizes)
# standard deviation pop size
D2A7_pop_sd <- sd(D2A7_pop_sizes)
# standard error pop size
D2A7_pop_se <- sd(D2A7_pop_sizes)/sqrt(length(D2A7_pop_sizes))
#### Calculate Stochastic Growth Rate
D2A7_lambda_s <- stoch.growth.rate(D2A7_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D2A7_lambda_s$approx <- exp(D2A7_lambda_s$approx)
D2A7_lambda_s$sim <- exp(D2A7_lambda_s$sim)
D2A7_lambda_s$sim.CI <- exp(D2A7_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D2A7_quasi <- stoch.quasi.ext(D2A7_matrices, n0= D2A7_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D2A7_quasi_df <- data.frame(D2A7_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D2A7_quasi_plot <- ggplot(D2A7_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D2A7_sens <- stoch.sens(D2A7_matrices, tlimit=time)
D2A7_elas <- D2A7_sens$elasticities
D2A7_elas_v <- c(D2A7_elas[1,1], D2A7_elas[1,2], D2A7_elas[1,3], D2A7_elas[2,1], D2A7_elas[2,2], D2A7_elas[2,3], D2A7_elas[3,1], D2A7_elas[3,2], D2A7_elas[3,3])
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")
D2A7_elas_df <- data.frame(D2A7_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D2A7_elas_plot <- ggplot(D2A7_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 3 and Adult Duration of 6 ----
## Stage duration
duration <- c(1, 3, 6)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
## Initial population vector estimated from stable stage distribution
D3A6_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D3A6_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D3A6_matrices[[i]] <- mpm
}
## Repeat Stochastic Population Growth
D3A6_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D3A6_matrices, n = D3A6_n0, time = time)
D3A6_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D3A6_total_pop <- lapply(D3A6_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D3A6_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D3A6_total_pop[[i]])
D3A6_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D3A6_plot_data <- bind_rows(D3A6_df_plots, .id = "id")
# Plot projection
D3A6_plot <- ggplot(D3A6_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D3A6_mean_plot_data <- D3A6_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D3A6_plot_pred <- D3A6_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D3A6_mean_plot <- ggplot(D3A6_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D3A6_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D3A6_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D3A6_total_pop[[i]][time]
D3A6_pop_sizes[i] <- ms
}
# mean pop size
D3A6_pop_mean <- mean(D3A6_pop_sizes)
# standard deviation pop size
D3A6_pop_sd <- sd(D3A6_pop_sizes)
#standard error pop size
D3A6_pop_se <- sd(D3A6_pop_sizes)/sqrt(length(D3A6_pop_sizes))
#### Calculate Stochastic Growth Rate
D3A6_lambda_s <- stoch.growth.rate(D3A6_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D3A6_lambda_s$approx <- exp(D3A6_lambda_s$approx)
D3A6_lambda_s$sim <- exp(D3A6_lambda_s$sim)
D3A6_lambda_s$sim.CI <- exp(D3A6_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D3A6_quasi <- stoch.quasi.ext(D3A6_matrices, n0= D3A6_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D3A6_quasi_df <- data.frame(D3A6_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D3A6_quasi_plot <- ggplot(D3A6_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D3A6_sens <- stoch.sens(D3A6_matrices, tlimit=time)
D3A6_elas <- D3A6_sens$elasticities
D3A6_elas_v <- c(D3A6_elas[1,1], D3A6_elas[1,2], D3A6_elas[1,3], D3A6_elas[2,1], D3A6_elas[2,2], D3A6_elas[2,3], D3A6_elas[3,1], D3A6_elas[3,2], D3A6_elas[3,3])
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")
D3A6_elas_df <- data.frame(D3A6_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D3A6_elas_plot <- ggplot(D3A6_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 4 and Adult Duration of 5 ----
## Stage duration
duration <- c(1, 4, 5)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D4A5_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D4A5_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D4A5_matrices[[i]] <- mpm
}
## Repeat Stochastic Population Growth
D4A5_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D4A5_matrices, n = D4A5_n0, time = time)
D4A5_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D4A5_total_pop <- lapply(D4A5_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D4A5_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D4A5_total_pop[[i]])
D4A5_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D4A5_plot_data <- bind_rows(D4A5_df_plots, .id = "id")
# Plot projection
D4A5_plot <- ggplot(D4A5_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D4A5_mean_plot_data <- D4A5_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D4A5_plot_pred <- D4A5_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D4A5_mean_plot <- ggplot(D4A5_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D4A5_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D4A5_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D4A5_total_pop[[i]][time]
D4A5_pop_sizes[i] <- ms
}
# mean pop size
D4A5_pop_mean <- mean(D4A5_pop_sizes)
# standard deviation pop size
D4A5_pop_sd <- sd(D4A5_pop_sizes)
# standard error pop size
D4A5_pop_se <- sd(D4A5_pop_sizes)/sqrt(length(D4A5_pop_sizes))
#### Calculate Stochastic Growth Rate
D4A5_lambda_s <- stoch.growth.rate(D4A5_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D4A5_lambda_s$approx <- exp(D4A5_lambda_s$approx)
D4A5_lambda_s$sim <- exp(D4A5_lambda_s$sim)
D4A5_lambda_s$sim.CI <- exp(D4A5_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D4A5_quasi <- stoch.quasi.ext(D4A5_matrices, n0= D4A5_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D4A5_quasi_df <- data.frame(D4A5_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D4A5_quasi_plot <- ggplot(D4A5_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D4A5_sens <- stoch.sens(D4A5_matrices, tlimit=time)
D4A5_elas <- D4A5_sens$elasticities
D4A5_elas_v <- c(D4A5_elas[1,1], D4A5_elas[1,2], D4A5_elas[1,3], D4A5_elas[2,1], D4A5_elas[2,2], D4A5_elas[2,3], D4A5_elas[3,1], D4A5_elas[3,2], D4A5_elas[3,3])
D4A5_elas_df <- data.frame(D4A5_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D4A5_elas_plot <- ggplot(D4A5_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 5 and Adult Duration of 4 ----
## Stage duration
duration <- c(1, 5, 4)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D5A4_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D5A4_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D5A4_matrices[[i]] <- mpm
}
## Repeat Stochastic Population Growth
D5A4_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D5A4_matrices, n = D5A4_n0, time = time)
D5A4_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D5A4_total_pop <- lapply(D5A4_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D5A4_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D5A4_total_pop[[i]])
D5A4_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D5A4_plot_data <- bind_rows(D5A4_df_plots, .id = "id")
# Plot projection
D5A4_plot <- ggplot(D5A4_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D5A4_mean_plot_data <- D5A4_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D5A4_plot_pred <- D5A4_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D5A4_mean_plot <- ggplot(D5A4_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D5A4_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D5A4_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D5A4_total_pop[[i]][time]
D5A4_pop_sizes[i] <- ms
}
# mean pop size
D5A4_pop_mean <- mean(D5A4_pop_sizes)
# standard deviation pop size
D5A4_pop_sd <- sd(D5A4_pop_sizes)
# standard error pop size
D5A4_pop_se <- sd(D5A4_pop_sizes)/sqrt(length(D5A4_pop_sizes))
#### Calculate Stochastic Growth Rate
D5A4_lambda_s <- stoch.growth.rate(D5A4_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D5A4_lambda_s$approx <- exp(D5A4_lambda_s$approx)
D5A4_lambda_s$sim <- exp(D5A4_lambda_s$sim)
D5A4_lambda_s$sim.CI <- exp(D5A4_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D5A4_quasi <- stoch.quasi.ext(D5A4_matrices, n0= D5A4_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D5A4_quasi_df <- data.frame(D5A4_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D5A4_quasi_plot <- ggplot(D5A4_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D5A4_sens <- stoch.sens(D5A4_matrices, tlimit=time)
D5A4_elas <- D5A4_sens$elasticities
D5A4_elas_v <- c(D5A4_elas[1,1], D5A4_elas[1,2], D5A4_elas[1,3], D5A4_elas[2,1], D5A4_elas[2,2], D5A4_elas[2,3], D5A4_elas[3,1], D5A4_elas[3,2], D5A4_elas[3,3])
D5A4_elas_df <- data.frame(D5A4_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D5A4_elas_plot <- ggplot(D5A4_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### PLOTS ----
## Stochastic Population Projection Plot
A10_plot <- D1A8_plot + D2A7_plot + D3A6_plot + D4A5_plot + D5A4_plot
#A8_plot_same_lims <- D1A8_plot + ylim(0, 1.0e+07) + D2A7_plot + ylim(0, 1.0e+07) + D3A6_plot + ylim(0, 1.0e+07) + D4A5_plot + ylim(0, 1.0e+07) + D5A4_plot + ylim(0, 1.0e+07)
## Mean and CI Stochastic Population Plot
A10_mean_plot <- D1A8_mean_plot + D2A7_mean_plot + D3A6_mean_plot + D4A5_mean_plot + D5A4_mean_plot
# Stochastic Population Growth (Lambda s)
A10_lambda_approx <- c(D1A8_lambda_s$approx, D2A7_lambda_s$approx, D3A6_lambda_s$approx, D4A5_lambda_s$approx, D5A4_lambda_s$approx)
A10_lambda_sim <- c(D1A8_lambda_s$sim, D2A7_lambda_s$sim, D3A6_lambda_s$sim, D4A5_lambda_s$sim, D5A4_lambda_s$sim)
A10_lower_CI <- c(D1A8_lambda_s$sim.CI[1], D2A7_lambda_s$sim.CI[1], D3A6_lambda_s$sim.CI[1], D4A5_lambda_s$sim.CI[1], D5A4_lambda_s$sim.CI[1])
A10_upper_CI <- c(D1A8_lambda_s$sim.CI[2], D2A7_lambda_s$sim.CI[2], D3A6_lambda_s$sim.CI[2], D4A5_lambda_s$sim.CI[2], D5A4_lambda_s$sim.CI[2])
stage_duration <- c("1 year", "2 years", "3 years", "4 years", "5 years")
A10_lambda_df <- data.frame(stage_duration, A10_lambda_approx, A10_lambda_sim, A10_upper_CI, A10_lower_CI)
A10_lambda_plot <- ggplot(A10_lambda_df) +
geom_point(aes(x = stage_duration, y = A10_lambda_sim), fill = "grey20", size = 2) +
geom_errorbar(aes(x = stage_duration, ymin = A10_lower_CI, ymax = A10_upper_CI), width = 0.2) +
theme_bw() +
geom_hline(yintercept=1, linetype="dashed", colour = "red") +
scale_x_discrete(labels=c("1 year" = "1", "2 years" = "2", "3 years" = "3", "4 years" = "4", "5 years" = "5")) +
labs(x = "Immature stage duration (years)", y = "Lambda for stochastic population growth")
## Quasi-extinction Threshold Plots
A10_quasi_df<- rbind.data.frame(D1A8_quasi_df, D2A7_quasi_df, D3A6_quasi_df, D4A5_quasi_df, D5A4_quasi_df)
A10_quasi_plot <- ggplot(A10_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
labs(y = "Cumulative probability of quasi-extinction") +
scale_colour_discrete(name = "Immature stage \nduration", breaks = c("D1A8_quasi", "D2A7_quasi", "D3A6_quasi", "D4A5_quasi", "D5A4_quasi"), labels = c("1 year", "2 years", "3 years", "4 years", "5 years"))
#A10_quasi_plots <- D1A8_quasi_plot + D2A7_quasi_plot + D3A6_quasi_plot + D4A5_quasi_plot + D5A4_quasi_plot
#Elasticity analysis plots
A10_elas_df<- rbind.data.frame(D1A8_elas_df, D2A7_elas_df, D3A6_elas_df, D4A5_elas_df, D5A4_elas_df)
A10_elas_plot <- ggplot(A10_elas_df, aes(x = stage, y= elasticity, fill = duration)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(position = "dodge", colour = "black") +
scale_fill_manual(name = "Immature stage \nduration", breaks = c("D1A8_elas_v", "D2A7_elas_v", "D3A6_elas_v", "D4A5_elas_v", "D5A4_elas_v"), labels = c("1 year", "2 years", "3 years", "4 years", "5 years"), values = c("grey65", "grey40", "grey35", "grey15", "grey0"))
#image2(D1A8_elas)
#image2(D2A7_elas)
#image2(D3A6_elas)
#image2(D4A5_elas)
#image2(D5A4_elas)
|
/R/duration_10.R
|
no_license
|
andbeck/Parrots_2021
|
R
| false | false | 27,152 |
r
|
## YSA Stochastic Model of Population Growth
# With changes in immature stage duration of 1:5 years.
# With adult stage duration of 10:6 years.
# With imputed survival rates.
# Density independent.
#### Libraries ----
library(popbio)
library(tidyverse)
library(patchwork)
#### Functions ----
## Matrix model function
source("R/make_projection_matrix.R")
## Stochastic population growth function
source("R/stochastic_proj.R")
#### YSA Data ----
## YSA breeding biology data 2006-2014 from Bonaire
source("R/YSA_life_history_data.R")
# Mean fecundity
fecundity <- c(0, 0, 1.6*total_summary$mean_hatch[1]*total_summary$mean_nestling_surv[1])
# Mean survival (0.73 is from Salinas-Melgoza & Renton 2007, 0.838 is survival from imputation)
survival <- c(0.73, 0.838, 0.838)
# Current population is estimated around 1000 individuals. 1:1 sex ratio means female population is 500
Nc <- 500
# Time to project to
time <- 100
#### YSA Simulated Vital Rates for LSA ----
set.seed(2021)
# Number of simulations
n_sim <- 1000
# Fledgling survival
s1 <- sapply(1:n_sim, function(x) betaval(0.73, 0.2))
# Immature survival
s2 <- sapply(1:n_sim, function(x) betaval(0.838, 0.051))
# Adult survival
s3 <- sapply(1:n_sim, function(x) betaval(0.838, 0.051))
# Fecundity
m3 <- rlnorm(n = n_sim,
log(1.6*total_summary$mean_hatch[1]*total_summary$mean_nestling_surv[1]),
log(1.01)) #replaced sd with small value for log
## Create lists of survival and fecundity
# Survival
survival_df <- data.frame(s1, s2, s3)
colnames(survival_df)<- c()
survival_list <- asplit(survival_df, 1)
# Fecundity
fecundity_df <- data.frame(0, 0, m3)
colnames(fecundity_df)<- c()
fecundity_list <- asplit(fecundity_df, 1)
#### LSA for Immature Duration of 1 and Adult Duration 0f 8 ----
## Stage duration
duration <- c(1, 1, 8)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D1A8_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D1A8_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D1A8_matrices[[i]] <- mpm
}
head(D1A8_matrices)
## Repeat Stochastic Population Growth
D1A8_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D1A8_matrices, n = D1A8_n0, time = time)
D1A8_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D1A8_total_pop <- lapply(D1A8_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D1A8_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D1A8_total_pop[[i]])
D1A8_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D1A8_plot_data <- bind_rows(D1A8_df_plots, .id = "id")
# Plot projection
D1A8_plot <- ggplot(D1A8_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D1A8_mean_plot_data <- D1A8_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D1A8_plot_pred <- D1A8_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D1A8_mean_plot <- ggplot(D1A8_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D1A8_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D1A8_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D1A8_total_pop[[i]][time]
D1A8_pop_sizes[i] <- ms
}
#mean pop size
D1A8_pop_mean <- mean(D1A8_pop_sizes)
# standard deviation pop size
D1A8_pop_sd <- sd(D1A8_pop_sizes)
# standard error pop size
D1A8_pop_se <- sd(D1A8_pop_sizes)/sqrt(length(D1A8_pop_sizes))
#### Calculate Stochastic Growth Rate
D1A8_lambda_s <- stoch.growth.rate(D1A8_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D1A8_lambda_s$approx <- exp(D1A8_lambda_s$approx)
D1A8_lambda_s$sim <- exp(D1A8_lambda_s$sim)
D1A8_lambda_s$sim.CI <- exp(D1A8_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D1A8_quasi <- stoch.quasi.ext(D1A8_matrices, n0= D1A8_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D1A8_quasi_df <- data.frame(D1A8_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D1A8_quasi_plot <- ggplot(D1A8_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D1A8_sens <- stoch.sens(D1A8_matrices, tlimit=time)
D1A8_elas <- D1A8_sens$elasticities
D1A8_elas_v <- c(D1A8_elas[1,1], D1A8_elas[1,2], D1A8_elas[1,3], D1A8_elas[2,1], D1A8_elas[2,2], D1A8_elas[2,3], D1A8_elas[3,1], D1A8_elas[3,2], D1A8_elas[3,3])
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")
D1A8_elas_df <- data.frame(D1A8_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D1A8_elas_plot <- ggplot(D1A8_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 2 and Adult Duration 0f 7 ----
## Stage duration
duration <- c(1, 2, 7)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D2A7_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D2A7_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D2A7_matrices[[i]] <- mpm
}
head(D2A7_matrices)
## Repeat Stochastic Population Growth
D2A7_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D2A7_matrices, n = D2A7_n0, time = time)
D2A7_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D2A7_total_pop <- lapply(D2A7_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D2A7_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D2A7_total_pop[[i]])
D2A7_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D2A7_plot_data <- bind_rows(D2A7_df_plots, .id = "id")
# Plot projection
D2A7_plot <- ggplot(D2A7_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D2A7_mean_plot_data <- D2A7_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D2A7_plot_pred <- D2A7_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D2A7_mean_plot <- ggplot(D2A7_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D2A7_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D2A7_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D2A7_total_pop[[i]][time]
D2A7_pop_sizes[i] <- ms
}
#mean pop size
D2A7_pop_mean <- mean(D2A7_pop_sizes)
# standard deviation pop size
D2A7_pop_sd <- sd(D2A7_pop_sizes)
# standard error pop size
D2A7_pop_se <- sd(D2A7_pop_sizes)/sqrt(length(D2A7_pop_sizes))
#### Calculate Stochastic Growth Rate
D2A7_lambda_s <- stoch.growth.rate(D2A7_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D2A7_lambda_s$approx <- exp(D2A7_lambda_s$approx)
D2A7_lambda_s$sim <- exp(D2A7_lambda_s$sim)
D2A7_lambda_s$sim.CI <- exp(D2A7_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D2A7_quasi <- stoch.quasi.ext(D2A7_matrices, n0= D2A7_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D2A7_quasi_df <- data.frame(D2A7_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D2A7_quasi_plot <- ggplot(D2A7_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D2A7_sens <- stoch.sens(D2A7_matrices, tlimit=time)
D2A7_elas <- D2A7_sens$elasticities
D2A7_elas_v <- c(D2A7_elas[1,1], D2A7_elas[1,2], D2A7_elas[1,3], D2A7_elas[2,1], D2A7_elas[2,2], D2A7_elas[2,3], D2A7_elas[3,1], D2A7_elas[3,2], D2A7_elas[3,3])
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")
D2A7_elas_df <- data.frame(D2A7_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D2A7_elas_plot <- ggplot(D2A7_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 3 and Adult Duration of 6 ----
## Stage duration
duration <- c(1, 3, 6)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
## Initial population vector estimated from stable stage distribution
D3A6_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D3A6_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D3A6_matrices[[i]] <- mpm
}
## Repeat Stochastic Population Growth
D3A6_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D3A6_matrices, n = D3A6_n0, time = time)
D3A6_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D3A6_total_pop <- lapply(D3A6_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D3A6_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D3A6_total_pop[[i]])
D3A6_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D3A6_plot_data <- bind_rows(D3A6_df_plots, .id = "id")
# Plot projection
D3A6_plot <- ggplot(D3A6_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D3A6_mean_plot_data <- D3A6_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D3A6_plot_pred <- D3A6_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D3A6_mean_plot <- ggplot(D3A6_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D3A6_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D3A6_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D3A6_total_pop[[i]][time]
D3A6_pop_sizes[i] <- ms
}
# mean pop size
D3A6_pop_mean <- mean(D3A6_pop_sizes)
# standard deviation pop size
D3A6_pop_sd <- sd(D3A6_pop_sizes)
#standard error pop size
D3A6_pop_se <- sd(D3A6_pop_sizes)/sqrt(length(D3A6_pop_sizes))
#### Calculate Stochastic Growth Rate
D3A6_lambda_s <- stoch.growth.rate(D3A6_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D3A6_lambda_s$approx <- exp(D3A6_lambda_s$approx)
D3A6_lambda_s$sim <- exp(D3A6_lambda_s$sim)
D3A6_lambda_s$sim.CI <- exp(D3A6_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D3A6_quasi <- stoch.quasi.ext(D3A6_matrices, n0= D3A6_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D3A6_quasi_df <- data.frame(D3A6_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D3A6_quasi_plot <- ggplot(D3A6_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D3A6_sens <- stoch.sens(D3A6_matrices, tlimit=time)
D3A6_elas <- D3A6_sens$elasticities
D3A6_elas_v <- c(D3A6_elas[1,1], D3A6_elas[1,2], D3A6_elas[1,3], D3A6_elas[2,1], D3A6_elas[2,2], D3A6_elas[2,3], D3A6_elas[3,1], D3A6_elas[3,2], D3A6_elas[3,3])
stage<-c("m1", "m2", "m3", "s1", "s2", "s3", "g1", "g2", "s3")
D3A6_elas_df <- data.frame(D3A6_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D3A6_elas_plot <- ggplot(D3A6_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 4 and Adult Duration of 5 ----
## Stage duration
duration <- c(1, 4, 5)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D4A5_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D4A5_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D4A5_matrices[[i]] <- mpm
}
##Repeat Stochastic Population Growth
D4A5_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D4A5_matrices, n = D4A5_n0, time = time)
D4A5_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D4A5_total_pop <- lapply(D4A5_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D4A5_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D4A5_total_pop[[i]])
D4A5_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D4A5_plot_data <- bind_rows(D4A5_df_plots, .id = "id")
# Plot projection
D4A5_plot <- ggplot(D4A5_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D4A5_mean_plot_data <- D4A5_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D4A5_plot_pred <- D4A5_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D4A5_mean_plot <- ggplot(D4A5_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D4A5_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D4A5_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D4A5_total_pop[[i]][time]
D4A5_pop_sizes[i] <- ms
}
# mean pop size
D4A5_pop_mean <- mean(D4A5_pop_sizes)
# standard deviation pop size
D4A5_pop_sd <- sd(D4A5_pop_sizes)
# standard error pop size
D4A5_pop_se <- sd(D4A5_pop_sizes)/sqrt(length(D4A5_pop_sizes))
#### Calculate Stochastic Growth Rate
D4A5_lambda_s <- stoch.growth.rate(D4A5_matrices, prob = NULL, maxt = time,
verbose = TRUE)
#convert from log
D4A5_lambda_s$approx <- exp(D4A5_lambda_s$approx)
D4A5_lambda_s$sim <- exp(D4A5_lambda_s$sim)
D4A5_lambda_s$sim.CI <- exp(D4A5_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D4A5_quasi <- stoch.quasi.ext(D4A5_matrices, n0= D4A5_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
# Plot quasi-extinction probabilities
D4A5_quasi_df <- data.frame(D4A5_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D4A5_quasi_plot <- ggplot(D4A5_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D4A5_sens <- stoch.sens(D4A5_matrices, tlimit=time)
D4A5_elas <- D4A5_sens$elasticities
D4A5_elas_v <- c(D4A5_elas[1,1], D4A5_elas[1,2], D4A5_elas[1,3], D4A5_elas[2,1], D4A5_elas[2,2], D4A5_elas[2,3], D4A5_elas[3,1], D4A5_elas[3,2], D4A5_elas[3,3])
D4A5_elas_df <- data.frame(D4A5_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D4A5_elas_plot <- ggplot(D4A5_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### LSA for Immature Duration of 5 and Adult Duration of 4 ----
## Stage duration
duration <- c(1, 5, 4)
## Initial Population Vector
# Stable stage distribution of mean matrix
stable_stage <- make_projection_matrix(survival, fecundity, duration) %>%
stable.stage() %>% as.list()
# Initial population vector estimated from stable stage distribution
D5A4_n0 <- c(stable_stage[[1]]*Nc, stable_stage[[2]]*Nc, stable_stage[[3]]*Nc)
### Life-stage Simulation Analysis for Population in Stochastic Environment
## Stage duration list - repeat so that length is the same as survival and fecundity
duration_list <- rep(list(duration), times = n_sim)
## Simulate list of matrices using the vital rates and make_projection_matrix function
D5A4_matrices <- list()
for(i in 1:n_sim){
mpm <- make_projection_matrix(survival_list[[i]],
fecundity_list[[i]],
duration_list[[i]])
D5A4_matrices[[i]] <- mpm
}
## Repeat Stochastic Population Growth
D5A4_stochastic_pop <- list()
for(i in 1:n_sim){
mp <- stochastic_proj(D5A4_matrices, n = D5A4_n0, time = time)
D5A4_stochastic_pop[i] <- mp
}
# Multiply female population sizes by 2 to get total population size
D5A4_total_pop <- lapply(D5A4_stochastic_pop, "*", 2)
# Create for loop for pop sizes in each projection as a data frame to plot with ggplot
D5A4_df_plots <- list()
for(i in 1:n_sim){
mpl <- data.frame(time = 1:time, pop_sizes = D5A4_total_pop[[i]])
D5A4_df_plots[[i]] <- mpl
}
# Add identifier for each simulation
D5A4_plot_data <- bind_rows(D5A4_df_plots, .id = "id")
# Plot projection
D5A4_plot <- ggplot(D5A4_plot_data, aes(time, pop_sizes, fill=id)) +
geom_line() +
theme_classic() +
labs(x = "Time (years)", y = "Total population size")
# Mean population size time series with 95% confidence intervals from LSA
D5A4_mean_plot_data <- D5A4_plot_data %>%
group_by(time) %>%
summarise(mean = mean(pop_sizes),
se_pop_size = sd(pop_sizes)/sqrt(length(pop_sizes)))
# Get predictions and 95% CI
D5A4_plot_pred <- D5A4_mean_plot_data %>% mutate(
pop_size = mean,
# lower limit 95% CI
ll = mean - 1.96 * se_pop_size,
# upper limit 95% CI
ul = mean + 1.96 * se_pop_size
)
# Plot mean population projection with CIs
D5A4_mean_plot <- ggplot(D5A4_plot_pred, aes(x= time, y = mean)) +
geom_line() +
geom_ribbon(data = D5A4_plot_pred, aes(ymin = ll, ymax = ul), alpha = 0.2) +
theme_classic() +
labs(x = "Time (years)", y = "Mean total population size")
#### Calculate final mean population size and standard deviation from LSA
D5A4_pop_sizes <- numeric()
for (i in 1:n_sim) {
ms <- D5A4_total_pop[[i]][time]
D5A4_pop_sizes[i] <- ms
}
# mean pop size
D5A4_pop_mean <- mean(D5A4_pop_sizes)
# standard deviation pop size
D5A4_pop_sd <- sd(D5A4_pop_sizes)
# standard error pop size
D5A4_pop_se <- sd(D5A4_pop_sizes)/sqrt(length(D5A4_pop_sizes))
#### Calculate Stochastic Growth Rate
D5A4_lambda_s <- stoch.growth.rate(D5A4_matrices, prob = NULL, maxt = time,
verbose = TRUE)
# stoch.growth.rate returns log stochastic growth rates (Tuljapurkar approximation and simulation), so exponentiate to recover lambda
D5A4_lambda_s$approx <- exp(D5A4_lambda_s$approx)
D5A4_lambda_s$sim <- exp(D5A4_lambda_s$sim)
D5A4_lambda_s$sim.CI <- exp(D5A4_lambda_s$sim.CI)
#### Calculate Quasi-extinction Probability
D5A4_quasi <- stoch.quasi.ext(D5A4_matrices, n0= D5A4_n0, Nx = 50, tmax = time, maxruns = 1,
nreps = 5000, prob = NULL, sumweight = NULL, verbose = TRUE)
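# Nx = 50 is the quasi-extinction threshold; the output gives the cumulative
# probability that the projected population falls below this threshold by each year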
# Plot quasi-extinction probabilities
D5A4_quasi_df <- data.frame(D5A4_quasi, "Year" = 1:time) %>% gather("sim", "quasi", -"Year")
D5A4_quasi_plot <- ggplot(D5A4_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
theme(legend.position = "none") +
labs(y = "Cumulative probability of quasi-extinction")
#### Calculate Stochastic Elasticities
D5A4_sens <- stoch.sens(D5A4_matrices, tlimit=time)
D5A4_elas <- D5A4_sens$elasticities
D5A4_elas_v <- c(D5A4_elas[1,1], D5A4_elas[1,2], D5A4_elas[1,3], D5A4_elas[2,1], D5A4_elas[2,2], D5A4_elas[2,3], D5A4_elas[3,1], D5A4_elas[3,2], D5A4_elas[3,3])
D5A4_elas_df <- data.frame(D5A4_elas_v) %>% gather("duration", "elasticity") %>% data.frame(stage)
D5A4_elas_plot <- ggplot(D5A4_elas_df, aes(x = stage, y= elasticity)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(fill = "grey20")
#### PLOTS ----
## Stochastic Population Projection Plot
A10_plot <- D1A8_plot + D2A7_plot + D3A6_plot + D4A5_plot + D5A4_plot
#A8_plot_same_lims <- D1A8_plot + ylim(0, 1.0e+07) + D2A7_plot + ylim(0, 1.0e+07) + D3A6_plot + ylim(0, 1.0e+07) + D4A5_plot + ylim(0, 1.0e+07) + D5A4_plot + ylim(0, 1.0e+07)
## Mean and CI Stochastic Population Plot
A10_mean_plot <- D1A8_mean_plot + D2A7_mean_plot + D3A6_mean_plot + D4A5_mean_plot + D5A4_mean_plot
# Stochastic Population Growth (Lambda s)
A10_lambda_approx <- c(D1A8_lambda_s$approx, D2A7_lambda_s$approx, D3A6_lambda_s$approx, D4A5_lambda_s$approx, D5A4_lambda_s$approx)
A10_lambda_sim <- c(D1A8_lambda_s$sim, D2A7_lambda_s$sim, D3A6_lambda_s$sim, D4A5_lambda_s$sim, D5A4_lambda_s$sim)
A10_lower_CI <- c(D1A8_lambda_s$sim.CI[1], D2A7_lambda_s$sim.CI[1], D3A6_lambda_s$sim.CI[1], D4A5_lambda_s$sim.CI[1], D5A4_lambda_s$sim.CI[1])
A10_upper_CI <- c(D1A8_lambda_s$sim.CI[2], D2A7_lambda_s$sim.CI[2], D3A6_lambda_s$sim.CI[2], D4A5_lambda_s$sim.CI[2], D5A4_lambda_s$sim.CI[2])
stage_duration <- c("1 year", "2 years", "3 years", "4 years", "5 years")
A10_lambda_df <- data.frame(stage_duration, A10_lambda_approx, A10_lambda_sim, A10_upper_CI, A10_lower_CI)
A10_lambda_plot <- ggplot(A10_lambda_df) +
geom_point(aes(x = stage_duration, y = A10_lambda_sim), fill = "grey20", size = 2) +
geom_errorbar(aes(x = stage_duration, ymin = A10_lower_CI, ymax = A10_upper_CI), width = 0.2) +
theme_bw() +
geom_hline(yintercept=1, linetype="dashed", colour = "red") +
scale_x_discrete(labels=c("1 year" = "1", "2 years" = "2", "3 years" = "3", "4 years" = "4", "5 years" = "5")) +
labs(x = "Immature stage duration (years)", y = "Lambda for stochastic population growth")
## Quasi-extinction Threshold Plots
A10_quasi_df<- rbind.data.frame(D1A8_quasi_df, D2A7_quasi_df, D3A6_quasi_df, D4A5_quasi_df, D5A4_quasi_df)
A10_quasi_plot <- ggplot(A10_quasi_df, aes(x = Year, y = quasi, colour = sim)) +
geom_line() +
theme_bw() +
ylim(0, 1) +
labs(y = "Cumulative probability of quasi-extinction") +
scale_colour_discrete(name = "Immature stage \nduration", breaks = c("D1A8_quasi", "D2A7_quasi", "D3A6_quasi", "D4A5_quasi", "D5A4_quasi"), labels = c("1 year", "2 years", "3 years", "4 years", "5 years"))
#A10_quasi_plots <- D1A8_quasi_plot + D2A7_quasi_plot + D3A6_quasi_plot + D4A5_quasi_plot + D5A4_quasi_plot
#Elasticity analysis plots
A10_elas_df<- rbind.data.frame(D1A8_elas_df, D2A7_elas_df, D3A6_elas_df, D4A5_elas_df, D5A4_elas_df)
A10_elas_plot <- ggplot(A10_elas_df, aes(x = stage, y= elasticity, fill = duration)) +
labs(x = "Vital rate", y = "Stochastic elasticity") +
theme_bw() +
geom_col(position = "dodge", colour = "black") +
scale_fill_manual(name = "Immature stage \nduration", breaks = c("D1A8_elas_v", "D2A7_elas_v", "D3A6_elas_v", "D4A5_elas_v", "D5A4_elas_v"), labels = c("1 year", "2 years", "3 years", "4 years", "5 years"), values = c("grey65", "grey40", "grey35", "grey15", "grey0"))
#image2(D1A8_elas)
#image2(D2A7_elas)
#image2(D3A6_elas)
#image2(D4A5_elas)
#image2(D5A4_elas)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transition-events.R
\name{transition_evetns}
\alias{transition_evetns}
\title{Transition individual events in and out}
\usage{
transition_evetns(start, end = NULL, range = NULL, enter_length = NULL,
exit_length = NULL)
}
\arguments{
\item{start, end}{The unquoted expression giving the start and end time of
each event. If \code{end} is \code{NULL} the event will be treated as having no duration.}
\item{range}{The range the animation should span. Defaults to the range of
the events from when they enter until they have exited.}
\item{enter_length, exit_length}{The unquoted expression giving the length to
be used for enter and exit for each event.}
}
\description{
This transition treats each visual element as an event in time and allows you
to control the duration and enter/exit length individually for each event.
}
\section{Label variables}{
\code{transition_evetns} makes the following variables available for string
literal interpretation:
\itemize{
\item \strong{frame_time} gives the time that the current frame corresponds to
}
}
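\examples{
# Illustrative sketch only, not taken from the package sources: it assumes a
# data frame `events` with columns x, y, start and end giving each event's span.
\dontrun{
ggplot(events, aes(x, y)) +
  geom_point() +
  transition_evetns(start = start, end = end,
                    enter_length = 1, exit_length = 1)
}
}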
\seealso{
Other transitions: \code{\link{transition_components}},
\code{\link{transition_layers}},
\code{\link{transition_manual}},
\code{\link{transition_null}},
\code{\link{transition_states}},
\code{\link{transition_time}}
}
\concept{transitions}
|
/man/transition_evetns.Rd
|
no_license
|
chasemc/gganimate
|
R
| false | true | 1,383 |
rd
|
#' Add a sparkline column to a DT datatable
#'
#' @param table The DT
#' @param columns column names or indices of the data to be used as sparkline columns
#' @param sparklineOpts options passed to sparkline - use spark_options to create
#' @param class_suffix optional suffix (prefixed with spark) - randomly generated if not provided
#' @param width column width
#' @param ... Other options passed to the column definition
#' @return table DT with updated options and dependencies
#' @export
formatSparkline <- function(table, columns, sparklineOpts, class_suffix = '', width = "5%", ...) {
if (class_suffix == '') class_suffix = paste0(sample(letters, 12), collapse = "")
#Get target columns
if (inherits(columns, 'formula')) columns = all.vars(columns)
x = table$x
colnames = base::attr(x, 'colnames', exact = TRUE)
rownames = base::attr(x, 'rownames', exact = TRUE)
#append new column definition
options <- x$options
options <- appendColumnDefs(options, colSparkline(columns, colnames, rownames, class_suffix, ...))
#append new callback
js <- options$fnDrawCallback
options$fnDrawCallback <- appendfnDrawCallback(js, tplSparkline(class_suffix, sparklineOpts))
table$x$options <- options
#add widget dependency
table <- sparkline::spk_add_deps(table)
table
}
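# Illustrative usage sketch (not from the original file): assumes `df` has a
# character column "trend" of comma-separated values, which jQuery sparkline's
# 'html' mode reads from the rendered cell contents.
# dt <- DT::datatable(df)
# dt <- formatSparkline(dt, columns = "trend",
#                       sparklineOpts = "type: 'line', width: '80px'")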
#' create a list to be used as a column definition in a DT datatable
#'
#' @param columns column names or indices
#' @param colnames column names in data set
#' @param rownames row names in data set
#' @param class_suffix class name will be spark + suffix
#' @param width Width of column
#' @param render_js alternative js to render
#' @param ... additional options passed to the column definition
colSparkline <- function(columns, colnames, rownames, class_suffix = '', width = "5%", render_js = NULL, ...) {
i <- name2int(columns, colnames, rownames)
class_name <- paste0("spark",class_suffix)
if (is.null(render_js)) {
render_js <- htmlwidgets::JS("function(data, type, full){ ",
paste0("return '<span class=", class_name, ">' + data + '</span>' }"))
}
list(
targets = i,
width = width,
render = render_js,
...
)
}
#' Create a sparkline template to be used in the fnDrawCallback of the DT datatable
#'
#' @param class_name will be prefixed with spark
#' @param spark_opts Spark options as javascript (generate with spark_options_)
#' @return single row of javascript to be appended into the fnDrawCallback of the DT
tplSparkline <- function(class_name, spark_opts) {
cl <- paste0("spark", class_name)
sparkline_drawbacks <- paste0(
"$('.", cl, ":not(:has(canvas))').sparkline('html', { ", spark_opts," });"
)
sparkline_drawbacks
}
#' Append a column definition to the column definitions in options
#'
#' @param options list of DT options
#' @param def list representing column definition
#' @return options appended with new definition
appendColumnDefs <- function(options, def) {
defs <- options[['columnDefs']]
if (is.null(defs)) defs <- list()
defs[[length(defs) + 1]] <- def
options$columnDefs <- defs
options
}
#' Append a fnDrawCallback row into the existing js
#'
#' @param js The existing js
#' @param template the js to be inserted into the fnDrawCallback
#' @return js containing the fnDrawCallback with the template inserted
appendfnDrawCallback <- function(js, template) {
js <- if (length(js) == 0) c('function (oSettings, json) {', '}') else {
unlist(strsplit(as.character(js), '\n'))
}
htmlwidgets::JS(append(
js, after = 1,
template
))
}
# turn character/logical indices to numeric indices
name2int <- function(name, names, rownames) {
if (is.numeric(name)) {
i = if (all(name >= 0)) name else seq_along(names)[name]
if (!rownames) i = i - 1
return(i)
}
i = unname(setNames(seq_along(names), names)[name]) - 1
if (any(is.na(i))) stop(
'You specified the columns: ', paste(name, collapse = ', '), ', ',
'but the column names of the data are ', paste(names, collapse = ', ')
)
i
}
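# e.g. name2int("wt", c("mpg", "wt"), rownames = FALSE) returns 1, the 0-based
# column index expected by the DataTables column definitions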
|
/R/dt_sparkline.R
|
no_license
|
mrhopko/DTHelper
|
R
| false | false | 4,189 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_seconds_as_duration.R
\name{seconds_as_duration}
\alias{seconds_as_duration}
\title{Format Seconds Nicely}
\usage{
seconds_as_duration(x, format = "\%02d:\%02d:\%02d")
}
\arguments{
\item{x}{Vector of seconds (durations). Numeric or coercible to numeric}
\item{format}{for consumption by sprintf}
}
\value{
a vector of characters
}
\description{
Format Seconds Nicely
}
\examples{
seconds_as_duration(c(100,200,3024, 16254))
seconds_as_duration(c(100,200,3024, 16254), format=NULL)
seconds_as_duration(c(100,200,3024, 16254), format='\%02dh\%02dm\%02ds')
}
|
/man/seconds_as_duration.Rd
|
no_license
|
dietrichson/sashaUseful
|
R
| false | true | 643 |
rd
|
library(QDComparison)
### Name: eLP.poly
### Title: A function to compute the LP basis functions
### Aliases: eLP.poly
### Keywords: Helper Functions
### ** Examples
x <- c(rep(0,200),rep(1,200))
m <- 6
eLP.poly(x,m)
|
/data/genthat_extracted_code/QDComparison/examples/eLP.poly.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 223 |
r
|
# TODO: Add comment
#
# Author: Administrator
###############################################################################
setup <- function(args='30') {
n<-as.integer(args[1])
if(is.na(n)){ n <- 30 }
return(n)
}
run <- function(n=30)
{
if (n < 2) { 1; }
else {run(n - 1) + run(n - 2);}
}
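# Example: run(10) returns 89 and the default run(30) returns 1346269, i.e. the
# Fibonacci sequence under the convention run(0) = run(1) = 1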
if (!exists('harness_argc')) {
n <- setup(commandArgs(TRUE))
run(n)
}
|
/scalar/fib/fib_rec.R
|
permissive
|
rbenchmark/benchmarks
|
R
| false | false | 411 |
r
|
#' Lay out panels in a grid.
#'
#' @param facets a formula with the rows (of the tabular display) on the LHS
#' and the columns (of the tabular display) on the RHS; the dot in the
#' formula is used to indicate there should be no faceting on this dimension
#' (either row or column). The formula can also be provided as a string
#' instead of a classical formula object
#' @param margins either a logical value or a character
#' vector. Margins are additional facets which contain all the data
#' for each of the possible values of the faceting variables. If
#' \code{FALSE}, no additional facets are included (the
#' default). If \code{TRUE}, margins are included for all faceting
#' variables. If specified as a character vector, it is the names of
#' variables for which margins are to be created.
#' @param scales Are scales shared across all facets (the default,
#' \code{"fixed"}), or do they vary across rows (\code{"free_x"}),
#' columns (\code{"free_y"}), or both rows and columns (\code{"free"})
#' @param space If \code{"fixed"}, the default, all panels have the same size.
#' If \code{"free_y"} their height will be proportional to the length of the
#' y scale; if \code{"free_x"} their width will be proportional to the
#' length of the x scale; or if \code{"free"} both height and width will
#' vary. This setting has no effect unless the appropriate scales also vary.
#' @param labeller A function that takes one data frame of labels and
#' returns a list or data frame of character vectors. Each input
#' column corresponds to one factor. Thus there will be more than
#' one with formulae of the type \code{~cyl + am}. Each output
#' column gets displayed as one separate line in the strip
#' label. This function should inherit from the "labeller" S3 class
#' for compatibility with \code{\link{labeller}()}. See
#' \code{\link{label_value}} for more details and pointers to other
#' options.
#' @param as.table If \code{TRUE}, the default, the facets are laid out like
#' a table with highest values at the bottom-right. If \code{FALSE}, the
#' facets are laid out like a plot with the highest value at the top-right.
#' @param switch By default, the labels are displayed on the top and
#' right of the plot. If \code{"x"}, the top labels will be
#' displayed to the bottom. If \code{"y"}, the right-hand side
#' labels will be displayed to the left. Can also be set to
#' \code{"both"}.
#' @param shrink If \code{TRUE}, will shrink scales to fit output of
#' statistics, not raw data. If \code{FALSE}, will be range of raw data
#' before statistical summary.
#' @param drop If \code{TRUE}, the default, all factor levels not used in the
#' data will automatically be dropped. If \code{FALSE}, all factor levels
#' will be shown, regardless of whether or not they appear in the data.
#' @export
#' @examples
#' \donttest{
#' p <- ggplot(mtcars, aes(mpg, wt)) + geom_point()
#' # With one variable
#' p + facet_grid(. ~ cyl)
#' p + facet_grid(cyl ~ .)
#'
#' # With two variables
#' p + facet_grid(vs ~ am)
#' p + facet_grid(am ~ vs)
#' p + facet_grid(vs ~ am, margins=TRUE)
#'
#' # To change plot order of facet grid,
#' # change the order of variable levels with factor()
#'
#' set.seed(6809)
#' diamonds <- diamonds[sample(nrow(diamonds), 1000), ]
#' diamonds$cut <- factor(diamonds$cut,
#' levels = c("Ideal", "Very Good", "Fair", "Good", "Premium"))
#'
#' # Repeat first example with new order
#' p <- ggplot(diamonds, aes(carat, ..density..)) +
#' geom_histogram(binwidth = 1)
#' p + facet_grid(. ~ cut)
#'
#' g <- ggplot(mtcars, aes(mpg, wt)) +
#' geom_point()
#' g + facet_grid(. ~ vs + am)
#' g + facet_grid(vs + am ~ .)
#'
#' # You can also use strings, which makes it a little easier
#' # when writing functions that generate faceting specifications
#'
#' p + facet_grid("cut ~ .")
#'
#' # see also ?plotmatrix for the scatterplot matrix
#'
#' # If there isn't any data for a given combination, that panel
#' # will be empty
#'
#' g + facet_grid(cyl ~ vs)
#'
#' # If you combine a facetted dataset with a dataset that lacks those
#' # facetting variables, the data will be repeated across the missing
#' # combinations:
#'
#' g + facet_grid(vs ~ cyl)
#'
#' df <- data.frame(mpg = 22, wt = 3)
#' g + facet_grid(vs ~ cyl) +
#' geom_point(data = df, colour = "red", size = 2)
#'
#' df2 <- data.frame(mpg = c(19, 22), wt = c(2,4), vs = c(0, 1))
#' g + facet_grid(vs ~ cyl) +
#' geom_point(data = df2, colour = "red", size = 2)
#'
#' df3 <- data.frame(mpg = c(19, 22), wt = c(2,4), vs = c(1, 1))
#' g + facet_grid(vs ~ cyl) +
#' geom_point(data = df3, colour = "red", size = 2)
#'
#'
#' # You can also choose whether the scales should be constant
#' # across all panels (the default), or whether they should be allowed
#' # to vary
#' mt <- ggplot(mtcars, aes(mpg, wt, colour = factor(cyl))) +
#' geom_point()
#'
#' mt + facet_grid(. ~ cyl, scales = "free")
#' # If scales and space are free, then the mapping between position
#' # and values in the data will be the same across all panels
#' mt + facet_grid(. ~ cyl, scales = "free", space = "free")
#'
#' mt + facet_grid(vs ~ am, scales = "free")
#' mt + facet_grid(vs ~ am, scales = "free_x")
#' mt + facet_grid(vs ~ am, scales = "free_y")
#' mt + facet_grid(vs ~ am, scales = "free", space = "free")
#' mt + facet_grid(vs ~ am, scales = "free", space = "free_x")
#' mt + facet_grid(vs ~ am, scales = "free", space = "free_y")
#'
#' # You may need to set your own breaks for consistent display:
#' mt + facet_grid(. ~ cyl, scales = "free_x", space = "free") +
#' scale_x_continuous(breaks = seq(10, 36, by = 2))
#' # Adding scale limits overrides free scales:
#' last_plot() + xlim(10, 15)
#'
#' # Free scales are particularly useful for categorical variables
#' ggplot(mpg, aes(cty, model)) +
#' geom_point() +
#' facet_grid(manufacturer ~ ., scales = "free", space = "free")
#' # particularly when you reorder factor levels
#' mpg$model <- reorder(mpg$model, mpg$cty)
#' manufacturer <- reorder(mpg$manufacturer, mpg$cty)
#' last_plot() %+% mpg + theme(strip.text.y = element_text())
#'
#' # Use as.table to control direction of horizontal facets, TRUE by default
#' h <- ggplot(mtcars, aes(x = mpg, y = wt)) +
#' geom_point()
#' h + facet_grid(cyl ~ vs)
#' h + facet_grid(cyl ~ vs, as.table = FALSE)
#'
#' # Use labeller to control facet labels, label_value is default
#' h + facet_grid(cyl ~ vs, labeller = label_both)
#' # Using label_parsed, see ?plotmath for more options
#' mtcars$cyl2 <- factor(mtcars$cyl, labels = c("alpha", "beta", "sqrt(x, y)"))
#' k <- ggplot(mtcars, aes(wt, mpg)) +
#' geom_point()
#' k + facet_grid(. ~ cyl2)
#' k + facet_grid(. ~ cyl2, labeller = label_parsed)
#' # For label_bquote the label value is x.
#' p <- ggplot(mtcars, aes(wt, mpg)) +
#' geom_point()
#' p + facet_grid(. ~ vs, labeller = label_bquote(alpha ^ .(x)))
#' p + facet_grid(. ~ vs, labeller = label_bquote(.(x) ^ .(x)))
#'
#' # Margins can be specified logically (all yes or all no) or by specific
#' # variables as (character) variable names
#' mg <- ggplot(mtcars, aes(x = mpg, y = wt)) + geom_point()
#' mg + facet_grid(vs + am ~ gear)
#' mg + facet_grid(vs + am ~ gear, margins = TRUE)
#' mg + facet_grid(vs + am ~ gear, margins = "am")
#' # when margins are made over "vs", since the facets for "am" vary
#' # within the values of "vs", the marginal facet for "vs" is also
#' # a margin over "am".
#' mg + facet_grid(vs + am ~ gear, margins = "vs")
#' mg + facet_grid(vs + am ~ gear, margins = "gear")
#' mg + facet_grid(vs + am ~ gear, margins = c("gear", "am"))
#'
#' # The facet strips can be displayed near the axes with switch
#' data <- transform(mtcars,
#' am = factor(am, levels = 0:1, c("Automatic", "Manual")),
#' gear = factor(gear, levels = 3:5, labels = c("Three", "Four", "Five"))
#' )
#' p <- ggplot(data, aes(mpg, disp)) + geom_point()
#' p + facet_grid(am ~ gear, switch = "both") + theme_light()
#'
#' # It may be more aesthetic to use a theme without boxes
#' # around the strips.
#' p + facet_grid(am ~ gear + vs, switch = "y") + theme_minimal()
#' p + facet_grid(am ~ ., switch = "y") +
#' theme_gray() %+replace% theme(strip.background = element_blank())
#' }
#' @importFrom plyr as.quoted
facet_grid <- function(facets, margins = FALSE, scales = "fixed", space = "fixed", shrink = TRUE, labeller = "label_value", as.table = TRUE, switch = NULL, drop = TRUE) {
scales <- match.arg(scales, c("fixed", "free_x", "free_y", "free"))
free <- list(
x = any(scales %in% c("free_x", "free")),
y = any(scales %in% c("free_y", "free"))
)
space <- match.arg(space, c("fixed", "free_x", "free_y", "free"))
space_free <- list(
x = any(space %in% c("free_x", "free")),
y = any(space %in% c("free_y", "free"))
)
# Facets can either be a formula, a string, or a list of things to be
# converted to quoted expressions
if (is.character(facets)) {
facets <- stats::as.formula(facets)
}
if (is.formula(facets)) {
lhs <- function(x) if (length(x) == 2) NULL else x[-3]
rhs <- function(x) if (length(x) == 2) x else x[-2]
rows <- as.quoted(lhs(facets))
rows <- rows[!sapply(rows, identical, as.name("."))]
cols <- as.quoted(rhs(facets))
cols <- cols[!sapply(cols, identical, as.name("."))]
}
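# e.g. facets = vs ~ am gives rows = vs and cols = am; a dot on either side,
# as in . ~ cyl, leaves that dimension unfaceted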
if (is.list(facets)) {
rows <- as.quoted(facets[[1]])
cols <- as.quoted(facets[[2]])
}
if (length(rows) + length(cols) == 0) {
stop("Must specify at least one variable to facet by", call. = FALSE)
}
facet(
rows = rows, cols = cols, margins = margins, shrink = shrink,
free = free, space_free = space_free, labeller = labeller,
as.table = as.table, switch = switch, drop = drop,
subclass = "grid"
)
}
#' @export
facet_train_layout.grid <- function(facet, data) {
layout <- layout_grid(data, facet$rows, facet$cols, facet$margins,
drop = facet$drop, as.table = facet$as.table)
# Relax constraints, if necessary
layout$SCALE_X <- if (facet$free$x) layout$COL else 1L
layout$SCALE_Y <- if (facet$free$y) layout$ROW else 1L
layout
}
#' @export
facet_map_layout.grid <- function(facet, data, layout) {
locate_grid(data, layout, facet$rows, facet$cols, facet$margins)
}
#' @export
facet_render.grid <- function(facet, panel, coord, theme, geom_grobs) {
axes <- facet_axes(facet, panel, coord, theme)
strips <- facet_strips(facet, panel, theme)
panels <- facet_panels(facet, panel, coord, theme, geom_grobs)
# adjust the size of axes to the size of panel
axes$l$heights <- panels$heights
axes$b$widths <- panels$widths
# adjust the size of the strips to the size of the panels
strips$r$heights <- panels$heights
strips$t$widths <- panels$widths
# Check if switch is consistent with grid layout
switch_x <- !is.null(facet$switch) && facet$switch %in% c("both", "x")
switch_y <- !is.null(facet$switch) && facet$switch %in% c("both", "y")
if (switch_x && length(strips$t) == 0) {
facet$switch <- if (facet$switch == "both") "y" else NULL
switch_x <- FALSE
warning("Cannot switch x axis strips as they do not exist", call. = FALSE)
}
if (switch_y && length(strips$r) == 0) {
facet$switch <- if (facet$switch == "both") "x" else NULL
switch_y <- FALSE
warning("Cannot switch y axis strips as they do not exist", call. = FALSE)
}
# Combine components into complete plot
if (is.null(facet$switch)) {
top <- strips$t
top <- gtable_add_cols(top, strips$r$widths)
top <- gtable_add_cols(top, axes$l$widths, pos = 0)
center <- cbind(axes$l, panels, strips$r, z = c(2, 1, 3))
bottom <- axes$b
bottom <- gtable_add_cols(bottom, strips$r$widths)
bottom <- gtable_add_cols(bottom, axes$l$widths, pos = 0)
complete <- rbind(top, center, bottom, z = c(1, 2, 3))
} else {
# Add padding between the switched strips and the axes
padding <- convertUnit(theme$strip.switch.pad.grid, "cm")
if (switch_x) {
t_heights <- c(padding, strips$t$heights)
gt_t <- gtable(widths = strips$t$widths, heights = unit(t_heights, "cm"))
gt_t <- gtable_add_grob(gt_t, strips$t, name = strips$t$name, clip = "off",
t = 1, l = 1, b = -1, r = -1)
}
if (switch_y) {
r_widths <- c(strips$r$widths, padding)
gt_r <- gtable(widths = unit(r_widths, "cm"), heights = strips$r$heights)
gt_r <- gtable_add_grob(gt_r, strips$r, name = strips$r$name, clip = "off",
t = 1, l = 1, b = -1, r = -1)
}
# Combine plot elements according to strip positions
if (switch_x && switch_y) {
center <- cbind(gt_r, axes$l, panels, z = c(3, 2, 1))
bottom <- rbind(axes$b, gt_t)
bottom <- gtable_add_cols(bottom, axes$l$widths, pos = 0)
bottom <- gtable_add_cols(bottom, gt_r$widths, pos = 0)
complete <- rbind(center, bottom, z = c(1, 2))
} else if (switch_x) {
center <- cbind(axes$l, panels, strips$r, z = c(2, 1, 3))
bottom <- rbind(axes$b, gt_t)
bottom <- gtable_add_cols(bottom, strips$r$widths)
bottom <- gtable_add_cols(bottom, axes$l$widths, pos = 0)
complete <- rbind(center, bottom, z = c(1, 2))
} else if (switch_y) {
top <- strips$t
top <- gtable_add_cols(top, axes$l$widths, pos = 0)
top <- gtable_add_cols(top, gt_r$widths, pos = 0)
center <- cbind(gt_r, axes$l, panels, z = c(3, 2, 1))
bottom <- axes$b
bottom <- gtable_add_cols(bottom, axes$l$widths, pos = 0)
bottom <- gtable_add_cols(bottom, gt_r$widths, pos = 0)
complete <- rbind(top, center, bottom, z = c(1, 2, 3))
} else {
stop("`switch` must be either NULL, 'both', 'x', or 'y'",
call. = FALSE)
}
}
complete$respect <- panels$respect
complete$name <- "layout"
bottom <- axes$b
complete
}
#' @export
facet_strips.grid <- function(facet, panel, theme) {
col_vars <- unique(panel$layout[names(facet$cols)])
row_vars <- unique(panel$layout[names(facet$rows)])
# Adding labels metadata, useful for labellers
attr(col_vars, "type") <- "cols"
attr(col_vars, "facet") <- "grid"
attr(row_vars, "type") <- "rows"
attr(row_vars, "facet") <- "grid"
dir <- list(r = "r", t = "t")
if (!is.null(facet$switch) && facet$switch %in% c("both", "x")) {
dir$t <- "b"
}
if (!is.null(facet$switch) && facet$switch %in% c("both", "y")) {
dir$r <- "l"
}
strips <- list(
r = build_strip(panel, row_vars, facet$labeller,
theme, dir$r, switch = facet$switch),
t = build_strip(panel, col_vars, facet$labeller,
theme, dir$t, switch = facet$switch)
)
Map(function(strip, side) {
if (side %in% c("t", "b")) {
gtable_add_col_space(strip, theme$panel.margin.x %||% theme$panel.margin)
} else {
gtable_add_row_space(strip, theme$panel.margin.y %||% theme$panel.margin)
}
}, strips, dir)
}
#' @export
facet_axes.grid <- function(facet, panel, coord, theme) {
axes <- list()
# Horizontal axes
cols <- which(panel$layout$ROW == 1)
grobs <- lapply(panel$ranges[cols], coord$render_axis_h, theme = theme)
axes$b <- gtable_add_col_space(gtable_row("axis-b", grobs),
theme$panel.margin.x %||% theme$panel.margin)
# Vertical axes
rows <- which(panel$layout$COL == 1)
grobs <- lapply(panel$ranges[rows], coord$render_axis_v, theme = theme)
axes$l <- gtable_add_row_space(gtable_col("axis-l", grobs),
theme$panel.margin.y %||% theme$panel.margin)
axes
}
#' @export
facet_panels.grid <- function(facet, panel, coord, theme, geom_grobs) {
# If user hasn't set aspect ratio, and we have fixed scales, then
# ask the coordinate system if it wants to specify one
aspect_ratio <- theme$aspect.ratio
if (is.null(aspect_ratio) && !facet$free$x && !facet$free$y) {
aspect_ratio <- coord$aspect(panel$ranges[[1]])
}
if (is.null(aspect_ratio)) {
aspect_ratio <- 1
respect <- FALSE
} else {
respect <- TRUE
}
# Add background and foreground to panels
panels <- panel$layout$PANEL
ncol <- max(panel$layout$COL)
nrow <- max(panel$layout$ROW)
panel_grobs <- lapply(panels, function(i) {
fg <- coord$render_fg(panel$ranges[[i]], theme)
bg <- coord$render_bg(panel$ranges[[i]], theme)
geom_grobs <- lapply(geom_grobs, `[[`, i)
if (theme$panel.ontop) {
panel_grobs <- c(geom_grobs, list(bg), list(fg))
} else {
panel_grobs <- c(list(bg), geom_grobs, list(fg))
}
gTree(children = do.call("gList", panel_grobs))
})
panel_matrix <- matrix(panel_grobs, nrow = nrow, ncol = ncol, byrow = TRUE)
# @kohske
# Now size of each panel is calculated using PANEL$ranges, which is given by
# coord_train called by train_range.
# So here, "scale" need not to be referred.
#
# In general, panel has all information for building facet.
if (facet$space_free$x) {
ps <- panel$layout$PANEL[panel$layout$ROW == 1]
widths <- vapply(ps, function(i) diff(panel$ranges[[i]]$x.range), numeric(1))
panel_widths <- unit(widths, "null")
} else {
panel_widths <- rep(unit(1, "null"), ncol)
}
if (facet$space_free$y) {
ps <- panel$layout$PANEL[panel$layout$COL == 1]
heights <- vapply(ps, function(i) diff(panel$ranges[[i]]$y.range), numeric(1))
panel_heights <- unit(heights, "null")
} else {
panel_heights <- rep(unit(1 * aspect_ratio, "null"), nrow)
}
panels <- gtable_matrix("panel", panel_matrix,
panel_widths, panel_heights, respect = respect)
panels <- gtable_add_col_space(panels, theme$panel.margin.x %||% theme$panel.margin)
panels <- gtable_add_row_space(panels, theme$panel.margin.y %||% theme$panel.margin)
panels
}
#' @export
facet_vars.grid <- function(facet) {
paste(lapply(list(facet$rows, facet$cols), paste, collapse = ", "),
collapse = " ~ ")
}
|
/R/facet-grid-.r
|
no_license
|
tchaithonov/ggplot2
|
R
| false | false | 17,831 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lyapFDGrond.R
\name{lyapFDGrond}
\alias{lyapFDGrond}
\title{lyapFDGrond : Computes the Lyapunov spectrum (with
compelled flow direction)}
\usage{
lyapFDGrond(outLyapFD = NULL, nVar, dMax, coeffF, intgrMthod = "rk4",
tDeb = 0, dt, tFin, yDeb, Ddeb = NULL, nIterMin = 1,
nIterStats = 50)
}
\arguments{
\item{outLyapFD}{List of output data that can be used
as an input in order to extend the computation}
\item{nVar}{Model dimension}
\item{dMax}{Maximum degree of the polynomial formulation}
\item{coeffF}{Model matrix. Each column corresponds to
one equation. Lines provide the coefficients for each
polynomial term, whose order is defined by the function
\code{poLabs(nVar, dMax)} in package \code{GPoM}}
\item{intgrMthod}{Numerical integration method
('rk4' by default)}
\item{tDeb}{Initial integration time (0 by default)}
\item{dt}{Integration time step}
\item{tFin}{Final integration time}
\item{yDeb}{Model initial conditions}
\item{Ddeb}{Jacobian initial conditions (optional).}
\item{nIterMin}{Minimum number of iterations (nIterMin= 1
by default)}
\item{nIterStats}{Number of iterations used in the statistics computation}
}
\value{
List of output data
}
\description{
Computes all the Lyapunov exponents based
on Gram-Schmidt procedure with zero-Lyapunov exponent compelled
to the flow direction (Grond et al. 1985).
The Jacobian matrix is computed from the original model
by semi-Formal Derivation.
}
\examples{
data(Ebola)
nVar = dim(Ebola$KL)[2]
pMax = dim(Ebola$KL)[1]
dMax = p2dMax(nVar, pMax)
outLyapFD <- NULL
outLyapFD$Grond <- lyapFDGrond(outLyapFD$Grond, nVar= nVar, dMax = dMax, coeffF = Ebola$KL,
tDeb = 0, dt = 0.01, tFin = 2, yDeb = Ebola$yDeb)
}
\references{
F. Grond, H. H. Diebner, S. Sahle, A. Mathias, S. Fischer,
O. E. Rossler, A robust, locally interpretable algorithm for
Lyapunov exponents, Chaos, Solitons \& Fractals, 16, 841-852 (2003).
F. Grond \& H. H. Diebner: Local Lyapunov exponents for
dissipative continuous systems. Chaos, Solitons \& Fractals, 23,
1809-1817 (2005).
}
|
/man/lyapFDGrond.Rd
|
no_license
|
cran/GPoM.FDLyapu
|
R
| false | true | 2,136 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comparisons.R
\name{aggregate_cells}
\alias{aggregate_cells}
\title{A wrapper around scuttle::aggregateAcrossCells that also:
\itemize{
\item computes normcounts (divide by library size) and logcounts (normcounts and take log with pseudocount 1)
\item adds QC metrics
\item marks mito genes
\item formats sample names
}}
\usage{
aggregate_cells(sce, criteria, assay = c("normcounts", "counts"))
}
\description{
A wrapper around scuttle::aggregateAcrossCells that also:
\itemize{
\item computes normcounts (divide by library size) and logcounts (normcounts and take log with pseudocount 1)
\item adds QC metrics
\item marks mito genes
\item formats sample names
}
}
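\examples{
# Hypothetical usage sketch, not taken from the package: it assumes `criteria`
# names the colData columns to aggregate by, as in scuttle::aggregateAcrossCells.
\dontrun{
pseudobulk <- aggregate_cells(sce, criteria = c("sample_id", "cell_type"),
                              assay = "counts")
}
}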
|
/man/aggregate_cells.Rd
|
no_license
|
antortjim/sleepapp
|
R
| false | true | 743 |
rd
|
#!/usr/bin/env Rscript
library(methods)
library(Matrix)
library(MASS)
#library(Rcpp)
library(lme4)
# Read in your data as an R dataframe
basedir <- c("/seastor/helenhelen/ISR_2015")
resultdir <- paste(basedir,"/me/results/mem",sep="/")
setwd(resultdir)
r.itemInfo <- matrix(data=NA, nrow=2, ncol=4)
## read data
#get data for each trial
item_file <- paste(basedir,"/me/tmap/data/item/mem.txt",sep="")
item_data <- read.table(item_file,header=FALSE)
colnames(item_data) <- c("subid","pid",
"p97_act1","p97_act2","p97_actmean","p97_rsaD","p97_rsaDBwc","p97_rsadiff",
"LVVC_act1","LVVC_act2","LVVC_actmean","LVVC_rsaD","LVVC_rsaDBwc","LVVC_rsadiff",
"LANG_act1","LANG_act2","LANG_actmean","LANG_rsaD","LANG_rsaDBwc","LANG_rsadiff",
"LSMG_act1","LSMG_act2","LSMG_actmean","LSMG_rsaD","LSMG_rsaDBwc","LSMG_rsadiff",
"LIFG_act1","LIFG_act2","LIFG_actmean","LIFG_rsaD","LIFG_rsaDBwc","LIFG_rsadiff",
"RVVC_act1","RVVC_act2","RVVC_actmean","RVVC_rsaD","RVVC_rsaDBwc","RVVC_rsadiff",
"RANG_act1","RANG_act2","RANG_actmean","RANG_rsaD","RANG_rsaDBwc","RANG_rsadiff",
"RSMG_act1","RSMG_act2","RSMG_actmean","RSMG_rsaD","RSMG_rsaDBwc","RSMG_rsadiff",
"RIFG_act1","RIFG_act2","RIFG_actmean","RIFG_rsaD","RIFG_rsaDBwc","RIFG_rsadiff",
"HIP_act1","HIP_act2","HIP_actmean","HIP_rsaD","HIP_rsaDBwc","HIP_rsadiff",
"CA1_act1","CA1_act2","CA1_actmean","CA1_rsaD","CA1_rsaDBwc","CA1_rsadiff",
"CA2_act1","CA2_act2","CA2_actmean","CA2_rsaD","CA2_rsaDBwc","CA2_rsadiff",
"DG_act1","DG_act2","DG_actmean","DG_rsaD","DG_rsaDBwc","DG_rsadiff",
"CA3_act1","CA3_act2","CA3_actmean","CA3_rsaD","CA3_rsaDBwc","CA3_rsadiff",
"subiculum_act1","subiculum_act2","subiculum_actmean","subiculum_rsaD","subiculum_rsaDBwc","subiculum_rsadiff",
"ERC_act1","ERC_act2","ERC_actmean","ERC_rsaD","ERC_rsaDBwc","ERC_rsadiff")
item_data$subid <- as.factor(item_data$subid)
item_data$pid <- as.factor(item_data$pid)
subdata <- item_data
itemInfo_actmean <- lmer(RVVC_rsadiff~ERC_actmean+(1+ERC_actmean|subid)+(1+ERC_actmean|pid),REML=FALSE,data=subdata)
itemInfo_actmean.null <- lmer(RVVC_rsadiff~1+(1+ERC_actmean|subid)+(1+ERC_actmean|pid),REML=FALSE,data=subdata)
itemInfo_di <- lmer(RVVC_rsadiff~ERC_actmean+(1+ERC_rsadiff|subid)+(1+ERC_rsadiff|pid),REML=FALSE,data=subdata)
itemInfo_di.null <- lmer(RVVC_rsadiff~1+(1+ERC_rsadiff|subid)+(1+ERC_rsadiff|pid),REML=FALSE,data=subdata)
mainEffect.itemInfo_actmean <- anova(itemInfo_actmean,itemInfo_actmean.null)
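# Assumed layout of anova(model, model.null) for lmer fits: columns 6-8 are
# Chisq, Df and Pr(>Chisq), so each row of r.itemInfo stores the chi-square, df,
# p-value and (in column 4) the fixed-effect slope.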
r.itemInfo[1,1]=mainEffect.itemInfo_actmean[2,6]
r.itemInfo[1,2]=mainEffect.itemInfo_actmean[2,7]
r.itemInfo[1,3]=mainEffect.itemInfo_actmean[2,8]
r.itemInfo[1,4]=fixef(itemInfo_actmean)[2];
mainEffect.itemInfo_di <- anova(itemInfo_di,itemInfo_di.null)
r.itemInfo[2,1]=mainEffect.itemInfo_di[2,6]
r.itemInfo[2,2]=mainEffect.itemInfo_di[2,7]
r.itemInfo[2,3]=mainEffect.itemInfo_di[2,8]
r.itemInfo[2,4]=fixef(itemInfo_di)[2];
write.matrix(r.itemInfo,file="itemInfo_RVVC_ERC.txt",sep="\t")
|
/ROI_based/me/tln/itemInfo_RVVC_ERC.R
|
no_license
|
QQXiao/ISR_2015
|
R
| false | false | 3,047 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_abx.R
\name{convert_abx}
\alias{convert_abx}
\title{Convert antibiotic codes generated from LCL telepath to full name (and vice-versa)}
\usage{
convert_abx(.data, .clean_up = F, .abbreviate = F, .been_cleaned = F)
}
\arguments{
\item{.data}{A character vector of antibiotic abbreviations or names}
\item{.clean_up}{Clean up and convert ambiguous names for use with AMR package:
\itemize{
\item 'Amp/Amoxil' = 'Ampicillin',
\item "Eryth/Clarith." = 'Erythromycin',
\item 'Ceftolozane-tazobactam' = 'Ceftolozane/tazobactam',
\item 'Caz/Avi' = 'Ceftazidime/avibactam',
\item 'Pip/Tazo' = 'Piperacillin/Tazobactam'
}}
\item{.abbreviate}{Converts a character vector of full antibiotic names to their LCL abbreviations}
\item{.been_cleaned}{Set as TRUE if the antibiotic names have been "cleaned up" with .clean_up}
}
\description{
Convert antibiotic codes generated from LCL telepath to full name (and vice-versa).
}
\examples{
abx_list <- c('AML', 'CIP', 'P/T')
convert_abx(abx_list)
[1] "Amp/Amoxil" "Ciprofloxacin" "Pip/Tazo"
convert_abx(abx_list, .clean_up = T)
[1] "Ampicillin" "Ciprofloxacin" "Piperacillin/Tazobactam"
}
\keyword{abx}
\keyword{convert_abx}
|
/LCL/man/convert_abx.Rd
|
no_license
|
agerada/lcl-package
|
R
| false | true | 1,283 |
rd
|
#################################################
# Author: Robin Elahi
# Date: 150828
# Multipanel plot of survival, growth, and recruit size
# Figure 4
#################################################
rm(list=ls(all=TRUE)) # removes all previous material from R's memory
# get growth data
source("./bael_growth.R")
head(dat_growth)
# get survival data
source("./bael_survival.R")
head(dat_survival)
# get recruit size data
source("./bael_recruitSize.R")
head(dat_recruitSize)
# ggplot2 settings (the sourced scripts may already attach ggplot2; loading it again is harmless)
library(ggplot2)
theme_set(theme_classic(base_size = 12))
##########################################################
# FIGURE - SURVIVAL SCALING BY ERA
##########################################################
ylab_surv <- "Survival at time t+3"
xlab_surv <- expression(paste("Size at time t (", cm^2, ")"))
ULClabel <- theme(plot.title = element_text(hjust = -0.2, vjust = 1, size = rel(1.2)))
surv1 <- ggplot(dat_survival, aes(ini.area, survival,
color = time, shape = time)) +
ylab(ylab_surv) + xlab(xlab_surv) +
theme(legend.justification = c(1, 0), legend.position = c(1, 0.2)) +
theme(legend.title = element_blank()) +
geom_point(size = 2, alpha = 0.8,
position = position_jitter(h = 0.05)) +
scale_colour_manual(breaks = c("past", "present"),
values = c("darkgray", "black"),
labels = c("1969-1972", "2007-2010")) +
scale_shape_manual(breaks = c("past", "present"),
values = c(18, 20),
labels = c("1969-1972", "2007-2010"))
survPlot <- surv1 + labs(title = "A") + ULClabel +
survTrendPast2 + survTrendPres2
# theme(legend.position = "none")
survPlot
##########################################################
# FIGURE - GROWTH SCALING BY ERA
##########################################################
ylab_growth <- expression(paste("Size at time t+3 (", cm^2, ")"))
xlab_growth <- expression(paste("Size at time t (", cm^2, ")"))
ULClabel <- theme(plot.title = element_text(hjust = -0.2, vjust = 1, size = rel(1.2)))
size1 <- ggplot(dat_growth, aes(ini.area, fin.area, color = time, shape = time)) +
ylab(ylab_growth) + xlab(xlab_growth) +
theme(legend.justification = c(1, 0), legend.position = c(1, 0.01)) +
theme(legend.title = element_blank()) +
geom_point(size = 2, alpha = 0.8,
position = position_jitter(h = 0.05)) +
scale_colour_manual(breaks = c("past", "present"),
values = c("darkgray", "black"),
labels = c("1969-1972", "2007-2010")) +
scale_shape_manual(breaks = c("past", "present"),
values = c(18, 20),
labels = c("1969-1972", "2007-2010"))
sizePlot <- size1 +
geom_smooth(method = "lm", se = FALSE, size = 1) +
labs(title = "B") + ULClabel +
  geom_abline(intercept = 0, slope = 1, linetype = 2, color = "black", size = 0.5) +
theme(legend.position = "none")
sizePlot
##########################################################
# FIGURE - RECRUIT SIZE BY ERA
##########################################################
ylab_recruitSize <- expression(paste("Recruit size (", cm^2, ")"))
ULClabel <- theme(plot.title = element_text(hjust = -0.2, vjust = 1, size = rel(1.2)))
recruit1 <- ggplot(data = dat_recruitSize, aes(time, area, fill = time)) +
geom_boxplot() +
ylab(ylab_recruitSize) + xlab("Year") +
scale_fill_manual(breaks = c("past", "present"),
values = c("darkgray", "white")) +
theme(legend.position = "none") +
scale_x_discrete(labels = c("1972", "2010"))
recruitPlot <- recruit1 + labs(title = "C") + ULClabel
recruitPlot
##########################################################
# Multi-panel plot
##########################################################
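# multiplot() is not a ggplot2 function; it is assumed to be defined by one of the
# sourced scripts above (e.g. the Cookbook-for-R multiplot helper).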
pdf("./figs/vitalRatesPlot.pdf", 7, 3.5)
multiplot(survPlot, sizePlot, recruitPlot, cols = 3)
dev.off()
|
/bael_vitalRatesPlot.R
|
permissive
|
elahi/cupCorals
|
R
| false | false | 3,903 |
r
|
library(zipcode)
data(zipcode)
library(rgdal)
library(plyr)
library(ggplot2)
library(lubridate)
ca.zip <- zipcode[zipcode$state=="CA",]
ca.zip$value1 <- runif(dim(ca.zip)[1], 100, 1000)
ca.zip$value2 <- runif(dim(ca.zip)[1], 1, 10)
ca.zip$date <- ymd("20150101") + ddays(runif(dim(ca.zip)[1], 0, 365))
ca.zip$date <- as.Date(ca.zip$date)  # base call; magrittr/dplyr (which provide %>%) are not loaded here
zip.shape <- readOGR("./data/", layer="californiaZIP")
zip.shape@data = data.frame(zip.shape@data, ca.zip[match(zip.shape@data[,"ZCTA5CE10"], ca.zip[,"zip"]),])
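# The match() above left-joins the simulated ZIP-level values onto the shapefile's
# attribute table by ZIP code (ZCTA5CE10); polygons without a matching ZIP get NA.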
|
/ZipMap/global.R
|
no_license
|
jrpepper/jp-shiny-templates
|
R
| false | false | 508 |
r
|
GetSubset <-
function(Lat, Long, Product, Band, StartDate, EndDate, KmAboveBelow, KmLeftRight)
{
if(length(Product) != 1) stop("Incorrect length of Product input. Give only one data product at a time.")
if(length(Band) != 1) stop("Incorrect length of Band input. Give only one data band at a time.")
if(!is.numeric(Lat) | !is.numeric(Long)) stop("Lat and Long inputs must be numeric.")
if(length(Lat) != 1 | length(Long) != 1) stop("Incorrect number of Lats and Longs supplied (only 1 coordinate allowed).")
if(abs(Lat) > 90 | abs(Long) > 180) stop("Detected a lat or long beyond the range of valid coordinates.")
getsubset.xml <- paste('
<soapenv:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:mod="', daacmodis, '/MODIS_webservice">
<soapenv:Header/>
<soapenv:Body>
<mod:getsubset soapenv:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<Latitude xsi:type="xsd:float">', Lat, '</Latitude>
<Longitude xsi:type="xsd:float">', Long, '</Longitude>
<Product xsi:type="xsd:string">', Product, '</Product>
<Band xsi:type="xsd:string">', Band, '</Band>
<MODIS_Subset_Start_Date xsi:type="xsd:string">', StartDate, '</MODIS_Subset_Start_Date>
<MODIS_Subset_End_Date xsi:type="xsd:string">', EndDate, '</MODIS_Subset_End_Date>
<Km_Above_Below xsi:type="xsd:string">', KmAboveBelow, '</Km_Above_Below>
<Km_Left_Right xsi:type="xsd:string">', KmLeftRight, '</Km_Left_Right>
</mod:getsubset>
</soapenv:Body>
</soapenv:Envelope>',
sep = "")
header.fields <- c(Accept = "text/xml",
Accept = "multipart/*",
'Content-Type' = "text/xml; charset=utf-8",
SOAPAction = "")
reader <- basicTextGatherer()
header <- basicTextGatherer()
curlPerform(url = paste0(daacmodis, wsdl_loc),
httpheader = header.fields,
postfields = getsubset.xml,
writefunction = reader$update,
verbose = FALSE)
  # Check that the server is not down by inspecting the XML response for an internal server error message.
if(grepl("Internal Server Error", reader$value())){
stop("Web service failure: the ORNL DAAC server seems to be down, please try again later.
The online subsetting tool (https://daac.ornl.gov/cgi-bin/MODIS/GLBVIZ_1_Glb/modis_subset_order_global_col5.pl)
will indicate when the server is up and running again.")
}
xmlres <- xmlRoot(xmlTreeParse(reader$value()))
modisres <- xmlSApply(xmlres[[1]],
function(x) xmlSApply(x,
function(x) xmlSApply(x,
function(x) xmlSApply(x,xmlValue))))
    if("Fault" %in% colnames(modisres)){
if(length(modisres['faultstring.text', ][[1]]) == 0){
stop("Downloading from the web service is currently not working. Please try again later.")
}
stop(modisres['faultstring.text', ])
} else{
modisres <- as.data.frame(t(unname(modisres[-c(7,11)])))
names(modisres) <- c("xll", "yll", "pixelsize", "nrow", "ncol", "band", "scale", "lat", "long", "subset")
return(modisres)
}
}
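# Hypothetical usage sketch (not run): the argument values below are placeholders, and the call
# assumes RCurl/XML are loaded and that daacmodis/wsdl_loc are defined elsewhere in the package.
# subset <- GetSubset(Lat = 51.41, Long = -0.64, Product = "MOD13Q1",
#                     Band = "250m_16_days_EVI", StartDate = "A2001001",
#                     EndDate = "A2001033", KmAboveBelow = 0, KmLeftRight = 0)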
|
/R/GetSubset.R
|
no_license
|
huananbei/MODISTools
|
R
| false | false | 3,583 |
r
|
#! /usr/bin/env Rscript
library(DESeq2)
library(genefilter)
library(ggplot2)
tag <- "all"
res.path <- "./"
setwd(res.path)
reads.cnt.tbl <- read.table("A.out.txt",
stringsAsFactors=FALSE,
header=TRUE)
#families <- c("F1", "F1", "F1", "F1", "F1", "F1",
# "F2", "F2", "F2", "F2", "F2", "F2")
#h339_WT h341_TOF h342_TOF h343_TOF h344_SRV h345_SRV h347_SLV h348_SLV h349_SLV h384_WT h385_WT TK413_WT TK416_WT TK418_TOF TK420_TOF
phenotypes <- c("CTR", "HD", "HD", "HD", "HD","HD","HD","HD","HD","CTR","CTR","CTR","CTR","HD","HD")
Group=phenotypes
#colData <- as.data.frame(cbind(families=families,
# phenotypes=phenotypes))
colData <- as.data.frame(cbind(phenotypes=phenotypes))
rownames(reads.cnt.tbl) <- reads.cnt.tbl[ ,1]
reads.cnt.tbl <- reads.cnt.tbl[ , -1]
maxCounts=apply(reads.cnt.tbl,1,max)
reads.cnt.tbl=reads.cnt.tbl[which(maxCounts>=2),]
rownames(colData) <- names(reads.cnt.tbl)
print(colData)
cds <- DESeqDataSetFromMatrix(countData = reads.cnt.tbl,
colData = colData,
design = ~ phenotypes+1)
cds <- estimateSizeFactors(cds)
cds <- estimateDispersions(cds)
vsd = varianceStabilizingTransformation(cds)
vsd.exp <- assay(vsd)
write.table(vsd.exp, file="vsd_exp1.txt", sep="\t", quote=FALSE)
cds <- DESeq(cds)
print(resultsNames(cds))
pca <- prcomp(t(vsd.exp))
write.table(cbind(pca$x,Group), file="pca1.txt", sep="\t", quote=FALSE)
percentVar <- pca$sdev^2 / sum( pca$sdev^2 )
write.table(percentVar, file="percentVar1.txt", sep="\t", quote=FALSE)
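# ggplot2 is loaded above but never used; a minimal sketch of how the PCA scores could be
# visualised (assumes at least two PCs, i.e. three or more samples):
pca_df <- data.frame(PC1 = pca$x[, 1], PC2 = pca$x[, 2], Group = Group)
pca_plot <- ggplot(pca_df, aes(x = PC1, y = PC2, colour = Group)) +
  geom_point(size = 3) +
  xlab(sprintf("PC1 (%.1f%%)", 100 * percentVar[1])) +
  ylab(sprintf("PC2 (%.1f%%)", 100 * percentVar[2]))
# ggsave("pca1.pdf", pca_plot)  # uncomment to write the figure to disk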
|
/PCA.R
|
no_license
|
Lei-Tian/CHD
|
R
| false | false | 1,595 |
r
|
# dplyr provides %>%, mutate(), group_by(), summarise() and left_join() used below
library(dplyr)
# 7-1
mpg <- as.data.frame(ggplot2::mpg)
mpg <- mpg %>% mutate(effy = cty + hwy)
# 7-2
mpg <- mpg %>% mutate(avg_effy = mpg$effy/2)
# 7-3
mpg %>% arrange(desc(avg_effy)) %>% head(3)
# 7-4
mpg <- as.data.frame(ggplot2::mpg)
mpg %>% arrange(desc((mpg$cty + mpg$hwy)/2)) %>% head(3)
# 8-1
mpg <- as.data.frame(ggplot2::mpg)
mpg %>% group_by(class) %>% summarise(class_avg_cty = mean(cty))
# 8-2
mpg2 <- mpg %>% group_by(class) %>% summarise(class_avg_cty = mean(cty))
mpg2 %>% arrange(desc(class_avg_cty))
# 8-3
mpg_manu <- mpg %>% group_by(manufacturer) %>% summarise(class_avg_cty = mean(cty))
mpg_manu %>% arrange(desc(class_avg_cty)) %>% head(3)
# 8-4
manu <- mpg %>%
group_by(manufacturer) %>%
filter(class == "compact") %>%
summarise(num = length(class))
manu %>% arrange(desc(num))
# 9
fuel <- data.frame(fl = c("c","d","e","p","r"),
price_fl = c(2.35, 2.38, 2.11, 2.76, 2.22),
stringsAsFactors = F)
fuel
# 9-1
mpg <- left_join(mpg, fuel, by = "fl")
# 9-2
mpg %>% select(model, fl, price_fl) %>% head(5)
# 10
midwest <- as.data.frame(ggplot2::midwest)
# 10-1
midwest <- midwest %>% mutate(pop_u18_ratio = (poptotal - popadults)/poptotal*100)
# 10-2
midwest %>% select(county, pop_u18_ratio) %>% arrange(desc(pop_u18_ratio)) %>% head(5)
# 10-3
midwest <- midwest %>%
mutate(LMS = ifelse(pop_u18_ratio < 30, "small",
ifelse(pop_u18_ratio < 40 & pop_u18_ratio >= 30, "middle",
"large")
)
)
midwest %>% group_by(LMS) %>% summarise(size = length(LMS))
# 10-4
midwest <- midwest %>% mutate(asian_ratio = popasian/poptotal*100)
midwest %>% arrange(asian_ratio) %>% select(state, county, asian_ratio) %>% head(10)
# 11
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(65, 124, 131, 153, 212), "hwy"] <- NA
# 11-1
table(is.na(mpg$drv))
table(is.na(mpg$hwy))
# 11-2
mpg %>%
filter(is.na(hwy) != 1) %>%
group_by(drv) %>%
summarise(avg_hwy = mean(hwy)) %>%
arrange(desc(avg_hwy))
# 12
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(10, 14, 58, 93), "drv"] <- "k"
mpg[c(29, 43, 129, 203), "cty"] <- c(3, 4, 39, 42)
# 12-1
table(mpg$drv)
mpg$drv <- ifelse(!mpg$drv %in% c(4, "f", "r"), NA, mpg$drv)
# 12-2
boxplot(mpg$cty)
boxplot(mpg$cty)$stats
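# boxplot()$stats rows are: lower whisker, Q1, median, Q3, upper whisker;
# the 9 / 26 cutoffs below are meant to mirror those whisker values.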
mpg$cty <- ifelse(mpg$cty <= 9 | mpg$cty >= 26, NA, mpg$cty)
boxplot(mpg$cty)
# 12-3
mpg %>%
filter(is.na(drv) != 1 & is.na(cty) != 1) %>%
group_by(drv) %>%
summarise(avg_cty = mean(cty))
|
/R_training/실습제출/김재현/2019-11-05/dplyr_lab3.R
|
no_license
|
BaeYS-marketing/R
|
R
| false | false | 2,494 |
r
|
\name{FitHReg}
\alias{FitHReg}
\title{ Fits Three Parameter Harmonic Regression }
\description{
Estimates A, B and f in the harmonic regression,
y(t)=mu+A*cos(2*pi*f*t)+B*sin(2*pi*f*t)+e(t)
using least squares (LS).
}
\usage{FitHReg(y, t = 1:length(y), nf=150)}
\arguments{
\item{y}{ series }
\item{t}{ time points }
\item{nf}{ number of frequencies to enumerate (150 by default) }
}
\details{
Program is interfaced to C for efficient computation.
}
\value{
Object of class "HReg" produced.
This is a list with components:
'coefficients', 'residuals', 'Rsq', 'fstatistic', 'sigma', 'freq', 'LRStat'
corresponding to the 3 regression coefficients, residuals, R-squared,
F-statistic, residual sd, optimal frequency and LR-test statistic for
null hypothesis white noise.
}
\references{
Islam, M.S. (2008).
Peridocity, Change Detection and Prediction in Microarrays.
Ph.D. Thesis, The University of Western Ontario.
}
\seealso{ \code{\link{GetFitHReg}} }
\examples{
z<-SimulateHReg(10, f=2.5/10, 1, 2)
FitHReg(z)
}
\keyword{ ts }
|
/man/FitHReg.Rd
|
no_license
|
cran/pRSR
|
R
| false | false | 1,063 |
rd
|
TEXT <- scan(file="howtostartastartup.txt",what="char",quote=NULL)
cat(TEXT, file="howto_vector_ver1.0.txt",sep="\n")
TableTEXT <- table(TEXT)
head(TableTEXT)
SortedTableTEXT <- sort(TableTEXT, decreasing = T)
head(SortedTableTEXT)
head(names(SortedTableTEXT))
str(names(SortedTableTEXT))
NamesSorted <- names(SortedTableTEXT)
str(NamesSorted)
head(unname(SortedTableTEXT))
#TEXTDF <- data.frame(row.names=NamesSorted,unname(SortedTableTEXT))
TEXTDF <- data.frame(SortedTableTEXT)
TEXTDF <- data.frame(row.names=TEXTDF$TEXT, Freq=TEXTDF$Freq, Rel.Freq=TEXTDF$Freq/sum(TEXTDF$Freq))
head(TEXTDF)
head(TEXTDF[order(rownames(TEXTDF),decreasing=T),])
write.table(TEXTDF, file="howtostartastartup_analyzed_ver1.1.txt", sep="\t", quote=F, col.names = NA)
Data <- read.delim(file="howtostartastartup_analyzed_ver1.1.txt", sep="\t", header=T, row.names=1, quote=NULL)
head(Data)
|
/20171016_practice.R
|
no_license
|
hyun-park/__R__SogangRClass
|
R
| false | false | 871 |
r
|
library(data.table)
rm(list=ls())
subm_01 <- fread("~/Dropbox/fish/sub/subm_full_resnet_cut0.7_20170316.csv")
subm_001 <- fread("~/Dropbox/fish/sub/subm_part_resnet_annos_20170317.csv")
subm_02 <- fread("~/Dropbox/fish/sub/avg_2_best_50_50_morebags_0303.csv")
setnames(subm_01, "image_file", "image")
setnames(subm_001, "image_file", "image")
subm_01 = subm_01[,colnames(subm_02), with=F]
subm_001 = subm_001[,colnames(subm_02), with=F]
subm_02 = subm_02[order(image)]
subm_001 = subm_001[order(image)]
subm_02
subm_001
cols = names(subm_02)[-1]
for (var in cols) subm_02[[var]] = (subm_02[[var]]*.7) + (subm_001[[var]]*.3)
subm_02B = subm_02[image %in% subm_01$image]
subm_02A = subm_02[!image %in% subm_01$image]
subm_02B = subm_02B[order(image)]
subm_01
subm_02B
#subm_03
subm_04 = subm_02B
cols = names(subm_02B)[-1]
for (var in cols) subm_04[[var]] = (subm_01[[var]]*.7) + (subm_02B[[var]]*.3)
subm_04 = data.frame(subm_04)
subm_04$NoF = .000999
subm_04[,2:9] = subm_04[,2:9]/rowSums(data.frame(subm_04[,2:9]))
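# After the renormalisation above each row of columns 2:9 should sum to 1;
# quick optional check: all(abs(rowSums(subm_04[, 2:9]) - 1) < 1e-6)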
subm_05 = data.table(rbind(subm_04, subm_02A))
# # Boost the "LAG" scores.
# subm_05[LAG>0.2][["LAG"]] = 1.5
# subm_05 = data.frame(subm_05)
# subm_05[,2:9] = subm_05[,2:9]/rowSums(data.frame(subm_05[,2:9]))
# subm_05 = data.table(subm_05)
setwd("~/Dropbox/fish/sub")
write.csv(subm_05, "avg_(resnet_cut_yolo_over_0.7_full)_70_30_((avg_2_best_50_50_morebags_0303)_(resnet_annossmall))_1703.csv", row.names = F)
# 0.625
# # Use only resnet
# subm_04 = subm_02B
# cols = names(subm_02B)[-1]
# for (var in cols) subm_04[[var]] = (subm_01[[var]]*.99) + (subm_02B[[var]]*.01)
# subm_04 = data.frame(subm_04)
# subm_04$NoF = .000999
# subm_04[,2:9] = subm_04[,2:9]/rowSums(data.frame(subm_04[,2:9]))
# subm_05 = data.table(rbind(subm_04, subm_02A))
#
# # # Boost the "LAG" scores.
# # subm_05[LAG>0.2][["LAG"]] = 1.5
# # subm_05 = data.frame(subm_05)
# # subm_05[,2:9] = subm_05[,2:9]/rowSums(data.frame(subm_05[,2:9]))
# # subm_05 = data.table(subm_05)
#
# setwd("~/Dropbox/fish/sub")
# write.csv(subm_05, "avg_(resnet_cut_yolo_over_0.8)_0.99_0.01_(avg_2_best_50_50_morebags_0303)_1503.csv", row.names = F)
# # 0.68344
|
/blend/avg_subs_1703.R
|
no_license
|
rahasayantan/fish
|
R
| false | false | 2,149 |
r
|
#' @description Add meta.data about CNAs to a Seurat object from an infercnv_obj
#'
#' @title add_to_seurat()
#'
#' @param seurat_obj Seurat object to add meta.data to (default: NULL)
#'
#' @param infercnv_output_path Path to the output folder of the infercnv run to use
#'
#' @param top_n How many of the largest CNA (in number of genes) to get.
#'
#' @param bp_tolerance How many bp of tolerance to have around feature start/end positions for top_n largest CNVs.
#'
#' @return seurat_obj
#'
#' @export
#'
add_to_seurat <- function(seurat_obj = NULL,
infercnv_output_path,
top_n = 10,
bp_tolerance = 2000000) {
lfiles <- list.files(infercnv_output_path, full.names = FALSE)
if (!file.exists(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))) {
        flog.warn(sprintf("::Could not find \"run.final.infercnv_obj\" file at: %s", paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep)))
stop()
}
infercnv_obj = readRDS(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))
if (is.null(seurat_obj)) {
flog.info("No Seurat object provided, will only write metadata matrix.")
}
else if(!(setequal(row.names(seurat_obj@meta.data), colnames(infercnv_obj@expr.data)) ||
setequal(colnames(seurat_obj@assays$RNA), colnames(infercnv_obj@expr.data)))) {
flog.warn("::Cell names in Seurat object and infercnv results do not match")
stop()
}
## add check that data row/col names match seurat obj
if (any(grep(lfiles, pattern="HMM_CNV_predictions.HMM.*.Pnorm_0.[0-9]+"))) {
###### states are 0/0.5/1/1.5/2
scaling_factor = 1
if (any(grep(lfiles, pattern="HMM_CNV_predictions.HMMi6.*.Pnorm_0.[0-9]+"))) {
center_state = 1
}
else if (any(grep(lfiles, pattern="HMM_CNV_predictions.HMMi3.*.Pnorm_0.[0-9]+"))) {
center_state = 1
}
else {
flog.warn("::Found filtered HMM predictions output, but they do not match any known model type.")
stop()
}
# sort to take lowest BayesProb if there are multiple
regions = read.table(paste(infercnv_output_path, sort(lfiles[grep(lfiles, pattern="HMM_CNV_predictions.HMMi6.*.Pnorm_0.[0-9]+.pred_cnv_regions.dat")])[1], sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
hmm_genes = read.table(paste(infercnv_output_path, sort(lfiles[grep(lfiles, pattern="HMM_CNV_predictions.HMMi6.*.Pnorm_0.[0-9]+.pred_cnv_genes.dat")])[1], sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
# from_pbayes()
}
else if (any(grep(lfiles, pattern = "17_HMM_preds"))) {
###### states are 1/2/3/4/5/6
scaling_factor = 2
if (any(grep(lfiles, pattern = "17_HMM_predHMMi6"))) {
center_state = 3
}
else if (any(grep(lfiles, pattern = "17_HMM_predHMMi3"))) {
center_state = 2
}
else {
flog.warn("::Found HMM predictions output, but they do not match any known model type")
stop()
}
regions = read.table(paste(infercnv_output_path, "17_HMM_preds.pred_cnv_regions.dat", sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
hmm_genes = read.table(paste(infercnv_output_path, "17_HMM_preds.pred_cnv_genes.dat", sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
# from_hmm()
}
else {
flog.warn(sprintf("::Could not find any HMM predictions outputs at: %s", infercnv_output_path))
stop()
}
features_to_add <- .get_features(infercnv_obj = infercnv_obj,
regions = regions,
hmm_genes = hmm_genes,
center_state = center_state,
scaling_factor = scaling_factor,
top_n = top_n,
bp_tolerance = bp_tolerance)
if (!is.null(seurat_obj)) {
for (lv in levels(infercnv_obj@gene_order$chr)) {
seurat_obj@meta.data[[paste0("has_cnv_", lv)]] = features_to_add$feature_vector_chrs_has_cnv[[lv]]
seurat_obj@meta.data[[paste0("has_loss_", lv)]] = features_to_add$feature_vector_chrs_has_loss[[lv]]
seurat_obj@meta.data[[paste0("has_dupli_", lv)]] = features_to_add$feature_vector_chrs_has_dupli[[lv]]
seurat_obj@meta.data[[paste0("proportion_cnv_", lv)]] = features_to_add$feature_vector_chrs_gene_cnv_proportion[[lv]]
seurat_obj@meta.data[[paste0("proportion_loss_", lv)]] = features_to_add$feature_vector_chrs_gene_loss_proportion[[lv]]
seurat_obj@meta.data[[paste0("proportion_dupli_", lv)]] = features_to_add$feature_vector_chrs_gene_dupli_proportion[[lv]]
seurat_obj@meta.data[[paste0("proportion_scaled_cnv_", lv)]] = features_to_add$feature_vector_chrs_gene_cnv_proportion_scaled[[lv]]
seurat_obj@meta.data[[paste0("proportion_scaled_loss_", lv)]] = features_to_add$feature_vector_chrs_gene_loss_proportion_scaled[[lv]]
seurat_obj@meta.data[[paste0("proportion_scaled_dupli_", lv)]] = features_to_add$feature_vector_chrs_gene_dupli_proportion_scaled[[lv]]
}
for (n in names(features_to_add)[grep(names(features_to_add), pattern = "top_")] ) {
seurat_obj@meta.data[[n]] = features_to_add[[n]]
}
}
out_mat = matrix(NA, ncol=((9 * length(levels(infercnv_obj@gene_order$chr))) + length(features_to_add) - 9), nrow=ncol(infercnv_obj@expr.data))
out_mat_feature_names = vector("character", ((9 * length(levels(infercnv_obj@gene_order$chr))) + length(features_to_add) - 9))
i = 1
for (lv in levels(infercnv_obj@gene_order$chr)) {
out_mat[, i] = features_to_add$feature_vector_chrs_has_cnv[[lv]]
out_mat[, i+1] = features_to_add$feature_vector_chrs_has_loss[[lv]]
out_mat[, i+2] = features_to_add$feature_vector_chrs_has_dupli[[lv]]
out_mat[, i+3] = features_to_add$feature_vector_chrs_gene_cnv_proportion[[lv]]
out_mat[, i+4] = features_to_add$feature_vector_chrs_gene_loss_proportion[[lv]]
out_mat[, i+5] = features_to_add$feature_vector_chrs_gene_dupli_proportion[[lv]]
out_mat[, i+6] = features_to_add$feature_vector_chrs_gene_cnv_proportion_scaled[[lv]]
out_mat[, i+7] = features_to_add$feature_vector_chrs_gene_loss_proportion_scaled[[lv]]
out_mat[, i+8] = features_to_add$feature_vector_chrs_gene_dupli_proportion_scaled[[lv]]
out_mat_feature_names[i:(i+8)] = c(paste0("has_cnv_", lv), paste0("has_loss_", lv), paste0("has_dupli_", lv), paste0("proportion_cnv_", lv), paste0("proportion_loss_", lv), paste0("proportion_dupli_", lv), paste0("proportion_scaled_cnv_", lv), paste0("proportion_scaled_loss_", lv), paste0("proportion_scaled_dupli_", lv))
i = i + 9
}
for (n in names(features_to_add)[grep(names(features_to_add), pattern = "top_")] ) {
out_mat[, i] = features_to_add[[n]]
out_mat_feature_names[i] = n
i = i + 1
}
colnames(out_mat) = out_mat_feature_names
row.names(out_mat) = colnames(infercnv_obj@expr.data)
write.table(out_mat, paste(infercnv_output_path, "map_metadata_from_infercnv.txt", sep=.Platform$file.sep) , quote=FALSE, sep="\t")
return(seurat_obj)
}
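# Minimal usage sketch (not run): `seu` and "infercnv_out" are placeholders for an existing
# Seurat object and a finished infercnv output folder; neither ships with this file.
# seu <- add_to_seurat(seurat_obj = seu,
#                      infercnv_output_path = "infercnv_out",
#                      top_n = 10,
#                      bp_tolerance = 2000000)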
#' @title .get_features()
#'
#' @description Get data from infercnv objects to add to Seurat meta.data
#'
#' @param infercnv_obj infercnv hmm object
#'
#' @param regions Table with predicted CNAs regions from the HMM model
#'
#' @param hmm_genes Table with predicted CNAs genes from the HMM model
#'
#' @param center_state Value that represents the neutral state in the HMM results.
#'
#' @param scaling_factor Factor to multiply divergence from center_state to get CNA amplitude.
#'
#' @param top_n How many of the largest CNA (in number of genes) to get.
#'
#' @param bp_tolerance How many bp of tolerance to have around feature start/end positions (passed on to .get_top_n_regions).
#'
#' @return all_features A list of all the calculated meta.data to add.
#'
#' @keywords internal
#' @noRd
#'
.get_features <- function(infercnv_obj, regions, hmm_genes, center_state, scaling_factor, top_n, bp_tolerance) {
chr_gene_count = table(infercnv_obj@gene_order$chr)
# features templates for initialization
double_feature_vector = vector(mode="double", length=ncol(infercnv_obj@expr.data))
names(double_feature_vector) = colnames(infercnv_obj@expr.data)
logical_feature_vector = vector(mode="logical", length=ncol(infercnv_obj@expr.data))
names(logical_feature_vector) = colnames(infercnv_obj@expr.data)
# initialize features lists
all_features = c()
all_features$feature_vector_chrs_has_cnv = c()
all_features$feature_vector_chrs_has_dupli = c()
all_features$feature_vector_chrs_has_loss = c()
all_features$feature_vector_chrs_gene_cnv_proportion = c()
all_features$feature_vector_chrs_gene_dupli_proportion = c()
all_features$feature_vector_chrs_gene_loss_proportion = c()
all_features$feature_vector_chrs_gene_cnv_proportion_scaled = c()
all_features$feature_vector_chrs_gene_dupli_proportion_scaled = c()
all_features$feature_vector_chrs_gene_loss_proportion_scaled = c()
for (lv in levels(infercnv_obj@gene_order$chr)) {
all_features$feature_vector_chrs_has_cnv[[lv]] = logical_feature_vector
all_features$feature_vector_chrs_has_dupli[[lv]] = logical_feature_vector
all_features$feature_vector_chrs_has_loss[[lv]] = logical_feature_vector
all_features$feature_vector_chrs_gene_cnv_proportion[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_dupli_proportion[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_loss_proportion[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_cnv_proportion_scaled[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_dupli_proportion_scaled[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_loss_proportion_scaled[[lv]] = double_feature_vector
}
# map for top_n mapping
subclust_name_to_clust = list()
for (clust in names(infercnv_obj@tumor_subclusters$subclusters)) {
for (subclust in names(infercnv_obj@tumor_subclusters$subclusters[[clust]])) {
subclust_name = paste(clust, subclust, sep=".")
subclust_name_to_clust[[subclust_name]] = c(clust, subclust)
res = regions[regions$cell_group_name == subclust_name, , drop=FALSE]
gres = hmm_genes[hmm_genes$cell_group_name == subclust_name, , drop=FALSE]
if (nrow(res) > 0) {
for (c in unique(res$chr)) {
all_features$feature_vector_chrs_has_cnv[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
all_features$feature_vector_chrs_gene_cnv_proportion[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (length(which(gres$chr == c)) / chr_gene_count[[c]])
all_features$feature_vector_chrs_gene_cnv_proportion_scaled[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (sum(abs(gres[(which(gres$chr == c)), "state"] - center_state)) / (chr_gene_count[[c]] * scaling_factor))
}
sub_gres = gres[gres$state < center_state, ]
for (c in unique(sub_gres$chr)) {
all_features$feature_vector_chrs_has_loss[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
all_features$feature_vector_chrs_gene_loss_proportion[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (length(which(sub_gres$chr == c)) / chr_gene_count[[c]])
all_features$feature_vector_chrs_gene_loss_proportion_scaled[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (abs(sum(sub_gres[(which(sub_gres$chr == c)), "state"] - center_state)) / (chr_gene_count[[c]] * scaling_factor))
}
sub_gres = gres[gres$state > center_state, ]
for (c in unique(sub_gres$chr)) {
all_features$feature_vector_chrs_has_dupli[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
all_features$feature_vector_chrs_gene_dupli_proportion[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (length(which(sub_gres$chr == c)) / chr_gene_count[[c]])
all_features$feature_vector_chrs_gene_dupli_proportion_scaled[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (sum(sub_gres[(which(sub_gres$chr == c)), "state"] - center_state) / (chr_gene_count[[c]] * scaling_factor))
}
}
}
}
# sorted_regions = sort(table(hmm_genes$gene_region_name), decreasing=TRUE)
sorted_regions_loss = sort(table(hmm_genes$gene_region_name[hmm_genes$state < center_state]), decreasing=TRUE)
sorted_regions_dupli = sort(table(hmm_genes$gene_region_name[hmm_genes$state > center_state]), decreasing=TRUE)
# top_n_cnv = .get_top_n_regions(hmm_genes = hmm_genes, sorted_regions = sorted_regions, top_n = top_n, bp_tolerance = bp_tolerance)
top_n_loss = .get_top_n_regions(hmm_genes = hmm_genes, sorted_regions = sorted_regions_loss, top_n = top_n, bp_tolerance = bp_tolerance)
top_n_dupli = .get_top_n_regions(hmm_genes = hmm_genes, sorted_regions = sorted_regions_dupli, top_n = top_n, bp_tolerance = bp_tolerance)
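    # top_n_loss / top_n_dupli each hold, per top region, the subcluster names and merged
    # region names involved; they drive the top_loss_* / top_dupli_* flags built below.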
# for (i in seq_along(top_n_cnv)) {
# feature_name = paste0("top_cnv_", i)
# all_features[[feature_name]] = logical_feature_vector
# for (subclust_name in top_n_cnv[[i]]$subclust_name) {
# clust = subclust_name_to_clust[[subclust_name]][1]
# subclust = subclust_name_to_clust[[subclust_name]][2]
# all_features[[feature_name]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
# }
# }
for (i in seq_along(top_n_loss)) {
feature_name = paste0("top_loss_", i)
all_features[[feature_name]] = logical_feature_vector
        for (subclust_name in top_n_loss[[i]]$subclust_names) {
clust = subclust_name_to_clust[[subclust_name]][1]
subclust = subclust_name_to_clust[[subclust_name]][2]
all_features[[feature_name]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
}
}
for (i in seq_along(top_n_dupli)) {
feature_name = paste0("top_dupli_", i)
all_features[[feature_name]] = logical_feature_vector
        for (subclust_name in top_n_dupli[[i]]$subclust_names) {
clust = subclust_name_to_clust[[subclust_name]][1]
subclust = subclust_name_to_clust[[subclust_name]][2]
all_features[[feature_name]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
}
}
return(all_features)
}
#' @title .get_top_n_regions()
#'
#' @description Get top n largest CNA regions in number of genes
#'
#' @param hmm_genes Table with predicted CNAs genes from the HMM model
#'
#' @param sorted_regions Table of region names sorted by size in number of genes for the CNA type desired (gain/loss/both)
#'
#' @param top_n How many of the largest CNA (in number of genes) to get.
#'
#' @param bp_tolerance How many bp of tolerance to have around region start/end positions when grouping overlapping regions across subclusters.
#'
#' @return top_regions A list of the top_n largest CNA regions, each holding the subcluster names and region names involved.
#'
#' @keywords internal
#' @noRd
#'
.get_top_n_regions <- function(hmm_genes, sorted_regions, top_n, bp_tolerance) {
j = 1
top_regions = vector("list", top_n)
used_regions = c()
# flog.debug("sorted regions are:")
# for(sr in names(sorted_regions)) {
# flog.debug(paste(sr, sorted_regions[sr]))
# }
for (i in seq_len(nrow(sorted_regions))) {
if (names(sorted_regions[i]) %in% used_regions) {
next
}
genes_in_region = hmm_genes[which(hmm_genes$gene_region_name %in% names(sorted_regions[i])), ]
region_chr = genes_in_region$chr[1]
region_start_low = min(genes_in_region$start)
region_start_high = region_start_low
region_end_low = max(genes_in_region$end)
region_end_high = region_end_low
to_ignore = which(hmm_genes$gene_region_name %in% used_regions)
if (length(to_ignore) > 0) {
same_chr = setdiff(which(hmm_genes$chr == region_chr), to_ignore)
}
else {
same_chr = which(hmm_genes$chr == region_chr)
}
initial_close = list()
repeat {
close_start = same_chr[which((hmm_genes$start[same_chr] <= region_start_high + bp_tolerance) & (hmm_genes$start[same_chr] >= region_start_low - bp_tolerance))]
close_end = same_chr[which((hmm_genes$end[same_chr] <= region_end_high + bp_tolerance) & (hmm_genes$end[same_chr] >= region_end_low - bp_tolerance))]
close_start_end = intersect(unique(hmm_genes$gene_region_name[close_start]), unique(hmm_genes$gene_region_name[close_end]))
if ((length(setdiff(close_start_end, initial_close)) == 0) && (length(setdiff(initial_close, close_start_end)) == 0)) {
break
}
else {
initial_close = close_start_end
starts = c()
ends = c()
for (regi in close_start_end) {
starts = c(starts, min(hmm_genes$start[which(hmm_genes$gene_region_name == regi)]))
ends = c(ends, max(hmm_genes$end[which(hmm_genes$gene_region_name == regi)]))
}
region_start_low = min(starts)
region_start_high = max(starts)
region_end_low = min(ends)
region_end_high = max(ends)
}
}
if (length(close_start_end) > 0) {
top_regions[[j]]$subclust_names = unique(hmm_genes$cell_group_name[which(hmm_genes$gene_region_name %in% close_start_end)])
top_regions[[j]]$regions_names = close_start_end
flog.debug(paste0("top cnv ", j, " is composed of subclusts: "))#, paste(close_start_end, sep=" ")))
flog.debug(paste(top_regions[[j]]$subclust_names, sep=" "))
flog.debug("and region names: ")
flog.debug(paste(top_regions[[j]]$regions_names, sep=" "))
}
else {
flog.error("Did not even find itself, error.")
stop()
}
used_regions = c(used_regions, close_start_end)
if (length(used_regions) != length(unique(used_regions))) {
flog.error("Used the same region twice")
stop()
}
if (j == top_n) {
break
}
j = j + 1
}
return(top_regions)
}
# .get_top_n_regions <- function(hmm_genes, sorted_regions, top_n, bp_tolerance) {
# j = 1
# previous_region_chr = -1
# previous_region_start = -1
# previous_region_end = -1
# top_regions = vector("list", top_n)
# for (i in seq_len(nrow(sorted_regions))) {
# genes_in_region = hmm_genes[which(hmm_genes$gene_region_name %in% names(sorted_regions[i])), ]
# region_chr = genes_in_region$chr[1]
# region_start = min(genes_in_region$start)
# region_end = max(genes_in_region$end)
# # check if the current region is the same as the previous one for a different subcluster or not
# # if it is, extend the previous assignment without increasing the count of found top hits
# if (region_chr == previous_region_chr && region_start <= previous_region_start + bp_tolerance && region_start >= previous_region_start - bp_tolerance && region_end <= previous_region_end + bp_tolerance && region_end >= previous_region_end - bp_tolerance) {
# top_regions[[j]]$subclust_names = c(top_regions[[j]]$subclust_names, genes_in_region$cell_group_name[1])
# top_regions[[j]]$regions_names = c(top_regions[[j]]$regions_names, genes_in_region$gene_region_name[1])
# }
# else {
# top_regions[[j]]$subclust_names = genes_in_region$cell_group_name[1]
# top_regions[[j]]$regions_names = genes_in_region$gene_region_name[1]
# previous_region_chr = region_chr
# previous_region_start = region_start
# previous_region_end = region_end
# j = j + 1
# }
# if (j == top_n + 1) {
# break
# }
# }
# if (j < top_n + 1) { # if less non unique regions than top_n
# top_regions = top_regions[1:j]
# }
# return(top_regions)
# }
##' @keywords internal
##' @noRd
##'
#make_seurat_from_infercnv_obj <- function(infercnv_obj) {
# return(CreateSeuratObject(counts = infercnv_obj@count.data, project="infercnv", min.cells = 3, min.features = 200))
#}
#
##' @keywords internal
##' @noRd
##'
#make_seurat_from_infercnv <- function(infercnv_output_path) {
# if (file.exists(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))) {
# return(make_seurat_from_infercnv_obj(readRDS(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))))
# }
# else {
# stop()
# }
#}
|
/R/seurat_interaction.R
|
no_license
|
xinhuang420/infercnv
|
R
| false | false | 21,647 |
r
|
#' @description Add meta.data about CNAs to a Seurat object from an infercnv_obj
#'
#' @title add_to_seurat()
#'
#' @param seurat_obj Seurat object to add meta.data to (default: NULL)
#'
#' @param infercnv_output_path Path to the output folder of the infercnv run to use
#'
#' @param top_n How many of the largest CNA (in number of genes) to get.
#'
#' @param bp_tolerance How many bp of tolerance to have around feature start/end positions for top_n largest CNVs.
#'
#' @return seurat_obj
#'
#' @export
#'
add_to_seurat <- function(seurat_obj = NULL,
infercnv_output_path,
top_n = 10,
bp_tolerance = 2000000) {
lfiles <- list.files(infercnv_output_path, full.names = FALSE)
if (!file.exists(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))) {
flog.warn(sprintf("::Could not find \"run.final.infercnv_obj\" file at: %s"), paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))
stop()
}
infercnv_obj = readRDS(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))
if (is.null(seurat_obj)) {
flog.info("No Seurat object provided, will only write metadata matrix.")
}
else if(!(setequal(row.names(seurat_obj@meta.data), colnames(infercnv_obj@expr.data)) ||
setequal(colnames(seurat_obj@assays$RNA), colnames(infercnv_obj@expr.data)))) {
flog.warn("::Cell names in Seurat object and infercnv results do not match")
stop()
}
## add check that data row/col names match seurat obj
if (any(grep(lfiles, pattern="HMM_CNV_predictions.HMM.*.Pnorm_0.[0-9]+"))) {
###### states are 0/0.5/1/1.5/2
scaling_factor = 1
if (any(grep(lfiles, pattern="HMM_CNV_predictions.HMMi6.*.Pnorm_0.[0-9]+"))) {
center_state = 1
}
else if (any(grep(lfiles, pattern="HMM_CNV_predictions.HMMi3.*.Pnorm_0.[0-9]+"))) {
center_state = 1
}
else {
flog.warn("::Found filtered HMM predictions output, but they do not match any known model type.")
stop()
}
# sort to take lowest BayesProb if there are multiple
regions = read.table(paste(infercnv_output_path, sort(lfiles[grep(lfiles, pattern="HMM_CNV_predictions.HMMi6.*.Pnorm_0.[0-9]+.pred_cnv_regions.dat")])[1], sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
hmm_genes = read.table(paste(infercnv_output_path, sort(lfiles[grep(lfiles, pattern="HMM_CNV_predictions.HMMi6.*.Pnorm_0.[0-9]+.pred_cnv_genes.dat")])[1], sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
# from_pbayes()
}
else if (any(grep(lfiles, pattern = "17_HMM_preds"))) {
###### states are 1/2/3/4/5/6
scaling_factor = 2
if (any(grep(lfiles, pattern = "17_HMM_predHMMi6"))) {
center_state = 3
}
else if (any(grep(lfiles, pattern = "17_HMM_predHMMi3"))) {
center_state = 2
}
else {
flog.warn("::Found HMM predictions output, but they do not match any known model type")
stop()
}
regions = read.table(paste(infercnv_output_path, "17_HMM_preds.pred_cnv_regions.dat", sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
hmm_genes = read.table(paste(infercnv_output_path, "17_HMM_preds.pred_cnv_genes.dat", sep=.Platform$file.sep), sep="\t", header=TRUE, check.names=FALSE)
# from_hmm()
}
else {
flog.warn(sprintf("::Could not find any HMM predictions outputs at: %s", infercnv_output_path))
stop()
}
features_to_add <- .get_features(infercnv_obj = infercnv_obj,
regions = regions,
hmm_genes = hmm_genes,
center_state = center_state,
scaling_factor = scaling_factor,
top_n = top_n,
bp_tolerance = bp_tolerance)
if (!is.null(seurat_obj)) {
for (lv in levels(infercnv_obj@gene_order$chr)) {
seurat_obj@meta.data[[paste0("has_cnv_", lv)]] = features_to_add$feature_vector_chrs_has_cnv[[lv]]
seurat_obj@meta.data[[paste0("has_loss_", lv)]] = features_to_add$feature_vector_chrs_has_loss[[lv]]
seurat_obj@meta.data[[paste0("has_dupli_", lv)]] = features_to_add$feature_vector_chrs_has_dupli[[lv]]
seurat_obj@meta.data[[paste0("proportion_cnv_", lv)]] = features_to_add$feature_vector_chrs_gene_cnv_proportion[[lv]]
seurat_obj@meta.data[[paste0("proportion_loss_", lv)]] = features_to_add$feature_vector_chrs_gene_loss_proportion[[lv]]
seurat_obj@meta.data[[paste0("proportion_dupli_", lv)]] = features_to_add$feature_vector_chrs_gene_dupli_proportion[[lv]]
seurat_obj@meta.data[[paste0("proportion_scaled_cnv_", lv)]] = features_to_add$feature_vector_chrs_gene_cnv_proportion_scaled[[lv]]
seurat_obj@meta.data[[paste0("proportion_scaled_loss_", lv)]] = features_to_add$feature_vector_chrs_gene_loss_proportion_scaled[[lv]]
seurat_obj@meta.data[[paste0("proportion_scaled_dupli_", lv)]] = features_to_add$feature_vector_chrs_gene_dupli_proportion_scaled[[lv]]
}
for (n in names(features_to_add)[grep(names(features_to_add), pattern = "top_")] ) {
seurat_obj@meta.data[[n]] = features_to_add[[n]]
}
}
out_mat = matrix(NA, ncol=((9 * length(levels(infercnv_obj@gene_order$chr))) + length(features_to_add) - 9), nrow=ncol(infercnv_obj@expr.data))
out_mat_feature_names = vector("character", ((9 * length(levels(infercnv_obj@gene_order$chr))) + length(features_to_add) - 9))
i = 1
for (lv in levels(infercnv_obj@gene_order$chr)) {
out_mat[, i] = features_to_add$feature_vector_chrs_has_cnv[[lv]]
out_mat[, i+1] = features_to_add$feature_vector_chrs_has_loss[[lv]]
out_mat[, i+2] = features_to_add$feature_vector_chrs_has_dupli[[lv]]
out_mat[, i+3] = features_to_add$feature_vector_chrs_gene_cnv_proportion[[lv]]
out_mat[, i+4] = features_to_add$feature_vector_chrs_gene_loss_proportion[[lv]]
out_mat[, i+5] = features_to_add$feature_vector_chrs_gene_dupli_proportion[[lv]]
out_mat[, i+6] = features_to_add$feature_vector_chrs_gene_cnv_proportion_scaled[[lv]]
out_mat[, i+7] = features_to_add$feature_vector_chrs_gene_loss_proportion_scaled[[lv]]
out_mat[, i+8] = features_to_add$feature_vector_chrs_gene_dupli_proportion_scaled[[lv]]
out_mat_feature_names[i:(i+8)] = c(paste0("has_cnv_", lv), paste0("has_loss_", lv), paste0("has_dupli_", lv), paste0("proportion_cnv_", lv), paste0("proportion_loss_", lv), paste0("proportion_dupli_", lv), paste0("proportion_scaled_cnv_", lv), paste0("proportion_scaled_loss_", lv), paste0("proportion_scaled_dupli_", lv))
i = i + 9
}
for (n in names(features_to_add)[grep(names(features_to_add), pattern = "top_")] ) {
out_mat[, i] = features_to_add[[n]]
out_mat_feature_names[i] = n
i = i + 1
}
colnames(out_mat) = out_mat_feature_names
row.names(out_mat) = colnames(infercnv_obj@expr.data)
write.table(out_mat, paste(infercnv_output_path, "map_metadata_from_infercnv.txt", sep=.Platform$file.sep) , quote=FALSE, sep="\t")
return(seurat_obj)
}
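# Illustrative usage sketch (not from the package documentation; the path, object name
# and the metadata columns inspected below are assumptions). It presumes a finished
# infercnv run in "infercnv_out/" and a Seurat object `seu` whose cell names match the
# infercnv expression matrix.
# library(Seurat)
# seu <- add_to_seurat(seurat_obj = seu,
#                      infercnv_output_path = "infercnv_out",
#                      top_n = 10,
#                      bp_tolerance = 2000000)
# head(seu@meta.data[, grep("^has_cnv_", colnames(seu@meta.data))])
# # a tab-delimited "map_metadata_from_infercnv.txt" is also written to infercnv_out/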
#' @title .get_features()
#'
#' @description Get data from infercnv objects to add to Seurat meta.data
#'
#' @param infercnv_obj infercnv hmm object
#'
#' @param regions Table with predicted CNAs regions from the HMM model
#'
#' @param hmm_genes Table with predicted CNAs genes from the HMM model
#'
#' @param center_state Value that represents the neutral state in the HMM results.
#'
#' @param scaling_factor Factor to multiply divergence from center_state to get CNA amplitude.
#'
#' @param top_n How many of the largest CNA (in number of genes) to get.
#'
#' @return all_features A list of all the calculated meta.data to add.
#'
#' @keywords internal
#' @noRd
#'
.get_features <- function(infercnv_obj, regions, hmm_genes, center_state, scaling_factor, top_n, bp_tolerance) {
chr_gene_count = table(infercnv_obj@gene_order$chr)
# features templates for initialization
double_feature_vector = vector(mode="double", length=ncol(infercnv_obj@expr.data))
names(double_feature_vector) = colnames(infercnv_obj@expr.data)
logical_feature_vector = vector(mode="logical", length=ncol(infercnv_obj@expr.data))
names(logical_feature_vector) = colnames(infercnv_obj@expr.data)
# initialize features lists
all_features = c()
all_features$feature_vector_chrs_has_cnv = c()
all_features$feature_vector_chrs_has_dupli = c()
all_features$feature_vector_chrs_has_loss = c()
all_features$feature_vector_chrs_gene_cnv_proportion = c()
all_features$feature_vector_chrs_gene_dupli_proportion = c()
all_features$feature_vector_chrs_gene_loss_proportion = c()
all_features$feature_vector_chrs_gene_cnv_proportion_scaled = c()
all_features$feature_vector_chrs_gene_dupli_proportion_scaled = c()
all_features$feature_vector_chrs_gene_loss_proportion_scaled = c()
for (lv in levels(infercnv_obj@gene_order$chr)) {
all_features$feature_vector_chrs_has_cnv[[lv]] = logical_feature_vector
all_features$feature_vector_chrs_has_dupli[[lv]] = logical_feature_vector
all_features$feature_vector_chrs_has_loss[[lv]] = logical_feature_vector
all_features$feature_vector_chrs_gene_cnv_proportion[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_dupli_proportion[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_loss_proportion[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_cnv_proportion_scaled[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_dupli_proportion_scaled[[lv]] = double_feature_vector
all_features$feature_vector_chrs_gene_loss_proportion_scaled[[lv]] = double_feature_vector
}
# map for top_n mapping
subclust_name_to_clust = list()
for (clust in names(infercnv_obj@tumor_subclusters$subclusters)) {
for (subclust in names(infercnv_obj@tumor_subclusters$subclusters[[clust]])) {
subclust_name = paste(clust, subclust, sep=".")
subclust_name_to_clust[[subclust_name]] = c(clust, subclust)
res = regions[regions$cell_group_name == subclust_name, , drop=FALSE]
gres = hmm_genes[hmm_genes$cell_group_name == subclust_name, , drop=FALSE]
if (nrow(res) > 0) {
for (c in unique(res$chr)) {
all_features$feature_vector_chrs_has_cnv[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
all_features$feature_vector_chrs_gene_cnv_proportion[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (length(which(gres$chr == c)) / chr_gene_count[[c]])
all_features$feature_vector_chrs_gene_cnv_proportion_scaled[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (sum(abs(gres[(which(gres$chr == c)), "state"] - center_state)) / (chr_gene_count[[c]] * scaling_factor))
}
sub_gres = gres[gres$state < center_state, ]
for (c in unique(sub_gres$chr)) {
all_features$feature_vector_chrs_has_loss[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
all_features$feature_vector_chrs_gene_loss_proportion[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (length(which(sub_gres$chr == c)) / chr_gene_count[[c]])
all_features$feature_vector_chrs_gene_loss_proportion_scaled[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (abs(sum(sub_gres[(which(sub_gres$chr == c)), "state"] - center_state)) / (chr_gene_count[[c]] * scaling_factor))
}
sub_gres = gres[gres$state > center_state, ]
for (c in unique(sub_gres$chr)) {
all_features$feature_vector_chrs_has_dupli[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
all_features$feature_vector_chrs_gene_dupli_proportion[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (length(which(sub_gres$chr == c)) / chr_gene_count[[c]])
all_features$feature_vector_chrs_gene_dupli_proportion_scaled[[c]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = (sum(sub_gres[(which(sub_gres$chr == c)), "state"] - center_state) / (chr_gene_count[[c]] * scaling_factor))
}
}
}
}
# sorted_regions = sort(table(hmm_genes$gene_region_name), decreasing=TRUE)
sorted_regions_loss = sort(table(hmm_genes$gene_region_name[hmm_genes$state < center_state]), decreasing=TRUE)
sorted_regions_dupli = sort(table(hmm_genes$gene_region_name[hmm_genes$state > center_state]), decreasing=TRUE)
# top_n_cnv = .get_top_n_regions(hmm_genes = hmm_genes, sorted_regions = sorted_regions, top_n = top_n, bp_tolerance = bp_tolerance)
top_n_loss = .get_top_n_regions(hmm_genes = hmm_genes, sorted_regions = sorted_regions_loss, top_n = top_n, bp_tolerance = bp_tolerance)
top_n_dupli = .get_top_n_regions(hmm_genes = hmm_genes, sorted_regions = sorted_regions_dupli, top_n = top_n, bp_tolerance = bp_tolerance)
# for (i in seq_along(top_n_cnv)) {
# feature_name = paste0("top_cnv_", i)
# all_features[[feature_name]] = logical_feature_vector
# for (subclust_name in top_n_cnv[[i]]$subclust_name) {
# clust = subclust_name_to_clust[[subclust_name]][1]
# subclust = subclust_name_to_clust[[subclust_name]][2]
# all_features[[feature_name]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
# }
# }
for (i in seq_along(top_n_loss)) {
feature_name = paste0("top_loss_", i)
all_features[[feature_name]] = logical_feature_vector
for (subclust_name in top_n_loss[[i]]$subclust_name) {
clust = subclust_name_to_clust[[subclust_name]][1]
subclust = subclust_name_to_clust[[subclust_name]][2]
all_features[[feature_name]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
}
}
for (i in seq_along(top_n_dupli)) {
feature_name = paste0("top_dupli_", i)
all_features[[feature_name]] = logical_feature_vector
for (subclust_name in top_n_dupli[[i]]$subclust_name) {
clust = subclust_name_to_clust[[subclust_name]][1]
subclust = subclust_name_to_clust[[subclust_name]][2]
all_features[[feature_name]][names(infercnv_obj@tumor_subclusters$subclusters[[clust]][[subclust]])] = TRUE
}
}
return(all_features)
}
#' @title .get_top_n_regions()
#'
#' @description Get top n largest CNA regions in number of genes
#'
#' @param hmm_genes Table with predicted CNAs genes from the HMM model
#'
#' @param sorted_region List of regions sorted by size in number of genes for the CNA type desired (gain/loss/both)
#'
#' @param top_n How many of the largest CNA (in number of genes) to get.
#'
#' @return all_features A list of all the calculated meta.data to add.
#'
#' @keywords internal
#' @noRd
#'
.get_top_n_regions <- function(hmm_genes, sorted_regions, top_n, bp_tolerance) {
j = 1
top_regions = vector("list", top_n)
used_regions = c()
# flog.debug("sorted regions are:")
# for(sr in names(sorted_regions)) {
# flog.debug(paste(sr, sorted_regions[sr]))
# }
for (i in seq_len(nrow(sorted_regions))) {
if (names(sorted_regions[i]) %in% used_regions) {
next
}
genes_in_region = hmm_genes[which(hmm_genes$gene_region_name %in% names(sorted_regions[i])), ]
region_chr = genes_in_region$chr[1]
region_start_low = min(genes_in_region$start)
region_start_high = region_start_low
region_end_low = max(genes_in_region$end)
region_end_high = region_end_low
to_ignore = which(hmm_genes$gene_region_name %in% used_regions)
if (length(to_ignore) > 0) {
same_chr = setdiff(which(hmm_genes$chr == region_chr), to_ignore)
}
else {
same_chr = which(hmm_genes$chr == region_chr)
}
initial_close = list()
repeat {
close_start = same_chr[which((hmm_genes$start[same_chr] <= region_start_high + bp_tolerance) & (hmm_genes$start[same_chr] >= region_start_low - bp_tolerance))]
close_end = same_chr[which((hmm_genes$end[same_chr] <= region_end_high + bp_tolerance) & (hmm_genes$end[same_chr] >= region_end_low - bp_tolerance))]
close_start_end = intersect(unique(hmm_genes$gene_region_name[close_start]), unique(hmm_genes$gene_region_name[close_end]))
if ((length(setdiff(close_start_end, initial_close)) == 0) && (length(setdiff(initial_close, close_start_end)) == 0)) {
break
}
else {
initial_close = close_start_end
starts = c()
ends = c()
for (regi in close_start_end) {
starts = c(starts, min(hmm_genes$start[which(hmm_genes$gene_region_name == regi)]))
ends = c(ends, max(hmm_genes$end[which(hmm_genes$gene_region_name == regi)]))
}
region_start_low = min(starts)
region_start_high = max(starts)
region_end_low = min(ends)
region_end_high = max(ends)
}
}
if (length(close_start_end) > 0) {
top_regions[[j]]$subclust_names = unique(hmm_genes$cell_group_name[which(hmm_genes$gene_region_name %in% close_start_end)])
top_regions[[j]]$regions_names = close_start_end
flog.debug(paste0("top cnv ", j, " is composed of subclusts: "))#, paste(close_start_end, sep=" ")))
flog.debug(paste(top_regions[[j]]$subclust_names, sep=" "))
flog.debug("and region names: ")
flog.debug(paste(top_regions[[j]]$regions_names, sep=" "))
}
else {
flog.error("Did not even find itself, error.")
stop()
}
used_regions = c(used_regions, close_start_end)
if (length(used_regions) != length(unique(used_regions))) {
flog.error("Used the same region twice")
stop()
}
if (j == top_n) {
break
}
j = j + 1
}
return(top_regions)
}
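# Illustrative sketch (hypothetical data) of the merging rule implemented above: regions
# whose start AND end positions lie within bp_tolerance of each other are collapsed into
# a single top hit shared by several subclusters. futile.logger must be attached for the
# flog.debug()/flog.error() calls inside the function.
# library(futile.logger)
# toy_genes <- data.frame(
#     gene_region_name = c("A", "A", "B", "B"),
#     cell_group_name  = c("malignant.1", "malignant.1", "malignant.2", "malignant.2"),
#     chr   = "chr1",
#     start = c(1.0e6, 1.2e6, 1.4e6, 1.5e6),
#     end   = c(5.0e6, 5.2e6, 5.3e6, 5.5e6))
# toy_sorted <- sort(table(toy_genes$gene_region_name), decreasing = TRUE)
# .get_top_n_regions(toy_genes, toy_sorted, top_n = 1, bp_tolerance = 2000000)
# # -> one entry whose regions_names are "A" and "B" and whose subclust_names are
# #    "malignant.1" and "malignant.2"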
# .get_top_n_regions <- function(hmm_genes, sorted_regions, top_n, bp_tolerance) {
# j = 1
# previous_region_chr = -1
# previous_region_start = -1
# previous_region_end = -1
# top_regions = vector("list", top_n)
# for (i in seq_len(nrow(sorted_regions))) {
# genes_in_region = hmm_genes[which(hmm_genes$gene_region_name %in% names(sorted_regions[i])), ]
# region_chr = genes_in_region$chr[1]
# region_start = min(genes_in_region$start)
# region_end = max(genes_in_region$end)
# # check if the current region is the same as the previous one for a different subcluster or not
# # if it is, extend the previous assignment without increasing the count of found top hits
# if (region_chr == previous_region_chr && region_start <= previous_region_start + bp_tolerance && region_start >= previous_region_start - bp_tolerance && region_end <= previous_region_end + bp_tolerance && region_end >= previous_region_end - bp_tolerance) {
# top_regions[[j]]$subclust_names = c(top_regions[[j]]$subclust_names, genes_in_region$cell_group_name[1])
# top_regions[[j]]$regions_names = c(top_regions[[j]]$regions_names, genes_in_region$gene_region_name[1])
# }
# else {
# top_regions[[j]]$subclust_names = genes_in_region$cell_group_name[1]
# top_regions[[j]]$regions_names = genes_in_region$gene_region_name[1]
# previous_region_chr = region_chr
# previous_region_start = region_start
# previous_region_end = region_end
# j = j + 1
# }
# if (j == top_n + 1) {
# break
# }
# }
# if (j < top_n + 1) { # if less non unique regions than top_n
# top_regions = top_regions[1:j]
# }
# return(top_regions)
# }
##' @keywords internal
##' @noRd
##'
#make_seurat_from_infercnv_obj <- function(infercnv_obj) {
# return(CreateSeuratObject(counts = infercnv_obj@count.data, project="infercnv", min.cells = 3, min.features = 200))
#}
#
##' @keywords internal
##' @noRd
##'
#make_seurat_from_infercnv <- function(infercnv_output_path) {
# if (file.exists(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))) {
# return(make_seurat_from_infercnv_obj(readRDS(paste(infercnv_output_path, "run.final.infercnv_obj", sep=.Platform$file.sep))))
# }
# else {
# stop()
# }
#}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meanShift.R
\name{meanShift}
\alias{meanShift}
\title{Mean shift classification}
\usage{
meanShift(queryData, trainData = queryData, nNeighbors = NROW(trainData),
algorithm = "LINEAR", kernelType = "NORMAL", bandwidth = rep(1,
NCOL(trainData)), alpha = 0, iterations = 10, epsilon = 1e-08,
epsilonCluster = 1e-04, parameters = NULL)
}
\arguments{
\item{queryData}{A matrix or vector of points to be classified by the mean
shift algorithm. Values must be finite and non-missing.}
\item{trainData}{A matrix or vector of points used to form a kernel density
estimate. The local maxima from this kernel density estimate will be used
for steepest ascent classification. If missing, \code{trainData} is set to \code{queryData}.}
\item{nNeighbors}{A scalar indicating the number of neighbors to consider for
the kernel density estimate. This is useful to speed up computation by
approximating the kernel density estimate. The default is all data.}
\item{algorithm}{A string indicating the algorithm to use for nearest neighbor
searches. Currently, only "LINEAR" and "KDTREE" methods are supported.}
\item{kernelType}{A string indicating the kernel associated with the kernel
density estimate that the mean shift is optimizing over. The possible
kernels are NORMAL, EPANECHNIKOV, and BIWEIGHT; the default is NORMAL.}
\item{bandwidth}{A vector of length equal to the number of columns in the
queryData matrix, or length one when queryData is a vector. This value will be
used in the kernel density estimate for steepest ascent classification. The
default is one for each dimension.}
\item{alpha}{A scalar tuning parameter for normal kernels. When this parameter
is set to zero, the mean shift algorithm will operate as usual. When this
parameter is set to one, the mean shift algorithm will be approximated through
Newton's Method. When set to a value between zero and one, a generalization
of Newton's Method and mean shift will be used instead providing a means
to balance convergence speed with stability. The default is zero, mean shift.}
\item{iterations}{The number of iterations to perform mean shift.}
\item{epsilon}{A scalar used to determine when to terminate the iteration of an
individual query point. If the distance between the query point at iteration \code{i}
and \code{i+1} is less than epsilon, then iteration ceases on this point.}
\item{epsilonCluster}{A scalar used to determine the minimum distance between distinct
clusters. This distance is applied after all iterations have finished and in
order of the rows of \code{queryData}.}
\item{parameters}{A scalar or vector of parameters used by the specific algorithm.
There are no optional parameters for the "LINEAR" method; "KDTREE" supports
optional parameters for the maximum number of points to store in a leaf node
and the maximum value for the quadratic form in the normal kernel, ignoring
the constant value -0.5.}
}
\value{
A list is returned containing two items: \code{assignment}, a vector of
classifications, and \code{value}, a vector or matrix containing the location of
the classified local maxima in the support, each row is associated with the
classified index in \code{assignment}.
}
\description{
\code{meanShift} performs classification of a set of query points using
steepest ascent to local maxima in a kernel density estimate.
}
\examples{
x <- matrix(runif(20),10,2)
classification <- meanShift(x,x)
x <- matrix(runif(20),10,2)
classification <- meanShift(x,
algorithm="KDTREE",
                 nNeighbors=8,
parameters=c(5,7.1) )
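# inspect the results: the cluster label of each query point and the location of the
# mode it converged to (component names follow the Value section above)
table(classification$assignment)
head(classification$value)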
}
\references{
Cheng, Y. (1995). \emph{Mean shift, mode seeking, and clustering}. IEEE transactions on pattern analysis and machine intelligence, 17(8), 790-799.
Fukunaga, K., & Hostetler, L. (1975). \emph{The estimation of the gradient of a density function, with applications in pattern recognition.} IEEE transactions on information theory, 21(1), 32-40.
Lisic, J. (2015). Parcel Level Agricultural Land Cover Prediction (Doctoral dissertation, George Mason University).
}
|
/man/meanShift.Rd
|
no_license
|
cran/meanShiftR
|
R
| false | true | 4,103 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meanShift.R
\name{meanShift}
\alias{meanShift}
\title{Mean shift classification}
\usage{
meanShift(queryData, trainData = queryData, nNeighbors = NROW(trainData),
algorithm = "LINEAR", kernelType = "NORMAL", bandwidth = rep(1,
NCOL(trainData)), alpha = 0, iterations = 10, epsilon = 1e-08,
epsilonCluster = 1e-04, parameters = NULL)
}
\arguments{
\item{queryData}{A matrix or vector of points to be classified by the mean
shift algorithm. Values must be finite and non-missing.}
\item{trainData}{A matrix or vector of points used to form a kernel density
estimate. The local maxima from this kernel density estimate will be used
for steepest ascent classification. If missing, \code{trainData} is set to \code{queryData}.}
\item{nNeighbors}{A scalar indicating the number of neighbors to consider for
the kernel density estimate. This is useful to speed up computation by
approximating the kernel density estimate. The default is all data.}
\item{algorithm}{A string indicating the algorithm to use for nearest neighbor
searches. Currently, only "LINEAR" and "KDTREE" methods are supported.}
\item{kernelType}{A string indicating the kernel associated with the kernel
density estimate that the mean shift is optimizing over. The possible
kernels are NORMAL, EPANECHNIKOV, and BIWEIGHT; the default is NORMAL.}
\item{bandwidth}{A vector of length equal to the number of columns in the
queryData matrix, or length one when queryData is a vector. This value will be
used in the kernel density estimate for steepest ascent classification. The
default is one for each dimension.}
\item{alpha}{A scalar tuning parameter for normal kernels. When this parameter
is set to zero, the mean shift algorithm will operate as usual. When this
parameter is set to one, the mean shift algorithm will be approximated through
Newton's Method. When set to a value between zero and one, a generalization
of Newton's Method and mean shift will be used instead providing a means
to balance convergence speed with stability. The default is zero, mean shift.}
\item{iterations}{The number of iterations to perform mean shift.}
\item{epsilon}{A scalar used to determine when to terminate the iteration of an
individual query point. If the distance between the query point at iteration \code{i}
and \code{i+1} is less than epsilon, then iteration ceases on this point.}
\item{epsilonCluster}{A scalar used to determine the minimum distance between distinct
clusters. This distance is applied after all iterations have finished and in
order of the rows of \code{queryData}.}
\item{parameters}{A scalar or vector of parameters used by the specific algorithm.
There are no optional parameters for the "LINEAR" method; "KDTREE" supports
optional parameters for the maximum number of points to store in a leaf node
and the maximum value for the quadratic form in the normal kernel, ignoring
the constant value -0.5.}
}
\value{
A list is returned containing two items: \code{assignment}, a vector of
classifications, and \code{value}, a vector or matrix containing the location of
the classified local maxima in the support, each row is associated with the
classified index in \code{assignment}.
}
\description{
\code{meanShift} performs classification of a set of query points using
steepest ascent to local maxima in a kernel density estimate.
}
\examples{
x <- matrix(runif(20),10,2)
classification <- meanShift(x,x)
x <- matrix(runif(20),10,2)
classification <- meanShift(x,
algorithm="KDTREE",
                 nNeighbors=8,
parameters=c(5,7.1) )
}
\references{
Cheng, Y. (1995). \emph{Mean shift, mode seeking, and clustering}. IEEE transactions on pattern analysis and machine intelligence, 17(8), 790-799.
Fukunaga, K., & Hostetler, L. (1975). \emph{The estimation of the gradient of a density function, with applications in pattern recognition.} IEEE transactions on information theory, 21(1), 32-40.
Lisic, J. (2015). Parcel Level Agricultural Land Cover Prediction (Doctoral dissertation, George Mason University).
}
|
setwd("/Users/USER/Desktop/dwbi data/tb datasets")
tb2<- read.csv("mortality_by_country.csv",header = T,na.strings = c(""))
tb2$nod_exhiv<-tb2$nodexhiv
tb2$nod_exhiv<-gsub('\\[.*?\\]', '', tb2$nod_exhiv)
tb2$nod_hivnegpop<-tb2$nodhivnegpop
tb2$nod_hivnegpop<-gsub('\\[.*?\\]', '', tb2$nod_hivnegpop)
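# Quick illustration (made-up value, not taken from the WHO file) of the bracket-stripping
# step above: the regex removes the "[low-high]" uncertainty interval so the remaining
# point estimate can later be coerced to an integer.
example_val <- "1500 [1200-1800]"
gsub('\\[.*?\\]', '', example_val)               # "1500 "
as.integer(gsub('\\[.*?\\]', '', example_val))   # 1500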
tb2$nodexhiv<-NULL
tb2$nodhivnegpop<-NULL
tb2$nod_exhiv<-as.character((tb2$nod_exhiv))
tb2$nod_exhiv<-as.integer((tb2$nod_exhiv))
tb2$nod_hivnegpop<-as.character((tb2$nod_hivnegpop))
tb2$nod_hivnegpop<-as.integer((tb2$nod_hivnegpop))
tb2$Country<-as.character((tb2$Country))
library(RODBC)   # odbcDriverConnect() and sqlSave() come from RODBC
ConnString <- odbcDriverConnect("Driver=SQL Server;Server=DELL; Database=Staging;trusted_connection=true")
sqlSave(ConnString,tb2,tablename = "Stg_Mortality",rownames = F )
|
/tb datasets/original/mortality_by_country.R
|
no_license
|
atifferoz/Data-warehouse-and-Business-Intelligence-project-on-Tuberculosis-WHO-
|
R
| false | false | 765 |
r
|
setwd("/Users/USER/Desktop/dwbi data/tb datasets")
tb2<- read.csv("mortality_by_country.csv",header = T,na.strings = c(""))
tb2$nod_exhiv<-tb2$nodexhiv
tb2$nod_exhiv<-gsub('\\[.*?\\]', '', tb2$nod_exhiv)
tb2$nod_hivnegpop<-tb2$nodhivnegpop
tb2$nod_hivnegpop<-gsub('\\[.*?\\]', '', tb2$nod_hivnegpop)
tb2$nodexhiv<-NULL
tb2$nodhivnegpop<-NULL
tb2$nod_exhiv<-as.character((tb2$nod_exhiv))
tb2$nod_exhiv<-as.integer((tb2$nod_exhiv))
tb2$nod_hivnegpop<-as.character((tb2$nod_hivnegpop))
tb2$nod_hivnegpop<-as.integer((tb2$nod_hivnegpop))
tb2$Country<-as.character((tb2$Country))
library(RODBC)   # odbcDriverConnect() and sqlSave() come from RODBC
ConnString <- odbcDriverConnect("Driver=SQL Server;Server=DELL; Database=Staging;trusted_connection=true")
sqlSave(ConnString,tb2,tablename = "Stg_Mortality",rownames = F )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PLMIXfunctions.R
\name{print.gsPLMIX}
\alias{print.gsPLMIX}
\title{Print of the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models}
\usage{
\method{print}{gsPLMIX}(x, ...)
}
\arguments{
\item{x}{Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.}
\item{...}{Further arguments passed to or from other methods (not used).}
}
\description{
\code{print} method for class \code{gsPLMIX}. It shows some general information on the Gibbs sampling simulation for a Bayesian mixture of Plackett-Luce models.
}
\examples{
## Print of the Gibbs sampling procedure
data(d_carconf)
GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
print(GIBBS)
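## the print method only summarises the run; the object's components (their names are
## determined by gibbsPLMIX itself) can be inspected with str()
str(GIBBS, max.level = 1)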
}
\seealso{
\code{\link{gibbsPLMIX}}
}
\author{
Cristina Mollica and Luca Tardella
}
|
/PLMIX/man/print.gsPLMIX.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false | true | 875 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PLMIXfunctions.R
\name{print.gsPLMIX}
\alias{print.gsPLMIX}
\title{Print of the Gibbs sampling simulation of a Bayesian mixture of Plackett-Luce models}
\usage{
\method{print}{gsPLMIX}(x, ...)
}
\arguments{
\item{x}{Object of class \code{gsPLMIX} returned by the \code{gibbsPLMIX} function.}
\item{...}{Further arguments passed to or from other methods (not used).}
}
\description{
\code{print} method for class \code{gsPLMIX}. It shows some general information on the Gibbs sampling simulation for a Bayesian mixture of Plackett-Luce models.
}
\examples{
## Print of the Gibbs sampling procedure
data(d_carconf)
GIBBS <- gibbsPLMIX(pi_inv=d_carconf, K=ncol(d_carconf), G=3, n_iter=30, n_burn=10)
print(GIBBS)
}
\seealso{
\code{\link{gibbsPLMIX}}
}
\author{
Cristina Mollica and Luca Tardella
}
|
## Function "makeCacheMatrix" creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(solve) m <<- solve
getinv <- function() m
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Function "cacheSolve" computes the inverse of the matrix returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
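## Example usage (illustrative; not part of the original assignment):
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm)   # computes the inverse with solve() and caches it
cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse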
|
/cachematrix.R
|
no_license
|
FrancoGalta/ProgrammingAssignment2
|
R
| false | false | 762 |
r
|
## Function "makeCacheMatrix" creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(solve) m <<- solve
getinv <- function() m
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Function "cacheSolve" computes the inverse of the matrix returned by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
}
|
#Setting the working directory to my local system
setwd("D:/Scala_Course/Project/Data")
# INSTALL THE PACKAGE FIRST TIME
#install.packages("sqldf")
library("sqldf")
#Identified the fastest way to read the file
#Fread is a function in data.table package which can be used to read a file
# very fast.
## CAN ALSO USE read.csv and read.table in case you want it
## JUST NEED TO INSTALL IT THE FIRST TIME
#install.packages("data.table")
#install.packages("plr")
library(data.table)
library(plyr)
library(dplyr)
# Data with the quarter and year details
data_tkcarrier <- fread(input ="Fare_Carrier.csv",header = TRUE)
#Data with the carrier, seats and source and destination details
data_carrier <- fread(input = "S&D.csv",header = TRUE)
#Creating Lookup data of Airport ID
lookup <- fread(input ="Longitude_Latitude.csv",header = TRUE)
uniquedata<- unique(lookup, by='AIRPORT_ID')
#joining tables
dest_sample<-sqldf("select * from data_carrier as a inner join uniquedata as b on b.AIRPORT_ID=a.DEST_AIRPORT_ID")
origin_sample<-sqldf("select * from dest_sample as a inner join uniquedata as b on b.AIRPORT_ID=a.ORIGIN_AIRPORT_ID")
cordinates<-sqldf("select a.origin_airport_id,c.latitude as ORIGIN_LATITUDE,c.longitude as ORIGIN_LONGITUDE,a.dest_airport_id,b.latitude as DES_LATITUDE,b.longitude as DEST_LONGITUDE from data_carrier as a inner join uniquedata as b on b.AIRPORT_ID=a.DEST_AIRPORT_ID inner join uniquedata as c on c.AIRPORT_ID=a.ORIGIN_AIRPORT_ID")
colnames(origin_sample)
data_input_cordinates <- cbind(origin_sample[,-c(3,8,14:21)],cordinates)
colnames(data_input_cordinates)
#write.csv(sample,file = "testfile.csv",row.names = FALSE)
#head(sample)
#setnames(test2,c("AIRPORT_ID","AIRPORT_ID.1","LATITUDE","LONGITUDE","LONGITUDE.1","LATITUDE.1"),c("DEST_AIRPORT_ID","ORG_AIRPORT_ID","DEST_LATITUDE","DEST_LONGITUDE","ORG_LONGITUDE","ORG_LATITUDE"))
#head(test2)
#Creating a new data frame which contains the first 10000 rows
# BETTER APPROACH TO USE SAMPLE FUNCTION. WILL ADD THAT IN THE NEXT UPDATED
# SCRIPT
# Test sample with data_opcarrier
test_data <- data_tkcarrier[1:10000,]
# Test sample with data_carrier
test_data2 <- data_input_cordinates[1:10000,]
# In opcarrier Data frame, the column OPERATING_CARRIER, is changed to CARRIER
# This is to maintain the same column name for all data frames
setnames(test_data,"TICKET_CARRIER","CARRIER")
# SQL DF you can run any SQL command by treating the Data frame as a TABLE
#test.df <- sqldf("select a.YEAR,a.QUARTER,a.MARKET_FARE,b.SEATS,b.CARRIER,b.ORIGIN_AIRPORT_ID,b.ORIGIN,b.ORIGIN_CITY_NAME,b.ORIGIN_STATE_ABR,b.ORIGIN_STATE_NM,b.DEST_AIRPORT_ID,b.DEST,b.DEST_CITY_NAME,b.DEST_STATE_ABR,b.DEST_STATE_NM,b.MONTH from test_data a INNER JOIN test_data2 b ON a.CARRIER=b.CARRIER")
test.df <- sqldf("select * from test_data a INNER JOIN test_data2 b ON a.CARRIER=b.CARRIER")
test.df <- test.df[,-c(7,17)]
colnames(test.df)
#Generate Random value for Market fare between min and max of the market price
# for that quarter
date_data <- test.df[,c("YEAR","QUARTER")]
size <- nrow(date_data)
random <- sample(min(test.df$MARKET_FARE):max(test.df$MARKET_FARE),size,replace = TRUE)
test.df$MARKET_FARE <- random
# Generate Random value for number of available seats, between 1-400
seats <- sample(1:400,size,replace = TRUE)
test.df$SEATS <- seats
## DATE MANIPULATION ##################################
#New data frame with only year and quarter
#Size of the Data frame, to get the total number of rows
# Generating dates for a givenQuarter
## ***PLEASE CHANGE THE YEAR AND DATES BASED ON THE QUARTER YOU WISH TO GENERATE
DATE <- sample(seq(as.Date('2016/01/01'), as.Date('2016/03/31'), by="day"), size = size,replace = TRUE)
# Days for the date sequence
DAYS <- weekdays(as.Date(DATE,'%Y-%m-%d'))
#Month for the dates generated
MONTH <- months(DATE)
#Binding the date data together
date_data <- cbind(date_data,DATE,DAYS,MONTH)
#Removing the columsn YEAR AND QUARTER FROM THE INITIAL DATA SET
colnames(test.df)
test.df <- test.df[,-c(1:2,5,15)]
#FINAL DATA FRAME
test.df <- cbind(test.df,date_data)
## WRITING IT TO A CSV FILE
# PLEASE CHANGE THE NAME OF THE CSV FILE
write.csv(test.df,file = "2015_Quarter2.csv",row.names = FALSE)
summary(test.df)
dim(test.df)
colnames(test.df)
#Latitude and Long
library(geosphere)
#c<-distm (c(test.df$ORIGIN_LONGITUDE,test.df$ORIGIN_LATITUDE), c(test.df$DEST_LONGITUDE,test.df$DES_LATITUDE), fun = distVincentyEllipsoid)
#distHaversine(c(test.df$ORIGIN_LONGITUDE,test.df$ORIGIN_LATITUDE), c(test.df$DEST_LONGITUDE,test.df$DES_LATITUDE))
#names(test.df)
help(distVincentyEllipsoid)
for( i in 1:nrow(test.df)){
test.df$Distance[i]<-distm (c(test.df$ORIGIN_LONGITUDE[i],test.df$ORIGIN_LATITUDE[i]), c(test.df$DEST_LONGITUDE[i],test.df$DES_LATITUDE[i]), fun = distVincentyEllipsoid)
}
#Base Fare Calculation
min <- min(test.df$Distance)
max <- max(test.df$Distance)
mean <- mean(test.df$Distance)
mean
test <- numeric(nrow(test.df))   # holds the distance scaled relative to the mean
for( i in 1:nrow(test.df)){
  test[i] <- test.df$Distance[i]/mean*100
  if(test[i] < 50){
    test.df$BaseFare[i] <- 50    # floor the base fare at 50
  } else {
    test.df$BaseFare[i] <- test[i]
  }
}
head(test.df$BaseFare)
max(test.df$BaseFare)
min(test.df$BaseFare)
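# Vectorized alternative to the two loops above (a sketch; assumes the same test.df columns).
# geosphere's distVincentyEllipsoid() accepts two-column lon/lat matrices and returns one
# distance per row, and pmax() applies the 50 floor in a single step.
# test.df$Distance <- distVincentyEllipsoid(
#   cbind(test.df$ORIGIN_LONGITUDE, test.df$ORIGIN_LATITUDE),
#   cbind(test.df$DEST_LONGITUDE, test.df$DES_LATITUDE))
# test.df$BaseFare <- pmax(test.df$Distance / mean(test.df$Distance) * 100, 50)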
|
/Dataset/R-Script_Data_Manipulation/DataCleaning.R
|
no_license
|
tusharkm/get_my_flight
|
R
| false | false | 5,109 |
r
|
#Setting the working directory to my local system
setwd("D:/Scala_Course/Project/Data")
# INSTALL THE PACKAGE FIRST TIME
#install.packages("sqldf")
library("sqldf")
#Identified the fastest way to read the file
#Fread is a function in data.table package which can be used to read a file
# very fast.
## CAN ALSO USE read.csv and read.table in case you want it
## JUST NEED TO INSTALL IT THE FIRST TIME
#install.packages("data.table")
#install.packages("plr")
library(data.table)
library(plyr)
library(dplyr)
# Data with the quarter and year details
data_tkcarrier <- fread(input ="Fare_Carrier.csv",header = TRUE)
#Data with the carrier, seats and source and destination details
data_carrier <- fread(input = "S&D.csv",header = TRUE)
#Creating Lookup data of Airport ID
lookup <- fread(input ="Longitude_Latitude.csv",header = TRUE)
uniquedata<- unique(lookup, by='AIRPORT_ID')
#joining tables
dest_sample<-sqldf("select * from data_carrier as a inner join uniquedata as b on b.AIRPORT_ID=a.DEST_AIRPORT_ID")
origin_sample<-sqldf("select * from dest_sample as a inner join uniquedata as b on b.AIRPORT_ID=a.ORIGIN_AIRPORT_ID")
cordinates<-sqldf("select a.origin_airport_id,c.latitude as ORIGIN_LATITUDE,c.longitude as ORIGIN_LONGITUDE,a.dest_airport_id,b.latitude as DES_LATITUDE,b.longitude as DEST_LONGITUDE from data_carrier as a inner join uniquedata as b on b.AIRPORT_ID=a.DEST_AIRPORT_ID inner join uniquedata as c on c.AIRPORT_ID=a.ORIGIN_AIRPORT_ID")
colnames(origin_sample)
data_input_cordinates <- cbind(origin_sample[,-c(3,8,14:21)],cordinates)
colnames(data_input_cordinates)
#write.csv(sample,file = "testfile.csv",row.names = FALSE)
#head(sample)
#setnames(test2,c("AIRPORT_ID","AIRPORT_ID.1","LATITUDE","LONGITUDE","LONGITUDE.1","LATITUDE.1"),c("DEST_AIRPORT_ID","ORG_AIRPORT_ID","DEST_LATITUDE","DEST_LONGITUDE","ORG_LONGITUDE","ORG_LATITUDE"))
#head(test2)
#Creating a new data frame which contains the first 10000 rows
# BETTER APPROACH TO USE SAMPLE FUNCTION. WILL ADD THAT IN THE NEXT UPDATED
# SCRIPT
# Test sample with data_opcarrier
test_data <- data_tkcarrier[1:10000,]
# Test sample with data_carrier
test_data2 <- data_input_cordinates[1:10000,]
# In opcarrier Data frame, the column OPERATING_CARRIER, is changed to CARRIER
# This is to maintain the same column name for all data frames
setnames(test_data,"TICKET_CARRIER","CARRIER")
# SQL DF you can run any SQL command by treating the Data frame as a TABLE
#test.df <- sqldf("select a.YEAR,a.QUARTER,a.MARKET_FARE,b.SEATS,b.CARRIER,b.ORIGIN_AIRPORT_ID,b.ORIGIN,b.ORIGIN_CITY_NAME,b.ORIGIN_STATE_ABR,b.ORIGIN_STATE_NM,b.DEST_AIRPORT_ID,b.DEST,b.DEST_CITY_NAME,b.DEST_STATE_ABR,b.DEST_STATE_NM,b.MONTH from test_data a INNER JOIN test_data2 b ON a.CARRIER=b.CARRIER")
test.df <- sqldf("select * from test_data a INNER JOIN test_data2 b ON a.CARRIER=b.CARRIER")
test.df <- test.df[,-c(7,17)]
colnames(test.df)
#Generate Random value for Market fare between min and max of the market price
# for that quarter
date_data <- test.df[,c("YEAR","QUARTER")]
size <- nrow(date_data)
random <- sample(min(test.df$MARKET_FARE):max(test.df$MARKET_FARE),size,replace = TRUE)
test.df$MARKET_FARE <- random
# Generate Random value for number of available seats, between 1-400
seats <- sample(1:400,size,replace = TRUE)
test.df$SEATS <- seats
## DATE MANIPULATION ##################################
#New data frame with only year and quarter
#Size of the Data frame, to get the total number of rows
# Generating dates for a givenQuarter
## ***PLEASE CHANGE THE YEAR AND DATES BASED ON THE QUARTER YOU WISH TO GENERATE
DATE <- sample(seq(as.Date('2016/01/01'), as.Date('2016/03/31'), by="day"), size = size,replace = TRUE)
# Days for the date sequence
DAYS <- weekdays(as.Date(DATE,'%Y-%m-%d'))
#Month for the dates generated
MONTH <- months(DATE)
#Binding the date data together
date_data <- cbind(date_data,DATE,DAYS,MONTH)
#Removing the columsn YEAR AND QUARTER FROM THE INITIAL DATA SET
colnames(test.df)
test.df <- test.df[,-c(1:2,5,15)]
#FINAL DATA FRAME
test.df <- cbind(test.df,date_data)
## WRITING IT TO A CSV FILE
# PLEASE CHANGE THE NAME OF THE CSV FILE
write.csv(test.df,file = "2015_Quarter2.csv",row.names = FALSE)
summary(test.df)
dim(test.df)
colnames(test.df)
#Latitude and Long
library(geosphere)
#c<-distm (c(test.df$ORIGIN_LONGITUDE,test.df$ORIGIN_LATITUDE), c(test.df$DEST_LONGITUDE,test.df$DES_LATITUDE), fun = distVincentyEllipsoid)
#distHaversine(c(test.df$ORIGIN_LONGITUDE,test.df$ORIGIN_LATITUDE), c(test.df$DEST_LONGITUDE,test.df$DES_LATITUDE))
#names(test.df)
help(distVincentyEllipsoid)
for( i in 1:nrow(test.df)){
test.df$Distance[i]<-distm (c(test.df$ORIGIN_LONGITUDE[i],test.df$ORIGIN_LATITUDE[i]), c(test.df$DEST_LONGITUDE[i],test.df$DES_LATITUDE[i]), fun = distVincentyEllipsoid)
}
#Base Fare Calculation
min <- min(test.df$Distance)
max <- max(test.df$Distance)
mean <- mean(test.df$Distance)
mean
test <- numeric(nrow(test.df))   # holds the distance scaled relative to the mean
for( i in 1:nrow(test.df)){
  test[i] <- test.df$Distance[i]/mean*100
  if(test[i] < 50){
    test.df$BaseFare[i] <- 50    # floor the base fare at 50
  } else {
    test.df$BaseFare[i] <- test[i]
  }
}
head(test.df$BaseFare)
max(test.df$BaseFare)
min(test.df$BaseFare)
|
### Load Data
rawdata <- read.csv("unimelb_training.csv")
### Look at roles (in columns of rawdata)
roles <- vector("character")
for (i in 1:15){
colstr = paste("Role.", as.character(i), sep="")
roles <- c(roles, as.vector(unique(rawdata[, colstr])))
}
### Get unique roles
roles <- unique(roles)
######################################################################
###
### Initial data cleaning of rawdata
###
### Change long column name
names(rawdata)[5] <- "Contract.Value.Band"
### Features to characters
rawdata$Sponsor.Code <- as.character(rawdata$Sponsor.Code)
rawdata$Grant.Category.Code <- as.character(rawdata$Grant.Category.Code)
rawdata$Contract.Value.Band <- as.character(rawdata$Contract.Value.Band)
### Deal with missing values
rawdata$Sponsor.Code[rawdata[, "Sponsor.Code"] == ""] <- "Unk"
rawdata$Sponsor.Code <- factor(paste("Sponsor", rawdata$Sponsor.Code, sep=""))
rawdata$Grant.Category.Code[rawdata[, "Grant.Category.Code"] == ""] <- "Unk"
rawdata$Grant.Category.Code <- factor(paste("Grant.Category",rawdata$Grant.Category.Code, sep=""))
rawdata$Contract.Value.Band[rawdata[, "Contract.Value.Band"] == ""] <- "Unk"
rawdata$Contract.Value.Band<- factor(paste("Contract.Value.Band",rawdata$Contract.Value.Band, sep=""))
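### The three blocks above repeat one pattern (blank -> "Unk", prefix, factor). A small
### helper makes that explicit; the function name is ours, not part of the original script.
fill_and_prefix <- function(x, prefix, missing_label = "Unk") {
  x <- as.character(x)
  x[x == ""] <- missing_label
  factor(paste(prefix, x, sep = ""))
}
### e.g. fill_and_prefix(read.csv("unimelb_training.csv")$Sponsor.Code, "Sponsor")
### reproduces the Sponsor.Code block above.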
|
/1-initial_data_cleaning.R
|
no_license
|
AlexBerlin/predict_grant_applications
|
R
| false | false | 1,257 |
r
|
### Load Data
rawdata <- read.csv("unimelb_training.csv")
### Look at roles (in columns of rawdata)
roles <- vector("character")
for (i in 1:15){
colstr = paste("Role.", as.character(i), sep="")
roles <- c(roles, as.vector(unique(rawdata[, colstr])))
}
### Get unique roles
roles <- unique(roles)
######################################################################
###
### Initial data cleaning of rawdata
###
### Change long column name
names(rawdata)[5] <- "Contract.Value.Band"
### Features to characters
rawdata$Sponsor.Code <- as.character(rawdata$Sponsor.Code)
rawdata$Grant.Category.Code <- as.character(rawdata$Grant.Category.Code)
rawdata$Contract.Value.Band <- as.character(rawdata$Contract.Value.Band)
### Deal with missing values
rawdata$Sponsor.Code[rawdata[, "Sponsor.Code"] == ""] <- "Unk"
rawdata$Sponsor.Code <- factor(paste("Sponsor", rawdata$Sponsor.Code, sep=""))
rawdata$Grant.Category.Code[rawdata[, "Grant.Category.Code"] == ""] <- "Unk"
rawdata$Grant.Category.Code <- factor(paste("Grant.Category",rawdata$Grant.Category.Code, sep=""))
rawdata$Contract.Value.Band[rawdata[, "Contract.Value.Band"] == ""] <- "Unk"
rawdata$Contract.Value.Band<- factor(paste("Contract.Value.Band",rawdata$Contract.Value.Band, sep=""))
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4526
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4526
c
c Input Parameter (command line, file):
c input filename QBFLIB/Rintanen/Sorting_networks/sortnetsort7.AE.stepl.006.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2697
c no.of clauses 4526
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4526
c
c QBFLIB/Rintanen/Sorting_networks/sortnetsort7.AE.stepl.006.qdimacs 2697 4526 E1 [] 0 486 2211 4526 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Rintanen/Sorting_networks/sortnetsort7.AE.stepl.006/sortnetsort7.AE.stepl.006.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 664 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 4526
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 4526
c
c Input Parameter (command line, file):
c input filename QBFLIB/Rintanen/Sorting_networks/sortnetsort7.AE.stepl.006.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2697
c no.of clauses 4526
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 4526
c
c QBFLIB/Rintanen/Sorting_networks/sortnetsort7.AE.stepl.006.qdimacs 2697 4526 E1 [] 0 486 2211 4526 NONE
|
##' @title Create speciesRaster
##'
##' @description This function takes a rasterStack and generates both a richness
##' raster and an associated list of species per cell, creating an object of
##' class \code{speciesRaster}.
##'
##'
##' @param ranges Either a RasterStack, RasterBrick, or species by cell matrix. Any non-NA
##' values in rasters are considered presences.
##'
##' @param rasterTemplate If input is a species x cell matrix, then a rasterTemplate
##' must be provided, where the number of cells = the number of columns in the matrix.
##' Cells with a value of 1 will be processed, cells with a value
##' of 0 will be ignored. Therefore, all cells must have a value of 0/1.
##'
##' @param verbose Primarily intended for debugging, print progress to the console.
##'
##'
##' @details
##' This function generates an object of class \code{speciesRaster}, which is a
##' list containing the following elements:
##' \itemize{
##' \item{\code{raster:}} {A raster representing counts of species per cell.}
##' \item{\code{speciesList:}} {A list of species found in each cell.}
##' \item{\code{geogSpecies:}} {a vector of unique species in all cells.}
##' \item{\code{cellCount:}} {a vector of counts of presence cells for each species.}
##' \item{\code{data:}} {An empty spot that morphological data can be added to.}
##' \item{\code{phylo:}} {An empty spot that a phylogeny can be added to.}
##' }
##'
##' If input is a RasterStack, then all parameters are taken from that, such as resolution,
##' extent and projection. Any non-NA and non-zero cell is considered a presence.
##' This function expects that all input rasters in the rasterStack have presence values
##' (i.e., at least 1 non-NA value). If any rasters have exclusively NA cells, then the
##' function will stop with a warning, and the output will be the index in the rasterStack
##' of those rasters.
##'
##' @return an object of class \code{speciesRaster}.
##'
##' @author Pascal Title
##'
##' @examples
##' library(raster)
##' library(sf)
##' # example dataset: a list of 24 chipmunk distributions as polygons
##' head(tamiasPolyList)
##'
##' # convert polygon ranges to raster
##' ranges <- rasterStackFromPolyList(tamiasPolyList, resolution = 20000)
##'
##' spRas <- createSpeciesRaster(ranges = ranges)
##'
##' spRas
##'
##'
##'
##'
##' @export
createSpeciesRaster <- function(ranges, rasterTemplate = NULL, verbose = FALSE) {
if (all(!class(ranges) %in% c('RasterStack', 'RasterBrick', 'matrix', 'data.frame'))) {
		stop('Input must be a RasterStack, RasterBrick, or a species by cell matrix/data.frame.')
}
# prepare output object
obj <- vector('list', length = 7)
names(obj) <- c('raster', 'speciesList', 'cellCommInd', 'geogSpecies', 'cellCount', 'data', 'phylo')
# if rasterstack as input
if (all(class(ranges) %in% c('RasterStack', 'RasterBrick'))) {
#check that all rasters have values
if (verbose) message('\t...Checking for empty rasters...\n')
valCheck <- raster::minValue(ranges)
badEntries <- which(is.na(valCheck))
badEntriesRet <- badEntries
if (length(badEntries) > 0) {
badEntries <- paste(which(is.na(valCheck)), collapse = ', ')
warning(paste0('The following rasters have no non-NA cells: ', badEntries, '.'))
return(badEntriesRet)
}
# rasterstack calculations only
# create matrix of cells (rows) x raster (cols)
# prepare result objects
ras <- raster::raster(ranges[[1]])
raster::values(ras) <- 0
cellCommVec <- integer(length = raster::ncell(ranges))
spByCell <- vector('list', length = raster::ncell(ranges))
# determine the size of rasterStack that can be processed in memory
if (verbose) message('\t...Determining if rasterstack can be processed in memory...')
if (raster::canProcessInMemory(ranges)) {
if (verbose) message('yes\n')
mat <- matrix(nrow=raster::ncell(ranges), ncol=raster::nlayers(ranges))
colnames(mat) <- names(ranges)
for (i in 1:raster::nlayers(ranges)) {
mat[, i] <- ranges[[i]][]
}
# set all NA to 0
mat[is.na(mat)] <- 0
			# check whether the matrix is already binary
			if (!identical(sort(unique(as.numeric(mat))), c(0, 1))) {
				# not binary: consider every non-zero value a presence (0/1)
				mat[mat != 0] <- 1
}
# get count of species per cell
cellSums <- rowSums(mat)
# assign values to result raster
raster::values(ras) <- cellSums
# get list of which species are found in each cell
spByCell <- spListPerCell(mat)
} else {
# data too big. Split into subsets of rows
if (verbose) message('no\n')
if (verbose) message('\t...Determining how many rasters can be processed in memory...')
n <- 1
while (raster::canProcessInMemory(ranges[[1:n]])) {
n <- n + 1
}
if (verbose) message(n, '\n')
indList <- split(1:raster::nlayers(ranges), ceiling(1:raster::nlayers(ranges)/n))
pb <- raster::pbCreate(length(indList), progress = 'text')
cellVals <- vector('list', length = length(indList))
SpByCellList <- vector('list', length = length(indList))
for (i in 1:length(indList)) {
submat <- matrix(nrow=raster::ncell(ranges), ncol=length(indList[[i]]))
colnames(submat) <- names(ranges)[indList[[i]]]
for (j in 1:length(indList[[i]])) {
submat[, j] <- ranges[[indList[[i]][j]]][]
}
# set all NA to 0
submat[is.na(submat)] <- 0
				# check whether the matrix is already binary
				if (!identical(sort(unique(as.numeric(submat))), c(0, 1))) {
					# not binary: consider every non-zero value a presence (0/1)
					submat[submat != 0] <- 1
}
# get count of species per cell
cellSums <- rowSums(submat)
# assign values to result raster
cellVals[[i]] <- cellSums
# get list of which species are found in each cell
SpByCellList[[i]] <- spListPerCell(submat)
raster::pbStep(pb, step = i)
}
raster::pbClose(pb, timer = FALSE)
# combine pieces
if (verbose) message('\t...Assembling speciesRaster...\n')
raster::values(ras) <- rowSums(do.call(cbind, cellVals))
# for now, replace all NA with 'empty'
for (i in 1:length(SpByCellList)) {
for (j in 1:length(SpByCellList[[i]])) {
if (all(is.na(SpByCellList[[i]][[j]]))) {
SpByCellList[[i]][[j]] <- 'empty'
}
}
}
spByCell <- mergeLists(SpByCellList)
spByCell <- lapply(spByCell, unique)
spByCell[sapply(spByCell, length) == 0] <- NA
}
# reduce spByCell to unique communities and track
if (verbose) message('\t...Reducing species list to unique sets...')
uniqueComm <- unique(spByCell)
spByCell2 <- sapply(spByCell, function(y) paste(y, collapse = '|'))
uniqueComm2 <- sapply(uniqueComm, function(y) paste(y, collapse = '|'))
for (i in 1:length(uniqueComm2)) {
cellCommVec[which(spByCell2 == uniqueComm2[i])] <- i
}
if (verbose) message('done\n')
#remove zero cells
ras[ras == 0] <- NA
names(ras) <- 'spRichness'
obj[['raster']] <- ras
obj[['speciesList']] <- uniqueComm
obj[['cellCommInd']] <- cellCommVec
obj[['geogSpecies']] <- sort(unique(names(ranges)))
# calculate range area for each species ( = number of cells)
if (verbose) message('\t...Calculating species cell counts...\n\n')
obj[['cellCount']] <- countCells(convertNAtoEmpty(spByCell), obj[['geogSpecies']])
names(obj[['cellCount']]) <- obj[['geogSpecies']]
}
# input ranges can be a binary presence/absence sp x cell matrix
# where rownames are species and columns are cells
if (any(class(ranges) %in% c('matrix', 'data.frame'))) {
if (any(class(ranges) == 'data.frame')) {
ranges <- as.matrix(ranges)
}
if (length(unique(rownames(ranges))) != nrow(ranges)) {
stop('rownames in species x cell matrix must be unique.')
}
if (mode(ranges) != 'numeric') {
stop('matrix data does not appear to be numeric.')
}
if (!identical(as.numeric(range(as.vector(ranges))), c(0, 1))) {
mode(ranges) <- 'logical'
mode(ranges) <- 'numeric'
}
if (is.null(rasterTemplate)) {
stop('If input is a species x cell matrix, then a raster template must be provided.')
}
if (raster::ncell(rasterTemplate) != ncol(ranges)) {
stop('If input is species x cell matrix, then number of columns must equal the number of raster cells.')
}
if (!identical(as.numeric(range(raster::values(rasterTemplate))), c(0, 1))) {
stop('rasterTemplate can only have values of 0 or 1.')
}
if (verbose) message('\t...Using species by cell matrix...\n')
if (verbose) message('\t...Calculating species richness...\n')
dropCells <- which(raster::values(rasterTemplate) == 0)
raster::values(rasterTemplate) <- colSums(ranges)
if (length(dropCells) > 0) {
rasterTemplate[dropCells] <- 0
}
rasterTemplate[rasterTemplate == 0] <- NA
names(rasterTemplate) <- 'spRichness'
if (verbose) message('\t...Indexing species in cells...\n')
obj[['raster']] <- rasterTemplate
spByCell <- apply(ranges, 2, function(x) names(x[which(x == 1)]))
		emptyInd <- which(sapply(spByCell, length) == 0)
if (length(dropCells) > 0) {
emptyInd <- union(emptyInd, dropCells)
}
emptyList <- rep(list(NA), length(emptyInd))
spByCell[emptyInd] <- emptyList
# reduce spByCell to unique communities and track
uniqueComm <- unique(spByCell)
spByCell2 <- sapply(spByCell, function(y) paste(y, collapse = '|'))
uniqueComm2 <- sapply(uniqueComm, function(y) paste(y, collapse = '|'))
cellCommVec <- integer(length = length(spByCell))
for (i in 1:length(uniqueComm2)) {
cellCommVec[which(spByCell2 == uniqueComm2[i])] <- i
}
obj[['speciesList']] <- uniqueComm
obj[['cellCommInd']] <- cellCommVec
obj[['geogSpecies']] <- sort(rownames(ranges))
# calculate range area for each species ( = number of cells)
if (verbose) message('\t...Calculating species cell counts...\n\n')
obj[['cellCount']] <- rowSums(ranges)
}
if (class(obj[[1]]) != 'RasterLayer') {
stop('Input type not supported.')
}
class(obj) <- 'speciesRaster'
return(obj)
}
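## Illustrative follow-up (a sketch; assumes `spRas` was built as in the roxygen example
## above). The element names are the ones documented in the Details section.
# raster::plot(spRas[['raster']])   # species richness map
# spRas[['speciesList']][[1]]       # species set of the first unique cell community
# head(spRas[['cellCount']])        # number of presence cells per species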
|
/R/createSpeciesRaster.R
|
no_license
|
yangxhcaf/speciesRaster
|
R
| false | false | 10,034 |
r
|
###############################################################################
# Scripts to produce plotting artifacts from one main run
#
# Created Date: Thu Oct 31 09:42:59 2019
# Author: Vivek Katial
###############################################################################
#' This function plots the energy gap for a hamiltonian dataframe
#' @param d_solved_system This is a dataframe which consists of the hamiltonian matrix in each column
#' @return A ggplot object containing a plot of the energy gap over time for the system
plot_energy_gap = function(d_solved_system){
# Check the correct data frame
if (!all(c("time", "lambda_1", "lambda_2") %in% names(d_solved_system))) {
stop("Incorrect data frame fed into function 'plot_energy_gap'")
}
p_energy_gap <- d_solved_system %>%
select(time, lambda_1, lambda_2) %>%
gather(var,n, -time) %>%
ggplot(aes(x = time, y = n, col = var)) +
geom_line() +
theme_classic() +
labs(
x = "time",
y = "energy"
)
}
#' This function plots the state vector as a probability distribution
#' @param state_pdf The state vector as a probability distribution ('type' = tibble)
#' @return A ggplot object showing the state vector probabilities as a bar chart
plot_state_pdf = function(state_pdf){
if (!all(c("p", "state", "bit_str") %in% names(state_pdf))) {
stop(sprintf("Column(s) '%s' should not be in 'state_pdf'", diff(names(state_pdf), c("p", "state", "bit_str"))))
}
state_pdf %>%
mutate(type = ifelse(abs(p - max(p)) < 1e-13, "max", "other")) %>%
ggplot(aes(x = bit_str, y = p, group = 1, fill = type)) +
geom_col(alpha = 0.6) +
labs(
x = "State "
) +
theme_classic() +
#stat_smooth(geom = "area", span = 0.4, method = "glm", alpha = 0.4) +
theme(
axis.text.x = element_text(angle = 90, hjust = 1)
)
}
#' This function plots the entanglement of a solved system
#' @param d_solved_system A solved quantum system
#' @param label (character) label for the y-axis
#' @return A ggplot object containing the entanglement plot
plot_entanglement = function(d_solved_system, label){
if (!(is_character(label))) {
stop("Please make label a character")
}
# Plot Shannon Entropy at the end
d_solved_system %>%
select(time, shannon_entropy) %>%
ggplot(aes(x = time, y = shannon_entropy)) +
geom_line() +
theme_classic() +
labs(
x = "t",
y = label
)
}
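# ------------------------------------------------------------------------------
# Editor's illustrative usage sketch (not part of the original script): toy
# inputs for the three helpers above. Column values are invented, and dplyr,
# tidyr, ggplot2, rlang and tibble are assumed to be attached by the calling
# script (as the helpers themselves assume). Wrapped in `if (FALSE)` so sourcing
# this file stays side-effect free.
if (FALSE) {
  toy_system <- tibble::tibble(
    time = seq(0, 1, length.out = 50),
    lambda_1 = sin(seq(0, pi, length.out = 50)),
    lambda_2 = 1 + sin(seq(0, pi, length.out = 50)),
    shannon_entropy = seq(0, 0.7, length.out = 50)
  )
  print(plot_energy_gap(toy_system))
  print(plot_entanglement(toy_system, label = "Shannon entropy"))
  toy_state <- tibble::tibble(
    bit_str = c("000", "001", "010", "011"),
    state = 0:3,
    p = c(0.1, 0.2, 0.6, 0.1)
  )
  print(plot_state_pdf(toy_state))
}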
|
/src/plotting-helpers.R
|
no_license
|
vivekkatial/aqc-three-sat-sim
|
R
| false | false | 2,433 |
r
|
#' Generate Isolines
#'
#' Takes \code{\link{data-Point}}'s with z-values and an array of value
#' breaks and generates \href{http://en.wikipedia.org/wiki/Isoline}{isolines}
#'
#' @export
#'
#' @param points input points
#' @param z (character) the property name in points from which z-values will be pulled
#' @param resolution (numeric) resolution of the underlying grid
#' @param breaks (numeric) where to draw contours
#' @template lint
#' @family interpolation
#' @return \code{\link{data-FeatureCollection}} of isolines
#' (\code{\link{data-LineString}} features)
#' @examples
#' pts <- lawn_random(n = 100, bbox = c(0, 30, 20, 50))
#' pts$features$properties <- data.frame(z = round(rnorm(100, mean = 5)), stringsAsFactors = FALSE)
#' breaks <- c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
#' lawn_isolines(pts, 'z', 15, breaks)
#'
#' @examples \dontrun{
#' lawn_isolines(pts, 'z', 15, breaks) %>% view
#' }
lawn_isolines <- function(points, z, resolution, breaks, lint = FALSE) {
points <- convert(points)
lawnlint(points, lint)
ct$eval(sprintf("var iso = turf.isolines(%s, '%s', %s, %s);",
points, z, resolution, toj(breaks)))
as.fc(ct$get("iso"))
}
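# ------------------------------------------------------------------------------
# Editor's note (illustrative only): the sprintf() above splices the GeoJSON and
# arguments into a small JavaScript snippet that is evaluated by turf.js inside
# the embedded V8 context `ct`. The sketch below shows the generated string for
# toy inputs, using jsonlite::toJSON() as a stand-in for the package-internal
# toj() helper; the GeoJSON literal is a made-up placeholder.
if (FALSE) {
  pts_json <- '{"type":"FeatureCollection","features":[]}'
  sprintf("var iso = turf.isolines(%s, '%s', %s, %s);",
          pts_json, "z", 15, jsonlite::toJSON(c(0, 5, 10)))
  # produces a string like:
  #   var iso = turf.isolines({"type":"FeatureCollection","features":[]}, 'z', 15, [0,5,10]);
}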
|
/R/isolines.R
|
permissive
|
jbousquin/lawn
|
R
| false | false | 1,176 |
r
|
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.6829861350919e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615845451-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 735 |
r
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62038276102781e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615837071-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 2,048 |
r
|
#====NYS MH Data Processing #1====#
# Library load-in====
reqpackages <- c("readr","ggplot2", "tidyverse","ggmap","crosstalk","DT","leaflet","tidygeocoder")
newpackages <- reqpackages[!(reqpackages %in% installed.packages()[,"Package"])]
if(length(newpackages)) install.packages(newpackages)
invisible(suppressPackageStartupMessages(lapply(reqpackages, require, character.only=T)))
# Initial data load in====
#Main goal is to map all data points with an address and display this information within a map and data explorer. "Skipping" columns that are irrelevant to that task.#
MHprograms <- read_csv("Data/Local_Mental_Health_Programs.csv",
col_types = cols(`Row Created Date Time` = col_skip(),
`Sponsor Name` = col_skip(), `Sponsor Code` = col_skip(),
`Agency Code` = col_skip(), `Facility Name` = col_skip(),
`Facility Code` = col_skip(), `Program Code` = col_skip(),
`Program Address 2` = col_skip(),
`Operating Certificate Required?` = col_skip(),
`Program Tier` = col_skip(), `Operating Certificate Duration` = col_skip()))
# Data cleaning and carpentry====
#Recoding all NAs to "Not Reported" for easier front-end clarity.#
MHprograms[is.na(MHprograms)] = "Not Reported"
#The "location" Column includes complete addresses and geocoded data for some observations, but are obviously incorrect. All of the latitude and longitude points are identical. In order to map the data on to a leaflet map, an accurate estimate of latitude and longitude points will need to be acquired. Will proceed to use Free Nominatim and Census APIs in an attempt to get geodata. This has limitations as it may not be possible for every address to be geocoded through these means#
#Will create a new variable "Complete Address" that pastes together all location data in the data set. Dropping the location column as it won't be used.#
MHprograms <- MHprograms %>%
mutate(`Complete Address` = paste(`Program Address 1`,`Program City`,`Program County`, `Program State`,"US",`Program Zip`)) %>%
select(-Location)
#Storing the addresses for the geocoding function#
addresses <- MHprograms$`Complete Address`
#Creating an empty vector to store the geocoded data from Nominatim (OSM)#
adddata <- c()
# Add a check to compare to the master file.- Revisit Later??#
nominatim_osm <- function(address = NULL)
{
if(suppressWarnings(is.null(address)))
return(data.frame("NA"))
tryCatch(
adddata <- jsonlite::fromJSON(
gsub('\\@addr\\@', gsub('\\s+', '\\%20', address),
'http://nominatim.openstreetmap.org/search/@addr@?format=json&addressdetails=0&limit=1')
), error = function(c) return(data.frame("NA"))
)
if(length(adddata) == 0) return(data.frame("NA"))
return(data.frame(lon = as.numeric(adddata$lon), lat = as.numeric(adddata$lat)))
}
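# Editor's illustrative sketch (not part of the original script): a one-off call to
# the helper above. The address is made up, and the call is wrapped in `if (FALSE)`
# so sourcing the script never hits the Nominatim API by accident. On success the
# helper returns a one-row data frame with `lon` and `lat`; on failure it returns a
# placeholder data frame.
if (FALSE) {
  one_hit <- nominatim_osm("20 W 34th St New York NY US 10001")
  str(one_hit)
}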
#Execution of the geocoding and coercing into a dataframe.#
adddata <- suppressWarnings(lapply(addresses, function(address) {
#calling the nominatim OSM API
api_output <- nominatim_osm(address)
#return a data frame with the input addresses.#
return(data.frame(address = address, api_output))
}) %>%
#stack the list outputs into data frame together.#
bind_rows() %>% data.frame())
adddata <- adddata %>%
select(-X.NA.) %>%
mutate(lonlookup = address) %>%
mutate(latlookup = address)
#Attempting a second sweep of geocoding for NAs.#
#Pulling out and isolating NAs.#
NAadddata <- adddata %>%
filter(is.na(lat & lon))
#Second sweep of Geocoding for empty lats and lons#
NAadddata <- geo_census(as.vector(NAadddata$address))
#Merging found geocodes for NAs into the data set#
lonlookup <- setNames(as.character(NAadddata$long),NAadddata$address)
adddata$lonlookup <- as.character(lapply(adddata$lonlookup , function(i) lonlookup[i]))
latlookup <- setNames(as.character(NAadddata$lat),NAadddata$address)
adddata$latlookup <- as.character(lapply(adddata$latlookup , function(i) latlookup[i]))
#Filling in the NAs for lat and long that were found#
adddata <- adddata %>%
mutate(lon = ifelse(is.na(lon),paste(lonlookup),adddata$lon)) %>%
mutate(lat = ifelse(is.na(lat),paste(latlookup),adddata$lat)) %>%
select(-c(latlookup,lonlookup))
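# Editor's note (illustrative only): the setNames()/lapply() pattern above is a
# named-vector lookup keyed by address. In miniature, with invented values:
if (FALSE) {
  lookup <- setNames(c("-73.99", "-73.97"), c("10 Main St", "22 Oak Ave"))
  unname(lookup["10 Main St"]) # "-73.99"
  unname(lookup["5 Elm Rd"])   # NA: address not geocoded on the second sweep
}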
#Placing geodata back into main set while removing duplicates, setting the column type for lat and long, and reorganizing the lat and long columns#
MHprograms <- left_join(MHprograms,adddata, by = c("Complete Address" = "address")) %>%
distinct() %>%
suppressWarnings(mutate(lat = lat)) %>%
suppressWarnings(mutate(lon = lon)) %>%
relocate(lat, .before = lon)
#Isolating points that could not be geocoded#
MHprograms_nogeo <- MHprograms %>%
  filter(is.na(lat) | is.na(lon))
#Removing observations that do not have any geocodes associated with them - These will be the final points plotted on the map.#
MHprograms <- MHprograms %>%
  filter(!is.na(lat) & !is.na(lon))
#Writing CSVs out into the data folder.#
write_csv(MHprograms,"data/MHprograms.csv")
write_csv(MHprograms_nogeo,"data/MHprograms_nogeo.csv")
|
/Scripts/GeocodingProcessing.R
|
no_license
|
Meghansaha/NYS_MH_Programs
|
R
| false | false | 5,252 |
r
|
source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble'))
RwarnLevel <- options('warn')$warn
options(warn = 1)
nimbleVerboseSetting <- nimbleOptions('verbose')
nimbleOptions(verbose = FALSE)
nimbleProgressBarSetting <- nimbleOptions('MCMCprogressBar')
nimbleOptions(MCMCprogressBar = FALSE)
context('Testing of MCMC_RJ functionality')
test_that("Test configureRJ with no indicator variables", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * x1[i] + beta2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## One node
nodes <- c("beta2")
expect_error(configureRJ(mConf, nodes),
"configureRJ: Provide 'indicatorNodes' or 'priorProb' vector")
#####################################
## One node, multiple parameters
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(fixedValue = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(mean = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(scale = c(2,1))),
'configureRJ: inconsistent length')
## priorProb not probabilities
expect_error(configureRJ(mConf, nodes, prior = -1))
expect_error(configureRJ(mConf, nodes, prior = 2))
#####################################
  ## Multiple nodes, fewer parameters
nodes <- c("beta0", "beta1", "beta2")
expect_error(configureRJ(mConf, nodes, prior = c(0.5, 0.5)),
"configureRJ: Length of 'priorProb' vector must match 'targetNodes' length.")
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(fixedValue = c(0,1))),
"configureRJ: inconsistent length")
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(mean = c(0,1))),
"configureRJ: inconsistent length")
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(scale = c(2,1))),
"configureRJ: inconsistent length")
#####################################
## priorProb not probabilities
expect_error(configureRJ(mConf, nodes, prior = c(0.5, 2, 0.2)),
"configureRJ: Elements in priorProb")
})
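## Editor's note (illustrative sketch, not an additional test): the block above only
## exercises configureRJ's error handling. For reference, a call it accepts on that
## model supplies one prior inclusion probability per target node, e.g. (reusing the
## model object `m` built inside the preceding test_that block):
if (FALSE) {
  mConfOK <- configureMCMC(m)
  configureRJ(mConfOK, targetNodes = c("beta1", "beta2"), priorProb = c(0.5, 0.5),
              control = list(mean = 0, scale = 2))
}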
test_that("Test configureRJ with multivariate node - no indicator", {
##############################
## Multivariate node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
mu[1:5] <- rep(0, 5)
sigma[1:5] <- 1/rep(100, 5)
simgma.mat[1:5, 1:5] <- diag(sigma[1:5])
beta[1:5] ~ dmnorm(mu[1:5], sigma_mat[1:5, 1:5])
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma.y)
}
sigma.y ~ dunif(0, 100)
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 5), sigma.y = sd(Y), sigma_mat = diag(rep(1/100, 5)), mu = rep(0, 5))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## test multivariate node with joint sampler
expect_error(configureRJ(mConf, "beta", prior =0.5),
'is multivariate and uses a joint sampler; only univariate samplers can be used with reversible jump sampling.')
## test multivariate node with univariate samplers
nodeAsScalar <- mConf$model$expandNodeNames("beta", returnScalarComponents = TRUE)
## acceptable case
mConf$removeSamplers("beta")
for(node in nodeAsScalar){
mConf$addSampler(node, type = "RW")
}
targetNodes <- c("beta")
control <- list(fixedValue = 0, mean = 0, scale = 2)
## this should work
expect_error(configureRJ(mcmcConf = mConf, targetNodes = targetNodes, priorProb = 0.5, control = control), NA)
## test double call to configureRJ
expect_error(configureRJ(mcmcConf = mConf, targetNodes = targetNodes, priorProb = 0.5, control = control),
"is already configured for reversible jump")
})
test_that("Check passing node vector - no indicator", {
#####################################
## Vector node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
for(i in 1:5){
beta[i] ~ dnorm(0, sd = 100)
}
sigma ~ dunif(0, 100)
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 10), sigma = sd(Y))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## no error
expect_error(configureRJ(mConf, c("beta"), prior = 0.5), NA)
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), prior = 0.5), NA)
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), prior = c(0.5, 0.2)), NA)
})
test_that("Check sampler_RJ behaviour - no indicator", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * x1[i] + beta2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
## check sampler behaviour
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2'))
configureRJ(mConf, c('beta1', 'beta2'), prior = 0.5)
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=1000, nburnin = 900, thin=1,
inits = list(beta0 = 1, beta1 = 1, beta2 = 1, sigma = sd(Y)), setSeed = 1)
## beta2 should be more likely to be 0
expect_true(sum(output[, 'beta2'] == 0)/100 > 0.5)
# expect_true(mean(output[which(output[, 'beta2'] != 0), 'beta2']) - coef(lm(Y ~ x1 + x2))[3] < 0.05) ## should check that beta2 is small when in the model
## beta1 should be less likely to be 0
expect_true(sum(output[, 'beta1'] == 0)/100 < 0.5)
## beta1 estimate (comparison with lm estimate)
expect_equal(mean(output[which(output[, 'beta1'] != 0), 'beta1']), as.numeric(coef(lm(Y ~ x1 + x2))[2]) , tolerance=0.1, scale = 1)
# ## beta1 should be in the model in last 100 iterations (chain has converged)
# expect_false(any(output[, 'beta1'] == 0))
#######
## change proposal mean for beta1 - still reasonable even if far
## dnorm(1.5, 3, 1) = 0.12
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1'))
configureRJ(mConf, 'beta1', prior = 0.5, control = list(mean = 3))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=100, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## beta1 estimate (comparison with lm estimate)
expect_equal(mean(output[which(output[, 'beta1'] != 0), 'beta1']), as.numeric(coef(lm(Y ~ x1 + x2))[2]) , tolerance=0.1, scale = 1)
#######
## fixed value on true beta1
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1'))
configureRJ(mConf, 'beta1', prior = 0.5, control = list(fixedValue = 1.5))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=100, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
expect_equal(mean(output[which(output[, 'beta1'] != 0), 'beta1']), 1.5 , tolerance=0.01, scale = 1)
#######
## fixedValue on far value for beta2
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta2'))
configureRJ(mConf, 'beta2', prior = 0.5, control = list(fixedValue = 5))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=100, thin=1,
inits = list(beta0 = 1, beta1 = 1, beta2 = 1, sigma = sd(Y)), setSeed = 1)
## still beta2 is in the models but really small
expect_equal(mean(output[which(output[, 'beta2'] != 0), 'beta2']), 0 , tolerance=0.1, scale = 1)
if(.Platform$OS.type != "windows") {
nimble:::clearCompiled(m)
}
})
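## Editor's note (illustrative sketch): the proportions checked above are empirical
## posterior exclusion/inclusion probabilities. For a samples matrix with 'beta1' and
## 'beta2' columns (like the first `output` object in the preceding test), the
## inclusion probability of each coefficient is simply the share of non-zero draws:
if (FALSE) {
  inclusion_prob <- colMeans(output[, c("beta1", "beta2")] != 0)
  inclusion_prob
}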
######################################
## Tests using indicator variables
######################################
test_that("Test configureRJ with indicator variables", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
    z1 ~ dbern(psi) ## indicator variable for including beta1
z2 ~ dbern(psi) ## indicator variable for including beta2
psi ~ dbeta(1, 1)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * z1 * x1[i] + beta2 * z2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## One node
nodes <- c("beta2")
expect_error(configureRJ(mConf, nodes),
"configureRJ: Provide 'indicatorNodes' or 'priorProb' vector")
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2")),
"configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
## One node, multiple parameters
expect_error(configureRJ(mConf, nodes, indicatorNodes = "z1", control = list(mean = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, indicatorNodes = "z1", control = list(scale = c(2,1))),
'configureRJ: inconsistent length')
  ## Multiple nodes, fewer parameters
nodes <- c("beta0", "beta1", "beta2")
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2")),
"configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2"), control = list(mean = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2"), control = list(scale = c(2,1))),
'configureRJ: inconsistent length')
})
test_that("Test configureRJ with multivariate node - indicator", {
##############################
## Multivariate node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
mu[1:5] <- rep(0, 5)
sigma[1:5] <- 1/rep(100, 5)
simgma.mat[1:5, 1:5] <- diag(sigma[1:5])
beta[1:5] ~ dmnorm(mu[1:5], sigma_mat[1:5, 1:5])
for(i in 1:5){
## indicator variables
z[i] ~ dbern(0.5)
}
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5]*z[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma.y)
}
sigma.y ~ dunif(0, 100)
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 5), sigma.y = sd(Y), sigma_mat = diag(rep(1/100, 5)), mu = rep(0, 5))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## test multivariate node with joint sampler
expect_error(configureRJ(mConf, "beta", indicatorNodes = "z"),
'is multivariate and uses a joint sampler; only univariate samplers can be used with reversible jump sampling.')
## test multivariate node with univariate samplers
nodeAsScalar <- mConf$model$expandNodeNames("beta", returnScalarComponents = TRUE)
## acceptable case
mConf$removeSamplers("beta")
for(node in nodeAsScalar){
mConf$addSampler(node, type = "RW")
}
## this should work
control <- list(fixedValue = 0, mean = 0, scale = 2)
expect_error(configureRJ(mcmcConf = mConf, targetNodes = "beta", indicatorNodes = "z", control = control), NA)
## test double call to configureRJ
expect_error(configureRJ(mcmcConf = mConf, targetNodes = "beta", indicatorNodes = "z", control = control),
'is already configured for reversible jump')
})
test_that("Check sampler_RJ_indicator behaviour - indicator", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
    z1 ~ dbern(psi) ## indicator variable for including beta1
z2 ~ dbern(psi) ## indicator variable for including beta2
psi ~ dbeta(1, 1)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * z1 * x1[i] + beta2 * z2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
## check sampler behaviour
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2', 'z1', 'z2'))
configureRJ(mConf, c('beta1', 'beta2'), indicator =c('z1', 'z2'))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE, resetFunctions = TRUE)
output <- runMCMC(cMCMC, niter=1000, nburnin = 900, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## beta2 should be more likely to be 0
expect_true(mean(output[, 'z2']) < 0.5)
## beta2 should be 0 when z1 is 0
expect_equal(sum(output[, 'beta2'] != 0)/100, mean(output[, 'z2']) )
## beta1 should be less likely to be 0
expect_true(mean(output[, 'z1']) > 0.5)
## beta1 should be 0 when z1 is 0
expect_equal(sum(output[, 'beta1'] != 0)/100, mean(output[, 'z1']) )
## check beta1 estimate
expect_equal(mean(output[which(output[, 'z1'] != 0), 'beta1']), as.numeric(coef(lm(Y ~ x1 + x2))[2]) , tolerance=0.1, scale = 1)
  ## more challenging data
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 1 * x1 - 1 * x2, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2', 'z1', 'z2'))
configureRJ(mConf, c('beta1', 'beta2'), indicator =c('z1', 'z2'))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE, resetFunctions = TRUE)
output <- runMCMC(cMCMC, niter=100, nburnin = 0, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## check toggled_sampler
## when indicators are zero parameters are zero
expect_equal(which(output[, 'beta1'] == 0), which(output[, 'z1'] == 0))
expect_equal(which(output[, 'beta2'] == 0), which(output[, 'z2'] == 0))
if(.Platform$OS.type != "windows") {
nimble:::clearCompiled(m)
}
})
test_that("Check passing node vector - indicator", {
#####################################
## Vector node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
for(i in 1:5){
beta[i] ~ dnorm(0, sd = 100)
z[i] ~ dbern(psi[i])
psi[i] ~ dbeta(1, 1)
}
sigma ~ dunif(0, 100)
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5]*z[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 5), z = rep(0, 5), psi = rep(0.5, 5), sigma = sd(Y))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## no error
expect_error(configureRJ(mConf, targetNodes = "beta", indicatorNodes = "z"), NA)
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), indicatorNodes = c("z[1]", "z[2:4]")), NA)
## throws error
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), indicatorNodes = "z"),
"configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
# if(.Platform$OS.type != "windows") {
# nimble:::clearCompiled(m)
# }
})
|
/packages/nimble/inst/tests/test-mcmcrj.R
|
permissive
|
dochvam/nimble
|
R
| false | false | 17,928 |
r
|
source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble'))
RwarnLevel <- options('warn')$warn
options(warn = 1)
nimbleVerboseSetting <- nimbleOptions('verbose')
nimbleOptions(verbose = FALSE)
nimbleProgressBarSetting <- nimbleOptions('MCMCprogressBar')
nimbleOptions(MCMCprogressBar = FALSE)
context('Testing of MCMC_RJ functionality')
test_that("Test configureRJ with no indicator variables", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * x1[i] + beta2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## One node
nodes <- c("beta2")
expect_error(configureRJ(mConf, nodes),
"configureRJ: Provide 'indicatorNodes' or 'priorProb' vector")
#####################################
## One node, multiple parameters
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(fixedValue = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(mean = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(scale = c(2,1))),
'configureRJ: inconsistent length')
## priorProb not probabilities
expect_error(configureRJ(mConf, nodes, prior = -1))
expect_error(configureRJ(mConf, nodes, prior = 2))
#####################################
  ## Multiple nodes, fewer parameters
nodes <- c("beta0", "beta1", "beta2")
expect_error(configureRJ(mConf, nodes, prior = c(0.5, 0.5)),
"configureRJ: Length of 'priorProb' vector must match 'targetNodes' length.")
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(fixedValue = c(0,1))),
"configureRJ: inconsistent length")
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(mean = c(0,1))),
"configureRJ: inconsistent length")
expect_error(configureRJ(mConf, nodes, prior = 0.5, control = list(scale = c(2,1))),
"configureRJ: inconsistent length")
#####################################
## priorProb not probabilities
expect_error(configureRJ(mConf, nodes, prior = c(0.5, 2, 0.2)),
"configureRJ: Elements in priorProb")
})
test_that("Test configureRJ with multivariate node - no indicator", {
##############################
## Multivariate node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
mu[1:5] <- rep(0, 5)
sigma[1:5] <- 1/rep(100, 5)
simgma.mat[1:5, 1:5] <- diag(sigma[1:5])
beta[1:5] ~ dmnorm(mu[1:5], sigma_mat[1:5, 1:5])
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma.y)
}
sigma.y ~ dunif(0, 100)
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 5), sigma.y = sd(Y), sigma_mat = diag(rep(1/100, 5)), mu = rep(0, 5))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## test multivariate node with joint sampler
expect_error(configureRJ(mConf, "beta", prior =0.5),
'is multivariate and uses a joint sampler; only univariate samplers can be used with reversible jump sampling.')
## test multivariate node with univariate samplers
nodeAsScalar <- mConf$model$expandNodeNames("beta", returnScalarComponents = TRUE)
## acceptable case
mConf$removeSamplers("beta")
for(node in nodeAsScalar){
mConf$addSampler(node, type = "RW")
}
targetNodes <- c("beta")
control <- list(fixedValue = 0, mean = 0, scale = 2)
## this should work
expect_error(configureRJ(mcmcConf = mConf, targetNodes = targetNodes, priorProb = 0.5, control = control), NA)
## test double call to configureRJ
expect_error(configureRJ(mcmcConf = mConf, targetNodes = targetNodes, priorProb = 0.5, control = control),
"is already configured for reversible jump")
})
test_that("Check passing node vector - no indicator", {
#####################################
## Vector node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
for(i in 1:5){
beta[i] ~ dnorm(0, sd = 100)
}
sigma ~ dunif(0, 100)
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 10), sigma = sd(Y))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## no error
expect_error(configureRJ(mConf, c("beta"), prior = 0.5), NA)
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), prior = 0.5), NA)
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), prior = c(0.5, 0.2)), NA)
})
test_that("Check sampler_RJ behaviour - no indicator", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * x1[i] + beta2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
## check sampler behaviour
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2'))
configureRJ(mConf, c('beta1', 'beta2'), prior = 0.5)
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=1000, nburnin = 900, thin=1,
inits = list(beta0 = 1, beta1 = 1, beta2 = 1, sigma = sd(Y)), setSeed = 1)
## beta2 should be more likely to be 0
expect_true(sum(output[, 'beta2'] == 0)/100 > 0.5)
# expect_true(mean(output[which(output[, 'beta2'] != 0), 'beta2']) - coef(lm(Y ~ x1 + x2))[3] < 0.05) ## should check that beta2 is small when in the model
## beta1 should be less likely to be 0
expect_true(sum(output[, 'beta1'] == 0)/100 < 0.5)
## beta1 estimate (comparison with lm estimate)
expect_equal(mean(output[which(output[, 'beta1'] != 0), 'beta1']), as.numeric(coef(lm(Y ~ x1 + x2))[2]) , tolerance=0.1, scale = 1)
# ## beta1 should be in the model in last 100 iterations (chain has converged)
# expect_false(any(output[, 'beta1'] == 0))
#######
## change proposal mean for beta1 - still reasonable even if far
## dnorm(1.5, 3, 1) = 0.12
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1'))
configureRJ(mConf, 'beta1', prior = 0.5, control = list(mean = 3))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=100, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## beta1 estimate (comparison with lm estimate)
expect_equal(mean(output[which(output[, 'beta1'] != 0), 'beta1']), as.numeric(coef(lm(Y ~ x1 + x2))[2]) , tolerance=0.1, scale = 1)
#######
## fixed value on true beta1
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1'))
configureRJ(mConf, 'beta1', prior = 0.5, control = list(fixedValue = 1.5))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=100, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
expect_equal(mean(output[which(output[, 'beta1'] != 0), 'beta1']), 1.5 , tolerance=0.01, scale = 1)
#######
## fixedValue on far value for beta2
m <- nimbleModel(code, data=data)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta2'))
configureRJ(mConf, 'beta2', prior = 0.5, control = list(fixedValue = 5))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE)
output <- runMCMC(cMCMC, niter=100, thin=1,
inits = list(beta0 = 1, beta1 = 1, beta2 = 1, sigma = sd(Y)), setSeed = 1)
## still beta2 is in the models but really small
expect_equal(mean(output[which(output[, 'beta2'] != 0), 'beta2']), 0 , tolerance=0.1, scale = 1)
if(.Platform$OS.type != "windows") {
nimble:::clearCompiled(m)
}
})
######################################
## Tests using indicator variables
######################################
test_that("Test configureRJ with indicator variables", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
    z1 ~ dbern(psi) ## indicator variable for including beta1
z2 ~ dbern(psi) ## indicator variable for including beta2
psi ~ dbeta(1, 1)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * z1 * x1[i] + beta2 * z2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## One node
nodes <- c("beta2")
expect_error(configureRJ(mConf, nodes),
"configureRJ: Provide 'indicatorNodes' or 'priorProb' vector")
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2")),
"configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
## One node, multiple parameters
expect_error(configureRJ(mConf, nodes, indicatorNodes = "z1", control = list(mean = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, indicatorNodes = "z1", control = list(scale = c(2,1))),
'configureRJ: inconsistent length')
## Multiple nodes, fewer parameters
nodes <- c("beta0", "beta1", "beta2")
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2")),
"configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2"), control = list(mean = c(0,1))),
'configureRJ: inconsistent length')
expect_error(configureRJ(mConf, nodes, indicatorNodes = c("z1", "z2"), control = list(scale = c(2,1))),
'configureRJ: inconsistent length')
})
test_that("Test configureRJ with multivariate node - indicator", {
##############################
## Multivariate node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
mu[1:5] <- rep(0, 5)
sigma[1:5] <- 1/rep(100, 5)
sigma_mat[1:5, 1:5] <- diag(sigma[1:5])
beta[1:5] ~ dmnorm(mu[1:5], sigma_mat[1:5, 1:5])
for(i in 1:5){
## indicator variables
z[i] ~ dbern(0.5)
}
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5]*z[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma.y)
}
sigma.y ~ dunif(0, 100)
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 5), sigma.y = sd(Y), sigma_mat = diag(rep(1/100, 5)), mu = rep(0, 5))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## test multivariate node with joint sampler
expect_error(configureRJ(mConf, "beta", indicatorNodes = "z"),
'is multivariate and uses a joint sampler; only univariate samplers can be used with reversible jump sampling.')
## test multivariate node with univariate samplers
nodeAsScalar <- mConf$model$expandNodeNames("beta", returnScalarComponents = TRUE)
## acceptable case
mConf$removeSamplers("beta")
for(node in nodeAsScalar){
mConf$addSampler(node, type = "RW")
}
## this should work
control <- list(fixedValue = 0, mean = 0, scale = 2)
expect_error(configureRJ(mcmcConf = mConf, targetNodes = "beta", indicatorNodes = "z", control = control), NA)
## test double call to configureRJ
expect_error(configureRJ(mcmcConf = mConf, targetNodes = "beta", indicatorNodes = "z", control = control),
'is already configured for reversible jump')
})
test_that("Check sampler_RJ_indicator behaviour - indicator", {
## Linear regression with 2 covariates, one in the model
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
beta1 ~ dnorm(0, sd = 100)
beta2 ~ dnorm(0, sd = 100)
sigma ~ dunif(0, 100)
z1 ~ dbern(psi) ## indicator variable for including beta1
z2 ~ dbern(psi) ## indicator variable for including beta2
psi ~ dbeta(1, 1)
for(i in 1:50) {
Ypred[i] <- beta0 + beta1 * z1 * x1[i] + beta2 * z2 * x2[i]
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## Data simulation
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 2 * x1, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
## check sampler behaviour
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2', 'z1', 'z2'))
configureRJ(mConf, c('beta1', 'beta2'), indicatorNodes = c('z1', 'z2'))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE, resetFunctions = TRUE)
output <- runMCMC(cMCMC, niter=1000, nburnin = 900, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## beta2 should be more likely to be 0
expect_true(mean(output[, 'z2']) < 0.5)
## beta2 should be 0 when z2 is 0
expect_equal(sum(output[, 'beta2'] != 0)/100, mean(output[, 'z2']) )
## beta1 should be less likely to be 0
expect_true(mean(output[, 'z1']) > 0.5)
## beta1 should be 0 when z1 is 0
expect_equal(sum(output[, 'beta1'] != 0)/100, mean(output[, 'z1']) )
## check beta1 estimate
expect_equal(mean(output[which(output[, 'z1'] != 0), 'beta1']), as.numeric(coef(lm(Y ~ x1 + x2))[2]) , tolerance=0.1, scale = 1)
## more challenging data
set.seed(0)
x1 <- runif(50, -1, 1)
x2 <- runif(50, -1, 1)
Y <- rnorm(50, 1.5 + 1 * x1 - 1 * x2, sd = 1)
data <- list(Y = Y, x1 = x1, x2 = x2)
inits <- list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y), z2 = 1, z1 = 1, psi = 0.5)
m <- nimbleModel(code, data=data, inits=inits)
cm <- compileNimble(m)
mConf <- configureMCMC(m, monitors = c('beta1', 'beta2', 'z1', 'z2'))
configureRJ(mConf, c('beta1', 'beta2'), indicatorNodes = c('z1', 'z2'))
mMCMC <- buildMCMC(mConf)
cMCMC <- compileNimble(mMCMC, project = m, showCompilerOutput = FALSE, resetFunctions = TRUE)
output <- runMCMC(cMCMC, niter=100, nburnin = 0, thin=1,
inits = list(beta0 = 0, beta1 = 0, beta2 = 0, sigma = sd(Y)), setSeed = 1)
## check toggled_sampler
## when indicators are zero parameters are zero
expect_equal(which(output[, 'beta1'] == 0), which(output[, 'z1'] == 0))
expect_equal(which(output[, 'beta2'] == 0), which(output[, 'z2'] == 0))
if(.Platform$OS.type != "windows") {
nimble:::clearCompiled(m)
}
})
test_that("Check passing node vector - indicator", {
#####################################
## Vector node
code <- nimbleCode({
beta0 ~ dnorm(0, sd = 100)
for(i in 1:5){
beta[i] ~ dnorm(0, sd = 100)
z[i] ~ dbern(psi[i])
psi[i] ~ dbeta(1, 1)
}
sigma ~ dunif(0, 100)
for(i in 1:10) {
Ypred[i] <- beta0 + sum(X[i,1:5]*beta[1:5]*z[1:5])
Y[i] ~ dnorm(Ypred[i], sd = sigma)
}
})
## simulate some data
set.seed(1)
X <- matrix(rnorm(10*5), 10, 5)
betaTrue <- c(2, -2, 3, 0, 0)
eps <- rnorm(10)
Y <- as.vector(X%*%betaTrue + eps)
data <- list(Y = Y, X = X)
inits <- list(beta0 = 0, beta = rep(0, 5), z = rep(0, 5), psi = rep(0.5, 5), sigma = sd(Y))
m <- nimbleModel(code, data=data, inits=inits)
mConf <- configureMCMC(m)
## no error
expect_error(configureRJ(mConf, targetNodes = "beta", indicatorNodes = "z"), NA)
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), indicatorNodes = c("z[1]", "z[2:4]")), NA)
## throws error
mConf <- configureMCMC(m)
expect_error(configureRJ(mConf, c("beta[1]", "beta[2:4]"), indicatorNodes = "z"),
"configureRJ: Length of 'indicatorNodes' vector must match 'targetNodes' length.")
# if(.Platform$OS.type != "windows") {
# nimble:::clearCompiled(m)
# }
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/20pattern.05ode.R
\name{create.firstorder.linear.ode}
\alias{create.firstorder.linear.ode}
\title{create.firstorder.linear.ode}
\usage{
create.firstorder.linear.ode(state.vector, A)
}
\arguments{
\item{state.vector}{a list of symbols, or a \code{TUPLE(x1,x2,x3)}-like expression, which will be converted into a list automatically}
\item{A}{the coefficient matrix, given as a list of lists}
}
\value{
ODE object
}
\description{
Only very simple ODEs of the following form are considered:
\code{ dx/dt = A \%*\% x }
}
|
/symbolicR/man/create.firstorder.linear.ode.Rd
|
no_license
|
isabella232/symbolicR
|
R
| false | true | 563 |
rd
|
source('all_functions.R')
# remove WT
remove_wild_type <- function(m_or_beta_values){
m_or_beta_values <- m_or_beta_values[m_or_beta_values$p53_germline == 'MUT',]
return(m_or_beta_values)
}
# set fixed variables
method = 'noob'
combat = 'combat_1'
remove_leading_pcs = 'first'
# condition on fixed objects to get saving identifiers
which_methyl = 'beta'
beta_thresh = 0.05
cases_450 <- readRDS(paste0('../../Data/', method,'/cases_450_beta_new', combat,'.rda'))
cases_850 <- readRDS(paste0('../../Data/', method,'/cases_850_beta_new', combat,'.rda'))
con_850 <- readRDS(paste0('../../Data/', method,'/con_850_beta_new', combat,'.rda'))
con_mut <- readRDS(paste0('../../Data/', method,'/con_450_beta_new', combat,'.rda'))
con_wt <- readRDS(paste0('../../Data/', method,'/con_wt_beta_new', combat,'.rda'))
##########
# read in age probes
##########
age_probes <- readRDS('../../Data/age_probes.rda')
##########
# load genomic methyl set (from controls) - you need genetic locations by probe from this object
##########
g_ranges <- readRDS('../../Data/g_ranges.rda')
# get probes from rownames
g_ranges$probe <- rownames(g_ranges)
# remove ch probes and duplicates
g_ranges <- g_ranges[!duplicated(g_ranges$start),]
g_ranges <- g_ranges[!grepl('ch', g_ranges$probe),]
names(g_ranges)[1] <- 'chr'
##########
# create variables
##########
# load cases
cases_450 <- cbind(as.data.frame(class.ind(cases_450$gender)),
cases_450)
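# class.ind (from the nnet package; assumed here to be attached via all_functions.R) one-hot encodes
# gender into indicator columns, which are bound in front of each data set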
# remove the original gender variable
cases_450$gender <- NULL
# gender
con_850 <- cbind(as.data.frame(class.ind(con_850$gender)),
con_850)
# remove the original gender variable
con_850$gender <- NULL
# gender
cases_850 <- cbind(as.data.frame(class.ind(cases_850$gender)),
cases_850)
# remove the original gender variable
cases_850$gender <- NULL
# gender
con_wt <- cbind(as.data.frame(class.ind(con_wt$gender)),
con_wt)
con_mut <- cbind(as.data.frame(class.ind(con_mut$gender)),
con_mut)
# remove the original gender variable
con_wt$gender <- NULL
con_mut$gender <- NULL
# standardize the id column name (column 3) across data sets
names(con_wt)[3] <- 'ids'
names(con_mut)[3] <- 'ids'
names(con_850)[3] <- 'ids'
names(cases_850)[3] <- 'ids'
# remove age-associated probes reported in the literature
clin_names <- names(cases_450)[!grepl('^cg', names(cases_450))]
feats <- names(cases_450)[grepl('^cg', names(cases_450))]
feats <- feats[!feats %in% age_probes]
cases_450 <- cases_450[, c(clin_names, feats)]
con_850 <- con_850[, c(clin_names, feats)]
cases_850 <- cases_850[, c(clin_names, feats)]
con_wt <- con_wt[, c(clin_names, feats)]
con_mut <- con_mut[, c(clin_names, feats)]
# run bumphunter on LFS healthy patients (LFS no cancer) and LFS cancer patients (LFS cancer)
bh_feats <- bump_hunter(dat_1 = con_wt,
dat_2 = con_mut,
bump = 'lfs',
boot_num = 50,
beta_thresh = beta_thresh,
methyl_type = methyl_type,
g_ranges = g_ranges)
# cases
cases_450_small <- join_new_features(cases_450, new_features = bh_feats)
con_850_small <- join_new_features(con_850, new_features = bh_feats)
cases_850_small <- join_new_features(cases_850, new_features = bh_feats)
con_mut_small <- join_new_features(con_mut, new_features = bh_feats)
con_wt_small <- join_new_features(con_wt, new_features = bh_feats)
# lfs probes
lfs_bump_probes <- colnames(cases_450)[grepl('^cg', colnames(cases_450))]
rm(bh_feats)
# add a tech variable for data sets run on a single array type, repurposing column 9 (family_name)
names(cases_450)[9] <- 'tech'
names(con_850)[9] <- 'tech'
names(cases_850)[9] <- 'tech'
# fill with the appropriate array type
cases_450$tech <- '450k'
con_850$tech <- '850k'
cases_850$tech <- '850k'
# do the same to con_mut and con_wt
names(con_mut)[9] <- 'tech'
names(con_wt)[9] <- 'tech'
# fill new variable with right tech indication
con_mut$tech <- '450k'
con_wt$tech <- '450k'
saveRDS(cases_450_small, paste0('../../Data/', method,'/cases_450_small_beta_new', combat,'.rda'))
saveRDS(cases_850_small, paste0('../../Data/', method,'/cases_850_small_beta_new', combat,'.rda'))
saveRDS(con_mut_small, paste0('../../Data/', method,'/con_mut_small_beta_new', combat,'.rda'))
saveRDS(con_850_small, paste0('../../Data/', method,'/con_850_small_beta_new', combat,'.rda'))
saveRDS(con_wt_small, paste0('../../Data/', method,'/con_wt_small_beta_new', combat,'.rda'))
saveRDS(cases_450, paste0('../../Data/', method,'/cases_450_cv_beta', combat,'.rda'))
saveRDS(cases_850, paste0('../../Data/', method,'/cases_850_cv_beta', combat,'.rda'))
saveRDS(con_mut, paste0('../../Data/', method,'/con_mut_cv_beta', combat,'.rda'))
saveRDS(con_850, paste0('../../Data/', method,'/con_850_cv_beta', combat,'.rda'))
saveRDS(con_wt, paste0('../../Data/', method,'/con_wt_cv_beta', combat,'.rda'))
|
/Scripts/predict_age/prepare_pc_data.R
|
no_license
|
goldenberg-lab/LFS-age-of-onset
|
R
| false | false | 4,867 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary_visuals.R
\name{violinPlot}
\alias{violinPlot}
\title{violinPlot}
\usage{
violinPlot(
gobject,
expression_values = c("normalized", "scaled", "custom"),
genes,
cluster_column,
cluster_custom_order = NULL,
color_violin = c("genes", "cluster"),
cluster_color_code = NULL,
strip_position = c("top", "right", "left", "bottom"),
strip_text = 7,
axis_text_x_size = 10,
axis_text_y_size = 6,
show_plot = NA,
return_plot = NA,
save_plot = NA,
save_param = list(),
default_save_name = "violinPlot"
)
}
\arguments{
\item{gobject}{giotto object}
\item{expression_values}{expression values to use}
\item{genes}{genes to plot}
\item{cluster_column}{name of column to use for clusters}
\item{cluster_custom_order}{custom order of clusters}
\item{color_violin}{color violin according to genes or clusters}
\item{cluster_color_code}{color code for clusters}
\item{strip_position}{position of gene labels}
\item{strip_text}{size of strip text}
\item{axis_text_x_size}{size of x-axis text}
\item{axis_text_y_size}{size of y-axis text}
\item{show_plot}{show plot}
\item{return_plot}{return ggplot object}
\item{save_plot}{directly save the plot [boolean]}
\item{save_param}{list of saving parameters, see \code{\link{showSaveParameters}}}
\item{default_save_name}{default save name for saving, don't change, change save_name in save_param}
}
\value{
ggplot
}
\description{
Creates violinplot for selected clusters
}
\examples{
\dontrun{
data(mini_giotto_single_cell)
# get all genes
all_genes = slot(mini_giotto_single_cell, 'gene_ID')
# look at cell metadata
cell_metadata = pDataDT(mini_giotto_single_cell)
# plot violinplot with selected genes and stratified for identified cell types
violinPlot(mini_giotto_single_cell,
genes = all_genes[1:10],
cluster_column = 'cell_types')
}
}
|
/man/violinPlot.Rd
|
permissive
|
RubD/Giotto
|
R
| false | true | 1,926 |
rd
|
##set working directory
setwd("~/Desktop/ExData_Plotting1")
##read data file
hpc<-read.csv("household_power_consumption.txt", sep=";",
colClasses = c('character', 'character', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric', 'numeric', 'numeric'),
na.strings='?')
##convert date and time variables
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S")
##subset dates
hpc_sub<-subset(hpc, as.Date(DateTime)>=as.Date("2007-02-01") &
as.Date(DateTime)<=as.Date("2007-02-02"))
##create plot #3
png("plot3.png", height=480, width=480)
plot(hpc_sub$DateTime, hpc_sub$Sub_metering_1, pch=NA, xlab="",
ylab="Energy sub metering")
lines(hpc_sub$DateTime, hpc_sub$Sub_metering_1, col = 'black')
lines(hpc_sub$DateTime, hpc_sub$Sub_metering_2, col = 'red')
lines(hpc_sub$DateTime, hpc_sub$Sub_metering_3, col = 'blue')
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1,1,1),
col = c('black', 'red', 'blue'))
dev.off()
|
/plot3.R
|
no_license
|
lbocchin/ExData_Plotting1
|
R
| false | false | 1,025 |
r
|
##### info ####
# file: elymus_adult_seeds_per_biomass_model_2019_density_exp
# author: Amy Kendig
# date last edited: 10/8/20
# goal: analyze Elymus adult seeds per unit biomass
#### set-up ####
# clear all existing data
rm(list=ls())
# load packages
library(tidyverse)
library(brms)
# import data
bioD2Dat <- read_csv("data/ev_biomass_seeds_oct_2019_density_exp.csv")
seedD2Dat <- read_csv("intermediate-data/ev_processed_seeds_both_year_conversion_2019_density_exp.csv")
#### edit data ####
# add columns
# remove missing data
# select Elymus adults
evASeedD2Dat <- seedD2Dat %>%
group_by(site, plot, treatment, sp, ID) %>%
summarise(seeds = sum(seeds)) %>%
ungroup() %>%
full_join(bioD2Dat %>%
rename(bio.g = weight)) %>%
mutate(seeds = replace_na(seeds, 0),
seeds_per_bio = seeds / bio.g,
seeds_prod = ifelse(seeds > 0, 1, 0),
log_seeds_per_bio = log(seeds_per_bio),
fungicide = ifelse(treatment == "fungicide", 1, 0),
log_bio.g = log(bio.g),
treatment = recode(treatment, water = "control"),
plotr = ifelse(treatment == "fungicide", plot + 10, plot)) %>%
filter(!is.na(seeds_per_bio) & ID == "A")
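# plotr (presumably) gives fungicide plots distinct IDs (plot + 10) so plot numbers are unique across
# treatments; seeds_prod flags whether an individual produced any seed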
#### initial visualizations ####
# figure
ggplot(evASeedD2Dat, aes(log_bio.g, log_seeds_per_bio, color = treatment)) +
geom_point(size = 2, alpha = 0.5) +
theme_bw()
# histogram
ggplot(evASeedD2Dat, aes(seeds_per_bio)) +
geom_histogram() +
theme_bw()
# 7 zeros
# yes/no seeds
ggplot(evASeedD2Dat, aes(log_bio.g, seeds_prod, color = treatment)) +
geom_point(size = 2, alpha = 0.5) +
theme_bw()
# the range looks very similar to Elymus seedlings -- combine the data
|
/code/elymus_adults_seeds_per_biomass_model_2019_density_exp.R
|
no_license
|
aekendig/microstegium-bipolaris
|
R
| false | false | 1,683 |
r
|
works_with_R("3.1.2",
"tdhock/PeakError@d9196abd9ba51ad1b8f165d49870039593b94732",
"tdhock/ggplot2@aac38b6c48c016c88123208d497d896864e74bd7",
"tdhock/PeakSegDP@5bcee97f494dcbc01a69e0fe178863564e9985bc",
"Rdatatable/data.table@200b5b40dd3b05112688c3a9ca2dd41319c2bbae",
reshape2="1.2.2",
dplyr="0.4.0")
chunk.name <- "H3K36me3_AM_immune/8"
chunk.name <- "H3K4me3_PGP_immune/7"
counts.file <- file.path("data", chunk.name, "counts.RData")
load(counts.file)
counts.list <- split(counts, counts$sample.id)
sample.id <- "McGill0026"
sample.counts <- counts.list[[sample.id]]
cell.type <- as.character(sample.counts$cell.type[1])
sample.counts$weight <- with(sample.counts, chromEnd-chromStart)
n <- nrow(sample.counts)
l <- 400
seg2.starts <- as.integer(seq(1, n, l=l)[-c(1, l)])
loss.list <- list()
mean.mat <- matrix(NA, length(seg2.starts), 2)
for(model.i in seq_along(seg2.starts)){
seg2.start <- seg2.starts[[model.i]]
seg2.chromStart <- sample.counts$chromStart[seg2.start]
seg.starts <- c(1, seg2.start)
seg.ends <- c(seg2.start-1, n)
for(seg.i in seq_along(seg.starts)){
seg.start <- seg.starts[[seg.i]]
seg.end <- seg.ends[[seg.i]]
seg.data <- sample.counts[seg.start:seg.end, ]
seg.mean <- with(seg.data, sum(coverage * weight)/sum(weight))
mean.mat[model.i, seg.i] <- seg.mean
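## PoissonLoss(count, mean, weight) from PeakSegDP: the weighted Poisson loss of fitting seg.mean to this segment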
seg.loss <- with(seg.data, PoissonLoss(coverage, seg.mean, weight))
loss.list[[paste(model.i, seg.i)]] <-
data.table(model.i, seg.i,
seg2.chromStart, seg2.start,
seg.start, seg.end,
seg.chromStart=sample.counts$chromStart[seg.start],
seg.chromEnd=sample.counts$chromEnd[seg.end],
seg.mean, seg.loss)
}
}
loss.dt <- do.call(rbind, loss.list)
model.dt <- loss.dt %>%
group_by(model.i, seg2.chromStart) %>%
summarise(loss=sum(seg.loss))
model.dt$feasible <-
ifelse(mean.mat[,1] < mean.mat[,2], "yes", "no")
feasible <- model.dt %>%
filter(feasible=="yes")
show.models <-
c(5, 40,
which.min(model.dt$loss),
120, 200, 310)
show.loss.list <- split(loss.dt, loss.dt$model.i)
show.model.list <- split(model.dt, model.dt$model.i)
png.list <- list()
last.base <- max(model.dt$seg2.chromStart/1e3)
best.loss <- min(model.dt$loss)
t.dt <- data.table(last.base, best.loss, what="loss")
for(show.model.i in seq_along(show.models)){
model.i <- show.models[[show.model.i]]
show.model <- show.model.list[[model.i]]
show.loss <- show.loss.list[[model.i]]
selectedPlot <-
ggplot()+
geom_step(aes(chromStart/1e3, coverage),
data=data.table(sample.counts, what="profile"),
color="grey50")+
geom_segment(aes(seg.chromStart/1e3, seg.mean,
xend=seg.chromEnd/1e3, yend=seg.mean),
color="green",
data=data.frame(show.loss, what="profile"))+
geom_vline(aes(xintercept=last.base), data=t.dt,
color="grey")+
geom_text(aes(last.base, best.loss, label="t "),
data=t.dt, hjust=1, vjust=0, color="grey")+
## geom_line(aes(seg2.chromStart/1e3, loss),
## data=data.table(model.dt, what="loss"))+
geom_point(aes(seg2.chromStart/1e3, loss, size=feasible),
data=data.table(model.dt, what="loss"),
pch=1)+
geom_point(aes(seg2.chromStart/1e3, loss, size=feasible),
data=data.table(show.model, what="loss"),
pch=1,
color="green")+
geom_vline(aes(xintercept=seg2.chromStart/1e3),
data=show.model,
linetype="dotted",
color="green")+
geom_text(aes(seg2.chromStart/1e3,
max(sample.counts$coverage),
label="t' "),
hjust=1,
vjust=1,
data=data.table(show.model, what="profile"),
color="green")+
scale_size_manual(values=c(yes=2, no=0.5))+
theme_bw()+
theme(panel.margin=grid::unit(0, "cm"))+
facet_grid(what ~ ., scales="free")+
ylab("")+
xlab(paste("position on chromosome (kb = kilo bases)"))
png(png.name <- sprintf("figure-dp-%d.png", show.model.i),
units="in", res=200, width=6, height=3)
print(selectedPlot)
dev.off()
png.list[[png.name]] <- png.name
}
pngs <- do.call(c, png.list)
png.tex <- sprintf("
\\begin{frame}
\\frametitle{Computation of optimal loss $\\mathcal L_{s, t}$
for $s=2$ segments up to last data point $t = d$}
\\includegraphics[width=\\textwidth]{%s}
$$
\\mathcal L_{2, t} =
\\min_{
t' < t
}
\\underbrace{
\\mathcal L_{1, t'}
}_{
\\text{optimal loss in 1 segment up to $t'$}
}
+
\\underbrace{
c_{(t', t]}
}_{
\\text{optimal loss of 2nd segment $(t', t]$}
}
$$
\\end{frame}
", pngs)
cat(png.tex, file="figure-dp.tex")
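## Illustrative sketch, not part of the original script: the s = 2 recursion from
## the slide text above, L_{2,t} = min_{t' < t} L_{1,t'} + c_{(t',t]}, written out
## for a plain vector of counts with unit weights. It assumes an un-normalised
## Poisson loss, sum(m - x * log(m)), in the spirit of the PoissonLoss call above.
two_segment_loss <- function(x){
  stopifnot(length(x) >= 2)
  n <- length(x)
  pois_loss <- function(y){
    m <- mean(y)
    if(m == 0) 0 else sum(m - y * log(m))
  }
  ## L_{1, t'}: single-segment loss of x[1:t'] for every candidate change point t'
  L1 <- sapply(seq_len(n), function(tp) pois_loss(x[1:tp]))
  ## c_{(t', t]}: loss of one segment on x[(t'+1):n], here with t fixed at n
  c2 <- sapply(seq_len(n - 1), function(tp) pois_loss(x[(tp + 1):n]))
  best <- which.min(L1[seq_len(n - 1)] + c2)
  list(change.after = best, loss = L1[best] + c2[best])
}
## e.g. two_segment_loss(c(rpois(50, 2), rpois(50, 10)))$change.after should be near 50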
|
/figure-dp.R
|
no_license
|
tdhock/PeakSeg-paper
|
R
| false | false | 4,791 |
r
|
#' Calculate the expected genetic variance in simulated families
#'
#'
#' @description
#' Calculates the expected genetic variance of a cross, assuming complete selfing.
#'
#' @param genome An object of class \code{genome}.
#' @param pedigree A \code{pedigree} detailing the scheme to develop the family.
#' Use \code{\link{sim_pedigree}} to generate.
#' @param founder.pop An object of class \code{pop} with the geno information for
#' the parents. Additional individuals can be present in \code{parent_pop}. They
#' will be filtered according to the parents in the \code{crossing.block}.
#' @param crossing.block A crossing block detailing the crosses to make. Must be a
#' \code{data.frame} with 2 columns: the first gives the name of parent 1, and the
#' second gives the name of parent 2. See \code{\link{sim_crossing_block}}.
#'
#' @examples
#'
#' # Simulate a genome
#' n.mar <- c(505, 505, 505)
#' len <- c(120, 130, 140)
#'
#' genome <- sim_genome(len, n.mar)
#'
#' # Simulate a quantitative trait influenced by 50 QTL
#' qtl.model <- matrix(NA, 50, 4)
#' genome <- sim_gen_model(genome = genome, qtl.model = qtl.model,
#' add.dist = "geometric", max.qtl = 50)
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#'
#' # Generate a crossing block with 5 crosses
#' cb <- sim_crossing_block(parents = indnames(founder.pop), n.crosses = 5)
#'
#' # Create a pedigree with 100 individuals selfed to the F_3 generation
#' ped <- sim_pedigree(n.ind = 100, n.selfgen = 2)
#'
#' calc_exp_genvar(genome = genome, pedigree = ped, founder.pop = founder.pop,
#' crossing.block = cb)
#'
#'
#' ## If two traits are present, the genetic correlation is calculated
#' # Simulate two quantitative traits influenced by 50 pleiotropic QTL
#' qtl.model <- replicate(2, matrix(NA, 50, 4), simplify = FALSE)
#' genome <- sim_multi_gen_model(genome = genome, qtl.model = qtl.model, corr = 0.99,
#' prob.corr = cbind(0, 1), add.dist = "normal")
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#'
#' calc_exp_genvar(genome = genome, pedigree = ped, founder.pop = founder.pop,
#' crossing.block = cb)
#'
#'
#'
#' @importFrom qtl mf.h
#' @importFrom simcross check_pedigree
#' @importFrom Matrix .bdiag
#' @importFrom tidyr crossing
#' @importFrom purrr pmap_dbl
#'
#' @export
#'
calc_exp_genvar <- function(genome, pedigree, founder.pop, crossing.block) {
# Error handling
if (!inherits(genome, "genome"))
stop("The input 'genome' must be of class 'genome.'")
# Check the pedigree
if (!check_pedigree(pedigree, ignore_sex = TRUE))
stop("The pedigree is not formatted correctly.")
# Check the crossing block
if (ncol(crossing.block) != 2) {
stop("The crossing block should have two columns.")
} else {
crossing.block <- as.data.frame(crossing.block)
}
# founder.pop needs to be a pop object
if (!inherits(founder.pop, "pop"))
stop("The input 'founder.pop' must be of class 'pop'")
# Check the genome and geno
if (!check_geno(genome = genome, geno = founder.pop$geno))
stop("The geno did not pass. See warning for reason.")
## How many traits
n_traits <- length(genome$gen_model)
# If it is more than 2, error out
stopifnot(n_traits <= 2)
## Calculate the expected genetic variance
## What are the expected allele frequencies in the population?
## Is there any backcrossing?
mom_ped <- pedigree[pedigree$mom == 1,]
dad_ped <- pedigree[pedigree$mom == 2,]
mom_dist_gen <- length(unique(mom_ped$gen))
dad_dist_gen <- length(unique(dad_ped$gen))
max_bc_gen <- pmax(mom_dist_gen, dad_dist_gen) - 1
# The expected frequency of the minor allele is 0.5 ^ (max_bc_gen + 1)
exp_q <- 0.5^(max_bc_gen + 1)
exp_p <- 1 - exp_q
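# e.g. no backcrossing (max_bc_gen = 0) gives exp_q = exp_p = 0.5; one backcross generation gives exp_q = 0.25, exp_p = 0.75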
# Get the QTL information - drop unused levels
qtl_info <- pull_qtl(genome, unique = FALSE)
# Filter out QTL with no additive effect
qtl_info <- droplevels(qtl_info[qtl_info$add_eff != 0,,drop = FALSE])
# Split by trait
qtl_info_split <- split(qtl_info, qtl_info$trait)
## Iterate over traits
qtl_covariance <- lapply(X = qtl_info_split, FUN = function(trait_qtl) {
row.names(trait_qtl) <- trait_qtl[["qtl_name"]]
## Calculate the expected genetic variance and covariance of QTL
qtl_info <- as.matrix(trait_qtl[,c("chr", "pos", "add_eff"), drop = FALSE])
add_eff <- qtl_info[,"add_eff", drop = FALSE]
pos <- qtl_info[,"pos", drop = FALSE]
covar <- tcrossprod(add_eff)
## Create an empty matrix
D <- matrix(0, nrow = nrow(pos), ncol = nrow(pos), dimnames = dimnames(covar))
# Calculate separate distance matrices per chromosome
chr_c <- lapply(X = split(trait_qtl, trait_qtl[,"chr",drop = FALSE]), FUN = function(x) as.matrix(dist(x[,"pos",drop = FALSE])))
for (cr in chr_c) {
cr2 <- qtl:::mf.h(cr)
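# mf.h is the Haldane map function (cM to recombination fraction, c = (1 - exp(-d/50)) / 2); the factor
# (1 - 2c) / (1 + 2c) computed next is the expected disequilibrium between linked loci in selfed lines
# (Zhong and Jannink, 2007)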
d <- ((1 - (2 * cr2)) / (1 + (2 * cr2)))
D[row.names(cr), colnames(cr)] <- d
}
# The covariance is the QTL effect product multiplied by the expected D
qtl_covar <- covar * D
})
if (n_traits > 1) {
## Calculate the genetic covariance between QTL for different traits
# Split by chromosome
qtl_chr_split <- split(qtl_info, qtl_info$chr)
# Create an empty matrix of trait1 and trait2 QTL
qtl_trait_covariance <- matrix(0, nrow = nrow(qtl_info_split[[1]]), ncol = nrow(qtl_info_split[[2]]),
dimnames = list(qtl_info_split[[1]][["qtl_name"]], qtl_info_split[[2]][["qtl_name"]]))
## Iterate over chromosomes
covar_list <- lapply(X = qtl_chr_split, FUN = function(chr_qtl) {
# Split by trait
trait_split <- split(chr_qtl, chr_qtl$trait)
## QTL names for each trait
qtl_names <- lapply(X = trait_split, FUN = "[[", "qtl_name")
qtl_pos <- lapply(X = trait_split, FUN = "[[", "pos")
qtl_eff <- lapply(X = trait_split, FUN = function(q) as.matrix(q$add_eff))
## Calculate the pairwise distance
d <- abs(outer(X = qtl_pos[[1]], Y = qtl_pos[[2]], FUN = `-`))
# Calculate pairwise D (see Zhong and Jannink, 2007)
# First convert cM to recombination fraction
c <- qtl:::mf.h(d)
D <- ((1 - (2 * c)) / (1 + (2 * c)))
# Product of QTL effects
qtl_crossprod <- tcrossprod(qtl_eff[[1]], qtl_eff[[2]])
dimnames(qtl_crossprod) <- qtl_names
# The covariance is the QTL effect product multiplied by the expected D
qtl_crossprod * D
})
## Add to the large matrix
for (cov in covar_list) {
qtl_trait_covariance[row.names(cov), colnames(cov)] <- cov
}
} else {
qtl_trait_covariance <- NULL
}
## Now we iterate over the parent pairs to determine the QTL that are segregating
# Replicate the crossing block
## Add columns to the crossing.block for exp mu and exp varG
crossing_block <- crossing(crossing.block, trait = paste0("trait", seq(length(genome$gen_model))))
exp_mu <- list()
exp_varG <- list()
exp_corG <- list()
## Pull out the qtl genotypes for each trait
qtl_names <- lapply(X = qtl_info_split, FUN = "[[", "qtl_name")
qtl_geno <- lapply(X = qtl_names, function(q) pull_genotype(genome = genome, geno = founder.pop$geno, loci = q) - 1)
# Iterate over the crossing block
for (j in seq(nrow(crossing.block))) {
pars <- as.character(crossing.block[j,1:2])
## Get a list of the polymorphic QTL
poly_qtl_list <- lapply(X = qtl_geno, FUN = function(tr_qtl) {
# Subset the parents
par_qtl_geno <- tr_qtl[pars,,drop = FALSE]
qtl_means <- colMeans(par_qtl_geno)
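# genotypes are coded -1/0/1 here (geno - 1); for inbred parents a column mean of 0 means the two
# parents carry opposite alleles, i.e. the QTL segregates in this cross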
par1_qtl <- par_qtl_geno[1,,drop = FALSE]
par1_qtl[,qtl_means == 0, drop = FALSE]
})
# Iterate over the traits and calculate individual genetic variance
trait_var <- mapply(poly_qtl_list, qtl_covariance, FUN = function(x, y) sum(crossprod(x) * y[colnames(x), colnames(x)]))
if (!is.null(qtl_trait_covariance)) {
## Calculate the expected covariance
trait_cov <- sum(qtl_trait_covariance[colnames(poly_qtl_list[[1]]), colnames(poly_qtl_list[[2]])] * crossprod(poly_qtl_list[[1]], poly_qtl_list[[2]]))
# The expected correlation is calculated using the expected sd and expected cov
exp_corG_j <- trait_cov / prod(sqrt(trait_var))
exp_corG[[j]] <- rep(exp_corG_j, 2)
}
# The expected mu is simply the mean of the genotypic values of the two parents
exp_mu_j <- colMeans(founder.pop$geno_val[founder.pop$geno_val$ind %in% pars,-1,drop = F])
## Add to the lists
exp_mu[[j]] <- exp_mu_j
exp_varG[[j]] <- trait_var
}
## Add the variances and means to the crossing block
crossing_block$exp_mu <- unlist(exp_mu)
crossing_block$exp_varG <- unlist(exp_varG)
crossing_block$exp_corG <- unlist(exp_corG)
# Return the crossing block
return(crossing_block)
}
#' Predict the genetic variance in prospective crosses
#'
#' @description
#' Uses the expected genetic variance formula and marker effects to predict the
#' genetic variance and correlation in potential crosses.
#'
#' @param genome An object of class \code{genome}.
#' @param pedigree A \code{pedigree} detailing the scheme to develop the family.
#' Use \code{\link{sim_pedigree}} to generate.
#' @param founder.pop An object of class \code{pop} with the geno information for
#' the parents. Additional individuals can be present in \code{parent_pop}. They
#' will be filtered according to the parents in the \code{crossing.block}.
#' @param training.pop An object of class \code{pop} with the elements \code{geno} and
#' \code{pheno_val}. This is used as the training population.
#' @param crossing.block A crossing block detailing the crosses to make. Must be a
#' \code{data.frame} with 2 columns: the first gives the name of parent 1, and the
#' second gives the name of parent 2. See \code{\link{sim_crossing_block}}.
#' @param method The statistical method to predict marker effects. If \code{"RRBLUP"}, the
#' \code{\link[rrBLUP]{mixed.solve}} function is used. Otherwise, the \code{\link[BGLR]{BGLR}}
#' function is used.
#' @param n.iter,burn.in,thin Number of iterations, number of burn-ins, and thinning, respectively. See
#' \code{\link[BGLR]{BGLR}}.
#' @param save.at See \code{\link[BGLR]{BGLR}}.
#'
#' @examples
#'
#' # Simulate a genome
#' n.mar <- c(505, 505, 505)
#' len <- c(120, 130, 140)
#'
#' genome <- sim_genome(len, n.mar)
#'
#' # Simulate a quantitative trait influenced by 50 QTL
#' qtl.model <- matrix(NA, 50, 4)
#' genome <- sim_gen_model(genome = genome, qtl.model = qtl.model,
#' add.dist = "geometric", max.qtl = 50)
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#' training.pop <- sim_phenoval(founder.pop, h2 = 0.8)
#'
#' # Generate a crossing block with 5 crosses
#' cb <- sim_crossing_block(parents = indnames(founder.pop), n.crosses = 5)
#'
#' # Create a pedigree with 100 individuals selfed to the F_3 generation
#' ped <- sim_pedigree(n.ind = 100, n.selfgen = 2)
#'
#' pred_genvar(genome = genome, pedigree = ped, training.pop = training.pop,
#' founder.pop = founder.pop, crossing.block = cb)
#'
#'
#' ## If two traits are present, the genetic correlation is calculated
#' # Simulate two quantitative traits influenced by 50 pleiotropic QTL
#' qtl.model <- replicate(2, matrix(NA, 50, 4), simplify = FALSE)
#' genome <- sim_multi_gen_model(genome = genome, qtl.model = qtl.model, corr = 0.99,
#' prob.corr = cbind(0, 1), add.dist = "normal")
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#' training.pop <- sim_phenoval(founder.pop, h2 = 0.8)
#'
#' pred_genvar(genome = genome, pedigree = ped, training.pop = training.pop,
#' founder.pop = founder.pop, crossing.block = cb)
#'
#' @importFrom simcross check_pedigree
#'
#' @export
#'
pred_genvar <- function(genome, pedigree, training.pop, founder.pop, crossing.block,
method = c("RRBLUP", "BRR", "BayesA", "BL", "BayesB", "BayesC"),
n.iter = 1500, burn.in = 500, thin = 5, save.at = "") {
# Error handling
if (!inherits(genome, "genome"))
stop("The input 'genome' must be of class 'genome.'")
# Check the pedigree
if (!check_pedigree(pedigree, ignore_sex = TRUE))
stop("The pedigree is not formatted correctly.")
# Check the crossing block
if (ncol(crossing.block) != 2) {
stop("The crossing block should have two columns.")
} else {
crossing.block <- as.data.frame(crossing.block)
}
# founder.pop needs to be a pop object
if (!inherits(founder.pop, "pop"))
stop("The input 'founder.pop' must be of class 'pop'")
# Check the genome and geno
if (!check_geno(genome = genome, geno = founder.pop$geno))
stop("The geno did not pass. See warning for reason.")
# Check the populations
if (!inherits(training.pop, "pop"))
stop("The input 'training.pop' must be of class 'pop'.")
# Make sure the training population has phenotypes
if (is.null(training.pop$pheno_val))
stop("The 'training.pop' must have phenotypic values.")
n_traits <- length(genome$gen_model)
# Check the method
method <- match.arg(method)
# Predict marker effects - only if the TP does not have them
if (is.null(training.pop$mar_eff)) {
marker_eff <- pred_mar_eff(genome = genome, training.pop = training.pop, method = method, n.iter = n.iter,
burn.in = burn.in, thin = thin, save.at = save.at)
} else {
marker_eff <- training.pop
}
## Predict genotypic values in the founder population - this will use the marker effects in the tp
founder_pop1 <- pred_geno_val(genome = genome, training.pop = marker_eff, candidate.pop = founder.pop)
# Predict marker effects
marker_eff <- marker_eff$mar_eff
## Find the positions of these markers
marker_pos <- find_markerpos(genome = genome, marker = marker_eff$marker)
marker_pos$add_eff <- NA
marker_pos$dom_eff <- 0
marker_pos$qtl_name <- row.names(marker_pos)
marker_pos$qtl1_pair <- row.names(marker_pos)
# Duplicate by the number of traits
marker_pos_list <- replicate(n = n_traits, marker_pos, simplify = FALSE)
marker_pos_list[[1]]$qtl1_pair <- NA
# Add effects
for (i in seq_len(n_traits)) {
marker_pos_list[[i]]$add_eff <- marker_eff[,-1][[i]]
}
## Create a new genome with markers as QTL
genome_use <- genome
# Add to the genome
genome_use$gen_model <- marker_pos_list
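## With markers standing in for QTL (and their estimated effects as additive effects), calc_exp_genvar
## can be reused below, so the returned quantities are predicted rather than expected variances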
## Predict
predicted_genvar <- calc_exp_genvar(genome = genome_use, pedigree = pedigree, founder.pop = founder.pop,
crossing.block = crossing.block)
# PGVs
pgvs <- founder_pop1$pred_val
# Replace the expected mu with the predicted mu
for (i in seq_len(nrow(predicted_genvar))) {
predicted_genvar$exp_mu[i] <- mean(pgvs[pgvs$ind %in% predicted_genvar[i,1:2,drop = T],predicted_genvar$trait[i]])
}
if (n_traits == 1) {
names(predicted_genvar)[-1:-3] <- c("pred_mu", "pred_varG")
} else {
names(predicted_genvar)[-1:-3] <- c("pred_mu", "pred_varG", "pred_corG")
}
# Return
return(predicted_genvar)
}
|
/R/family_genetic_variance.R
|
no_license
|
lijinlong1991/pbsim
|
R
| false | false | 15,712 |
r
|
#' Calculate the expected genetic variance in simulated families
#'
#'
#' @description
#' Calculates the expected genetic variance of a cross, assuming complete selfing.
#'
#' @param genome An object of class \code{genome}.
#' @param pedigree A \code{pedigree} detailing the scheme to develop the family.
#' Use \code{\link{sim_pedigree}} to generate.
#' @param founder.pop An object of class \code{pop} with the geno information for
#' the parents. Additional individuals can be present in \code{parent_pop}. They
#' will be filtered according to the parents in the \code{crossing.block}.
#' @param crossing.block A crossing block detailing the crosses to make. Must be a
#' \code{data.frame} with 2 columns: the first gives the name of parent 1, and the
#' second gives the name of parent 2. See \code{\link{sim_crossing.block}}.
#'
#' @examples
#'
#' # Simulate a genome
#' n.mar <- c(505, 505, 505)
#' len <- c(120, 130, 140)
#'
#' genome <- sim_genome(len, n.mar)
#'
#' # Simulate a quantitative trait influenced by 50 QTL
#' qtl.model <- matrix(NA, 50, 4)
#' genome <- sim_gen_model(genome = genome, qtl.model = qtl.model,
#' add.dist = "geometric", max.qtl = 50)
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#'
#' # Generate a crossing block with 5 crosses
#' cb <- sim_crossing_block(parents = indnames(founder.pop), n.crosses = 5)
#'
#' # Create a pedigree with 100 individuals selfed to the F_3 generation
#' ped <- sim_pedigree(n.ind = 100, n.selfgen = 2)
#'
#' calc_exp_genvar(genome = genome, pedigree = ped, founder.pop = founder.pop,
#' crossing.block = cb)
#'
#'
#' ## If two traits are present, the genetic correlation is calculated
#' # Simulate two quantitative traits influenced by 50 pleiotropic QTL
#' qtl.model <- replicate(2, matrix(NA, 50, 4), simplify = FALSE)
#' genome <- sim_multi_gen_model(genome = genome, qtl.model = qtl.model, corr = 0.99,
#' prob.corr = cbind(0, 1), add.dist = "normal")
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#'
#' calc_exp_genvar(genome = genome, pedigree = ped, founder.pop = founder.pop,
#' crossing.block = cb)
#'
#'
#'
#' @importFrom qtl mf.h
#' @importFrom simcross check_pedigree
#' @importFrom Matrix .bdiag
#' @importFrom tidyr crossing
#' @importFrom purrr pmap_dbl
#'
#' @export
#'
calc_exp_genvar <- function(genome, pedigree, founder.pop, crossing.block) {
# Error handling
if (!inherits(genome, "genome"))
stop("The input 'genome' must be of class 'genome.'")
# Check the pedigree
if (!check_pedigree(pedigree, ignore_sex = TRUE))
stop("The pedigree is not formatted correctly.")
# Check the crossing block
if (ncol(crossing.block) != 2) {
stop("The crossing block should have two columns.")
} else {
crossing.block <- as.data.frame(crossing.block)
}
# founder.pop needs to be a pop object
if (!inherits(founder.pop, "pop"))
stop("The input 'founder.pop' must be of class 'pop'")
# Check the genome and geno
if (!check_geno(genome = genome, geno = founder.pop$geno))
stop("The geno did not pass. See warning for reason.")
## How many traits
n_traits <- length(genome$gen_model)
# If it is more than 2, error out
stopifnot(n_traits <= 2)
## Calculate the expected genetic variance
## What are the expected allele frequencies in the population?
## Is there any backcrossing?
mom_ped <- pedigree[pedigree$mom == 1,]
dad_ped <- pedigree[pedigree$mom == 2,]
mom_dist_gen <- length(unique(mom_ped$gen))
dad_dist_gen <- length(unique(dad_ped$gen))
max_bc_gen <- pmax(mom_dist_gen, dad_dist_gen) - 1
# The expected frequency of the minor allele is 0.5 ^ n_bc_gen + 1
exp_q <- 0.5^(max_bc_gen + 1)
exp_p <- 1 - exp_q
# Get the QTL information - drop unused levels
qtl_info <- pull_qtl(genome, unique = FALSE)
# Filter out QTL with no additive effect
qtl_info <- droplevels(qtl_info[qtl_info$add_eff != 0,,drop = FALSE])
# Split by trait
qtl_info_split <- split(qtl_info, qtl_info$trait)
## Iterate over traits
qtl_covariance <- lapply(X = qtl_info_split, FUN = function(trait_qtl) {
row.names(trait_qtl) <- trait_qtl[["qtl_name"]]
## Calculate the expected genetic variance and covariance of QTL
qtl_info <- as.matrix(trait_qtl[,c("chr", "pos", "add_eff"), drop = FALSE])
add_eff <- qtl_info[,"add_eff", drop = FALSE]
pos <- qtl_info[,"pos", drop = FALSE]
covar <- tcrossprod(add_eff)
## Create an empty matrix
D <- matrix(0, nrow = nrow(pos), ncol = nrow(pos), dimnames = dimnames(covar))
# Calculate separate distance matrices per chromosome
chr_c <- lapply(X = split(trait_qtl, trait_qtl[,"chr",drop = FALSE]), FUN = function(x) as.matrix(dist(x[,"pos",drop = FALSE])))
for (cr in chr_c) {
cr2 <- qtl:::mf.h(cr)
d <- ((1 - (2 * cr2)) / (1 + (2 * cr2)))
D[row.names(cr), colnames(cr)] <- d
}
# The covariance is the QTL effect product multiplied by the expected D
qtl_covar <- covar * D
})
if (n_traits > 1) {
## Calculate the genetic covariance between QTL for different traits
# Split by chromosome
qtl_chr_split <- split(qtl_info, qtl_info$chr)
# Create an empty matrix of trait1 and trait2 QTL
qtl_trait_covariance <- matrix(0, nrow = nrow(qtl_info_split[[1]]), ncol = nrow(qtl_info_split[[2]]),
dimnames = list(qtl_info_split[[1]][["qtl_name"]], qtl_info_split[[2]][["qtl_name"]]))
## Iterate over chromosomes
covar_list <- lapply(X = qtl_chr_split, FUN = function(chr_qtl) {
# Split by trait
trait_split <- split(chr_qtl, chr_qtl$trait)
## QTL names for each trait
qtl_names <- lapply(X = trait_split, FUN = "[[", "qtl_name")
qtl_pos <- lapply(X = trait_split, FUN = "[[", "pos")
qtl_eff <- lapply(X = trait_split, FUN = function(q) as.matrix(q$add_eff))
## Calculate the pairwise distance
d <- abs(outer(X = qtl_pos[[1]], Y = qtl_pos[[2]], FUN = `-`))
# Calculate pairwise D (see Zhong and Jannink, 2007)
# First convert cM to recombination fraction
c <- qtl:::mf.h(d)
D <- ((1 - (2 * c)) / (1 + (2 * c)))
# Product of QTL effects
qtl_crossprod <- tcrossprod(qtl_eff[[1]], qtl_eff[[2]])
dimnames(qtl_crossprod) <- qtl_names
# The covariance is the QTL effect product multiplied by the expected D
qtl_crossprod * D
})
## Add to the large matrix
for (cov in covar_list) {
qtl_trait_covariance[row.names(cov), colnames(cov)] <- cov
}
} else {
qtl_trait_covariance <- NULL
}
## Now we iterate over the parent pairs to determine the QTL that are segregating
# Replicate the crossing block
## Add columns to the crossing.block for exp mu and exp varG
crossing_block <- crossing(crossing.block, trait = paste0("trait", seq(length(genome$gen_model))))
exp_mu <- list()
exp_varG <- list()
exp_corG <- list()
## Pull out the qtl genotypes for each trait
qtl_names <- lapply(X = qtl_info_split, FUN = "[[", "qtl_name")
qtl_geno <- lapply(X = qtl_names, function(q) pull_genotype(genome = genome, geno = founder.pop$geno, loci = q) - 1)
# Iterate over the crossing block
for (j in seq(nrow(crossing.block))) {
pars <- as.character(crossing.block[j,1:2])
## Get a list of the polymorphic QTL
poly_qtl_list <- lapply(X = qtl_geno, FUN = function(tr_qtl) {
# Subset the parents
par_qtl_geno <- tr_qtl[pars,,drop = FALSE]
qtl_means <- colMeans(par_qtl_geno)
par1_qtl <- par_qtl_geno[1,,drop = FALSE]
par1_qtl[,qtl_means == 0, drop = FALSE]
})
# Iterate over the traits and calculate individual genetic variance
trait_var <- mapply(poly_qtl_list, qtl_covariance, FUN = function(x, y) sum(crossprod(x) * y[colnames(x), colnames(x)]))
if (!is.null(qtl_trait_covariance)) {
## Calculate the expected covariance
trait_cov <- sum(qtl_trait_covariance[colnames(poly_qtl_list[[1]]), colnames(poly_qtl_list[[2]])] * crossprod(poly_qtl_list[[1]], poly_qtl_list[[2]]))
# The expected correlation is calculated using the expected sd and expected cov
exp_corG_j <- trait_cov / prod(sqrt(trait_var))
exp_corG[[j]] <- rep(exp_corG_j, 2)
}
# The expected mu is simply the mean of the genotypic values of the two parents
exp_mu_j <- colMeans(founder.pop$geno_val[founder.pop$geno_val$ind %in% pars,-1,drop = F])
## Add to the lists
exp_mu[[j]] <- exp_mu_j
exp_varG[[j]] <- trait_var
}
## Add the variances and means to the crossing block
crossing_block$exp_mu <- unlist(exp_mu)
crossing_block$exp_varG <- unlist(exp_varG)
crossing_block$exp_corG <- unlist(exp_corG)
# Return the crossing block
return(crossing_block)
}
#' Predict the genetic variance in prospective crosses
#'
#' @description
#' Uses the expected genetic variance formula and marker effects to predict the
#' genetic variance and correlation in potential crosses.
#'
#' @param genome An object of class \code{genome}.
#' @param pedigree A \code{pedigree} detailing the scheme to develop the family.
#' Use \code{\link{sim_pedigree}} to generate.
#' @param founder.pop An object of class \code{pop} with the geno information for
#' the parents. Additional individuals can be present in \code{parent_pop}. They
#' will be filtered according to the parents in the \code{crossing.block}.
#' @param training.pop An object of class \code{pop} with the elements \code{geno} and
#' \code{pheno_val}. This is used as the training population.
#' @param crossing.block A crossing block detailing the crosses to make. Must be a
#' \code{data.frame} with 2 columns: the first gives the name of parent 1, and the
#' second gives the name of parent 2. See \code{\link{sim_crossing_block}}.
#' @param method The statistical method to predict marker effects. If \code{"RRBLUP"}, the
#' \code{\link[rrBLUP]{mixed.solve}} function is used. Otherwise, the \code{\link[BGLR]{BGLR}}
#' function is used.
#' @param n.iter,burn.in,thin Number of iterations, number of burn-ins, and thinning, respectively. See
#' \code{\link[BGLR]{BGLR}}.
#' @param save.at See \code{\link[BGLR]{BGLR}}.
#'
#' @examples
#'
#' # Simulate a genome
#' n.mar <- c(505, 505, 505)
#' len <- c(120, 130, 140)
#'
#' genome <- sim_genome(len, n.mar)
#'
#' # Simulate a quantitative trait influenced by 50 QTL
#' qtl.model <- matrix(NA, 50, 4)
#' genome <- sim_gen_model(genome = genome, qtl.model = qtl.model,
#' add.dist = "geometric", max.qtl = 50)
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#' training.pop <- sim_phenoval(founder.pop, h2 = 0.8)
#'
#' # Generate a crossing block with 5 crosses
#' cb <- sim_crossing_block(parents = indnames(founder.pop), n.crosses = 5)
#'
#' # Create a pedigree with 100 individuals selfed to the F_3 generation
#' ped <- sim_pedigree(n.ind = 100, n.selfgen = 2)
#'
#' pred_genvar(genome = genome, pedigree = ped, training.pop = training.pop,
#' founder.pop = founder.pop, crossing.block = cb)
#'
#'
#' ## If two traits are present, the genetic correlation is calculated
#' # Simulate two quantitative traits influenced by 50 pleiotropic QTL
#' qtl.model <- replicate(2, matrix(NA, 50, 4), simplify = FALSE)
#' genome <- sim_multi_gen_model(genome = genome, qtl.model = qtl.model, corr = 0.99,
#' prob.corr = cbind(0, 1), add.dist = "normal")
#'
#' # Simulate the genotypes for 8 founders
#' founder.pop <- sim_founders(genome = genome, n.str = 8)
#' training.pop <- sim_phenoval(founder.pop, h2 = 0.8)
#'
#' pred_genvar(genome = genome, pedigree = ped, training.pop = training.pop,
#' founder.pop = founder.pop, crossing.block = cb)
#'
#' @importFrom simcross check_pedigree
#'
#' @export
#'
pred_genvar <- function(genome, pedigree, training.pop, founder.pop, crossing.block,
method = c("RRBLUP", "BRR", "BayesA", "BL", "BayesB", "BayesC"),
n.iter = 1500, burn.in = 500, thin = 5, save.at = "") {
# Error handling
if (!inherits(genome, "genome"))
stop("The input 'genome' must be of class 'genome'.")
# Check the pedigree
if (!check_pedigree(pedigree, ignore_sex = TRUE))
stop("The pedigree is not formatted correctly.")
# Check the crossing block
if (ncol(crossing.block) != 2) {
stop("The crossing block should have two columns.")
} else {
crossing.block <- as.data.frame(crossing.block)
}
# founder.pop needs to be a pop object
if (!inherits(founder.pop, "pop"))
stop("The input 'founder.pop' must be of class 'pop'")
# Check the genome and geno
if (!check_geno(genome = genome, geno = founder.pop$geno))
stop("The geno did not pass. See warning for reason.")
# Check the populations
if (!inherits(training.pop, "pop"))
stop("The input 'training.pop' must be of class 'pop'.")
# Make sure the training population has phenotypes
if (is.null(training.pop$pheno_val))
stop("The 'training.pop' must have phenotypic values.")
n_traits <- length(genome$gen_model)
# Check the method
method <- match.arg(method)
# Predict marker effects - only if the TP does not have them
if (is.null(training.pop$mar_eff)) {
marker_eff <- pred_mar_eff(genome = genome, training.pop = training.pop, method = method, n.iter = n.iter,
burn.in = burn.in, thin = thin, save.at = save.at)
} else {
marker_eff <- training.pop
}
## Predict genotypic values in the founder population - this will use the marker effects in the tp
founder_pop1 <- pred_geno_val(genome = genome, training.pop = marker_eff, candidate.pop = founder.pop)
# Predict marker effects
marker_eff <- marker_eff$mar_eff
## Find the positions of these markers
marker_pos <- find_markerpos(genome = genome, marker = marker_eff$marker)
marker_pos$add_eff <- NA
marker_pos$dom_eff <- 0
marker_pos$qtl_name <- row.names(marker_pos)
marker_pos$qtl1_pair <- row.names(marker_pos)
# Duplicate by the number of traits
marker_pos_list <- replicate(n = n_traits, marker_pos, simplify = FALSE)
marker_pos_list[[1]]$qtl1_pair <- NA
# Add effects
for (i in seq_len(n_traits)) {
marker_pos_list[[i]]$add_eff <- marker_eff[,-1][[i]]
}
## Create a new genome with markers as QTL
genome_use <- genome
# Add to the genome
genome_use$gen_model <- marker_pos_list
## Predict
predicted_genvar <- calc_exp_genvar(genome = genome_use, pedigree = pedigree, founder.pop = founder.pop,
crossing.block = crossing.block)
# PGVs
pgvs <- founder_pop1$pred_val
# Replace the expected mu with the predicted mu
for (i in seq_len(nrow(predicted_genvar))) {
predicted_genvar$exp_mu[i] <- mean(pgvs[pgvs$ind %in% predicted_genvar[i,1:2,drop = T],predicted_genvar$trait[i]])
}
if (n_traits == 1) {
names(predicted_genvar)[-1:-3] <- c("pred_mu", "pred_varG")
} else {
names(predicted_genvar)[-1:-3] <- c("pred_mu", "pred_varG", "pred_corG")
}
# Return
return(predicted_genvar)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.R
\name{ggplot.decorated}
\alias{ggplot.decorated}
\title{Create a New ggplot for a Decorated Data Frame}
\usage{
\method{ggplot}{decorated}(data, ...)
}
\arguments{
\item{data}{decorated, see \code{\link{decorate}}}
\item{...}{passed to \code{\link[ggplot2]{ggplot}}}
}
\value{
return value like \code{\link[ggplot2]{ggplot}} but inheriting 'decorated_ggplot'
}
\description{
Creates a new ggplot object for a decorated data.frame.
This is the ggplot() method for class 'decorated'.
It creates a ggplot object using the default method,
but reclassifies it as 'decorated_ggplot' so that a custom print method
is invoked; see \code{\link{print.decorated_ggplot}}.
}
\details{
This approach is similar to but more flexible than
the method for \code{\link{ggready}}.
For finer control, you can switch between 'data.frame'
and 'decorated' using \code{\link{as_decorated}}
(supplies null decorations) and \code{\link{as.data.frame}}
(preserves decorations).
}
\examples{
file <- system.file(package = 'yamlet', 'extdata','quinidine.csv')
library(ggplot2)
library(dplyr)
library(magrittr)
# par(ask = FALSE)
x <- decorate(file)
x \%<>\% filter(!is.na(conc))
# Manipulate class to switch among ggplot methods.
class(x)
class(data.frame(x))
class(as_decorated(data.frame(x)))
# The bare data.frame gives boring labels and unordered groups.
map <- aes(x = time, y = conc, color = Heart)
data.frame(x) \%>\% ggplot(map) + geom_point()
# Decorated data.frame uses supplied labels.
# Notice CHF levels are still not ordered.
x \%>\% ggplot(map) + geom_point()
# We can resolve guide for a chance to enrich the output with units.
# Notice CHF levels are now ordered.
x \%<>\% resolve
suppressWarnings( # because this complains for columns with no units
x <- modify(x, title = paste0(label, '\n(', units, ')'))
)
x \%>\% ggplot(map) + geom_point()
# Or something fancier.
x \%<>\% modify(conc, title = 'conc_serum. (mg*L^-1.)')
x \%>\% ggplot(map) + geom_point()
# The y-axis title is deliberately given in spork syntax for elegant coercion:
library(spork)
x \%<>\% modify(conc, expression = as.expression(as_plotmath(as_spork(title))))
x \%>\% ggplot(map) + geom_point()
# Add a fancier label for Heart, and facet by a factor:
x \%<>\% modify(Heart, expression = as.expression(as_plotmath(as_spork('CHF^\\\\*'))))
x \%>\% ggplot(map) + geom_point() + facet_wrap(~Creatinine)
# ggready handles the units and plotmath implicitly for a 'standard' display:
x \%>\% ggready \%>\% ggplot(map) + geom_point() + facet_wrap(~Creatinine)
# Notice that instead of over-writing the label
# attribute, we are creating a stack of label
# substitutes (title, expression) so that
# label is still available as an argument
# if we want to try something else. The
# print method by default looks for all of these.
# Precedence is expression, title, label, column name.
# Precedence can be controlled using
# options(decorated_ggplot_search = c(a, b, ...) ).
# Here we try a dataset with conditional labels and units.
file <- system.file(package = 'yamlet', 'extdata','phenobarb.csv')
x <- file \%>\% decorate \%>\% resolve
# Note that value has two elements for label and guide.
x \%>\% decorations(value)
# The print method defaults to the first, with warning.
map <- aes(x = time, y = value, color = event)
\donttest{
x \%>\% ggplot(map) + geom_point()
}
# If we subset appropriately, the relevant value is substituted.
x \%>\% filter(event == 'conc') \%>\% ggplot(map) + geom_point()
x \%>\% filter(event == 'conc') \%>\%
ggplot(aes(x = time, y = value, color = ApgarInd)) + geom_point()
x \%>\% filter(event == 'dose') \%>\%
ggplot(aes(x = time, y = value, color = Wt)) +
geom_point() +
scale_y_log10() +
scale_color_gradientn(colours = rainbow(4))
}
\seealso{
decorate resolve ggready
Other decorated_ggplot:
\code{\link{ggplot_build.decorated_ggplot}()},
\code{\link{print.decorated_ggplot}()}
Other interface:
\code{\link{classified.data.frame}()},
\code{\link{decorate.character}()},
\code{\link{decorate.data.frame}()},
\code{\link{desolve.decorated}()},
\code{\link{io_csv.character}()},
\code{\link{io_csv.data.frame}()},
\code{\link{io_res.character}()},
\code{\link{io_table.character}()},
\code{\link{io_table.data.frame}()},
\code{\link{io_yamlet.character}()},
\code{\link{io_yamlet.data.frame}()},
\code{\link{is_parseable.default}()},
\code{\link{mimic.default}()},
\code{\link{modify.default}()},
\code{\link{promote.list}()},
\code{\link{read_yamlet}()},
\code{\link{resolve.decorated}()},
\code{\link{selected.default}()},
\code{\link{write_yamlet}()}
}
\concept{decorated_ggplot}
\concept{interface}
|
/man/ggplot.decorated.Rd
|
no_license
|
jimsforks/yamlet
|
R
| false | true | 4,699 |
rd
|
##########################################
## SVR hyperplane 3D Visualization code ##
##########################################
install.packages("e1071")
install.packages("plot3D")
install.packages("plot3Drgl")
install.packages("rgl")
install.packages("lattice")
install.packages("car")
library("e1071")
library("plot3D")
library("plot3Drgl")
library("rgl")
library("misc3d")
library("lattice")
library("car")
#Input data(PCA data)
data.frame<-boiler9_SVR_3daysScores_[c("PC1","PC2","BoilerEFF")]
x<-data.frame$PC1
y<-data.frame$PC2
z<-data.frame$BoilerEFF
#3D scatter plot of PCA data
scatter3D(x,y,z,pch=16,cex=1,theta=20,phi=5,bty='g', col.panel="steelblue",
col.grid="darkblue", expand=0.6,
main="data.frame",xlab="PC1",ylab="PC2",
zlab="BoilerEFF",clab=c("EFF(%)"))
plotrgl()
#Perform SVR with PCA data
model <- svm(BoilerEFF~., kernel="radial", data=data.frame)
summary(model)
#Predict with training dataset
pred<-predict(model,newdata=data.frame)
#Switch "z" values to SVR prediction values
z<-pred
#3D scatter plot of SVR model
scatter3D(x,y,z,pch=16,cex=1,theta=20,bty='g', col.panel="steelblue",
col.grid="darkblue", expand=0.3, phi=20,
main="SVR_hyperplane",xlab="PC1",ylab="PC2",
zlab="BoilerEFF",clab=c("EFF(%)"), ellipsoid = TRUE)
plotrgl()
#3D surface plot of SVR hyperplane
data.frame2<-data.frame(x,y,z)
scatter3d(z ~x + y, data=data.frame2, ylab="BoilerEFF", xlab="PC1", zlab="PC2",
fit=c("linear","smooth"),surface.col=c("black","red"), bg.col="white",axis.ticks=TRUE,
axis.col=c("black","black","black"), surface.alpha=0.2,neg.res.col=NA,
square.col="white", point.col="darkblue", text.col="black",grid.col="blue",
residuals=FALSE, fill="TRUE", grid.lines=40, sphere.size=1.5)
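#Optional sketch (not part of the original analysis): the SVR above uses default
#hyperparameters; e1071::tune.svm could be used to search cost/gamma before refitting.
tuned <- tune.svm(BoilerEFF ~ ., data = data.frame, gamma = 10^(-3:0), cost = 10^(0:2))
summary(tuned)
#model <- tuned$best.model # would replace the default fit if preferred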
|
/Sample Code/SVR_hyperplane_3D.R
|
no_license
|
shin-nyum/R_Programming_Self-Practice
|
R
| false | false | 1,863 |
r
|
library(dplyr)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#5
# subset to Baltimore City (fips == "24510") before matching motor vehicle sources
baltimore <- NEI %>% filter(fips == "24510")
motor_vehicle_data <- SCC[grep("[Vv]ehicle", SCC$Short.Name),]
baltimore_motor_vehicle <- baltimore %>% filter(SCC %in% motor_vehicle_data$SCC)
png("plot5.png", width = 480, height = 480,units = "px")
with(baltimore_motor_vehicle, boxplot(log10(Emissions) ~ year,main="Baltimore"))
dev.off()
|
/Courses/Exploratory Data Analysis/Week 4/plot5.R
|
no_license
|
henryspivey/Data-Science
|
R
| false | false | 382 |
r
|
#' @title Analyze
#' @description Analyzes the dataset generated by generate_data according to
#' specified method.
#' @param data_to_analyze A data frame with a user-specified number of features
#' to analyze repeated reps number of times. The output from the generate_data
#' function.
#' @param method A user-specified method of analysis. Choose one of "ofaat", "mv_glm",
#' or "lasso". Currently can accommodate one feature at a time hypothesis
#' testing (use "ofaat" argument), fitting all features to a general linear
#' model (use "mv_glm" argument), or a lasso method implemented with glmnet
#' functions (use "lasso" argument). Default: NULL
#' @param p_adjust The method by which p value adjustment will take place.
#' See ?p.adjust for list of possible arguments. Default: NULL
#' @return Dataframe of analyzed results. The structure of the dataframe varies
#' depending on the argument passed to "method".
#' @details Must set global_alpha with set.alpha(alpha_value) where alpha_value
#' is the desired cut-off for significance testing before this function will
#' run to completion.
#'
#' Note that different analysis methods are appropriate for different dataset
#' shapes. Each analysis method has a slightly different return dataframe. This
#' method is designed to be used inside the simulation.
#' @examples
#'
#' set.alpha(0.05)
#' example <- generate_data(50, 50, c(1,2,3), c(0.3, 0, 0.7))
#' analyzed1 <- analyze(example, method="ofaat", p_adjust="bonferroni")
#' analyzed2 <- analyze(example, method="mv_glm")
#' analyzed3 <- analyze(example, method="lasso")
#'
#'
#' @rdname analyze
#' @export
analyze <- function(data_to_analyze, method = NULL, p_adjust = NULL) {
if (is.null(method) || !(method %in% c("ofaat", "mv_glm", "lasso"))) {
    stop("A valid approach for analyzing data has not been specified")
}
if (method == "ofaat") {
# fit each gene to a linear model (univariate)
analyzed <- data_to_analyze %>% group_by(reps, n) %>% nest() %>%
mutate(model = map(data, ~tidy_glm_single(p_adj = p_adjust,
data = .))) %>% unnest(model)
analyzed$selected <- ifelse(analyzed$p.value <= global_alpha,
1, 0)
# modify 'selected' column to fit with summary later on
} else if (method == "mv_glm") {
# fit each gene to a linear model (multivariate)
analyzed <- data_to_analyze %>% group_by(reps, n) %>% nest() %>%
mutate(model = map(data, ~tidy_glm_mv(p_adj = p_adjust,
data = .))) %>% unnest(model)
analyzed$selected <- ifelse(analyzed$p.value <= global_alpha,
1, 0)
analyzed <- analyzed %>% mutate(selected = ifelse(term ==
"Model", p.value, selected)) %>% mutate(selected = ifelse(term ==
"AUC", p.value, selected))
} else if (method == "lasso") {
analyzed <- data_to_analyze %>% group_by(reps) %>% nest() %>%
mutate(model = map(data, ~tidy_glmnet(data = .))) %>%
unnest(model)
}
return(analyzed)
}
#' @title Lasso analysis
#' @description Uses glmnet to perform analysis and calculates AUC.
#' @param data_tibble data to be analyzed.
#' @return A 3 column dataframe that tracks the reps number, feature, and
#' whether that feature was selected as significant or not (either a 1 or 0).
#' 'selected' column also stores the calculated AUC before the summarization
#' step.
#' @details An internal function.
#' @seealso
#' \code{\link[dplyr]{select}}
#' @rdname tidy_glmnet
#' @importFrom dplyr select
tidy_glmnet <- function(data_tibble) {
genes <- as.matrix(dplyr::select(data_tibble, contains("Gene_")))
y <- unlist(dplyr::select(data_tibble, treatment))
cvfit <- cv.glmnet(genes, y, family = "binomial", type.measure = "auc",
nfolds = 5)
results <- as.matrix(coef(cvfit, s = "lambda.min"))
# as.matrix used here rather than tidy in order to get all 0
# coefficients for summary later
results <- as.data.frame(t(results)) %>% gather(term, selected)
results$selected <- ifelse(results$selected == 0, 0, 1)
# getting auc
fittedval <- predict(cvfit, genes, type = "response", s = "lambda.min")
pred <- prediction(fittedval, y)
auc <- performance(pred, "auc")@y.values[[1]]
results <- rbind(auc, results)
results$term[[1]] <- "AUC"
# add Model row for future summarization compatibility
results <- rbind(NA, results)
results$term[[1]] <- "Model"
return(results)
}
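# Illustrative sketch only (kept as a comment so it is not run when the package
# is loaded): a direct call outside the simulation, assuming glmnet, ROCR, dplyr
# and tidyr are attached and that columns follow the "Gene_*" + binary 'treatment'
# convention produced by generate_data.
# set.seed(1)
# toy <- as.data.frame(matrix(rnorm(60 * 5), nrow = 60,
#                             dimnames = list(NULL, paste0("Gene_", 1:5))))
# toy$treatment <- rbinom(60, 1, plogis(toy$Gene_1))
# tidy_glmnet(toy)  # 'Model' and 'AUC' rows followed by one row per coefficient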
#' @title Fit all genes to a general linear model
#' @description An internal function. Called by analyze when "mv_glm" argument
#' is passed.
#' @param p_adjust p value adjustment argument, used to call p.adjust Default: NULL
#' @param data_tibble The data to be analyzed
#' @return Returns a dataframe with many columns that are not used in the
#' summarization step (such as estimate, std.error, statistic, p.value). Main
#' columns of importance are reps, term, and selected. The "mv_glm" argument
#' allows a model p.value to be calculated as well.
#' @seealso
#' \code{\link[dplyr]{select}}
#' @rdname tidy_glm_mv
#' @importFrom dplyr select
tidy_glm_mv <- function(p_adjust = NULL, data_tibble) {
models <- data_tibble %>% dplyr::select(contains("Gene_"), treatment)
formula <- reformulate(setdiff(colnames(models), "treatment"),
response = "treatment")
model <- glm(formula, family = "binomial", models)
model0 <- glm(treatment ~ 1, family = "binomial", models)
output_pval <- anova(model0, model, test = "Chisq")
# fussy tidy function rife with inconsequential warnings otherwise
output_pval <- tidy(output_pval)
# get AUC
fittedval <- predict(model, dplyr::select(models, contains("Gene_")),
type = "response")
pred <- prediction(fittedval, dplyr::select(data_tibble, treatment))
auc <- performance(pred, "auc")@y.values[[1]]
model <- tidy(model)
if (!is.null(p_adjust)) {
model$p.value <- p.adjust(model$p.value, method = p_adjust)
}
model <- rbind(output_pval$p.value[[2]], model)
model$term[[1]] <- "Model"
model <- rbind(auc, model)
model$term[[1]] <- "AUC"
models <- model
return(models)
}
#' @title A 'one feature at a time' analysis
#' @description An internal function, called when "ofaat" is the argument
#' supplied to 'analyze' method.
#' @param p_adjust passed to p.adjust for p value adjustment Default: NULL
#' @param data_tibble data to analyze
#' @return Returns a dataframe with many columns that are not used in the
#' summarization step (such as estimate, std.error, statistic, p.value). Main
#' columns of importance are reps, term, and selected.
#' @seealso
#' \code{\link[dplyr]{select}}
#' @rdname tidy_glm_single
#' @importFrom dplyr select
tidy_glm_single <- function(p_adjust = NULL, data_tibble) {
models <- data_tibble %>% gather(key = gene, value = gene_expression,
-sample, -treatment) %>% group_by(gene) %>% nest() %>% mutate(model = map(data,
~glm(treatment ~ gene_expression, family = "binomial", data = .))) %>%
mutate(tidy_ = map(model, ~tidy(.))) %>% unnest(tidy_)
models <- models %>% filter(term == "gene_expression") %>% dplyr::select(-term) %>%
rename(term = gene)
if (!is.null(p_adjust)) {
models$p.value <- p.adjust(models$p.value, method = p_adjust)
}
return(models)
}
|
/R/analyzingData.R
|
no_license
|
emartchenko/mvsimstudy
|
R
| false | false | 7,576 |
r
|
#' Calculate empirical raw regions from \code{myDiff} object.
getPeaks=function(allMyDiff, pcutoff=0.1, dist=100){
print("raw myDiff:")
print(dim(allMyDiff))
myDiff=allMyDiff[allMyDiff$ppvalue<=pcutoff,]
probes.dist=diff(myDiff$pend)
print(paste("max cpgs dist for region definition:", dist))
bpoints=which(probes.dist>=dist | probes.dist<0)
first.peak=c(as.character(myDiff[1,1]), myDiff[1,2],end=myDiff[bpoints[1],3])
mid.peaks=cbind(myDiff[(bpoints[-length(bpoints)]+1),1:2],end=myDiff[bpoints[-1],3])
last.idx=bpoints[length(bpoints)]+1
last.peak=c(as.character(myDiff[last.idx,1]), myDiff[last.idx,2], end=myDiff[nrow(myDiff),3])
peaks=as.data.frame(rbind(first.peak, mid.peaks, last.peak))
colnames(peaks)=c("rchr","rstart","rend")
peaks
}
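# Illustrative sketch only (kept as a comment). The column layout is assumed from
# the indexing above: columns 1-3 give chromosome, start and end, plus 'pend' and
# 'ppvalue' columns; six dummy probes (five below the cutoff) give three regions.
# toy <- data.frame(pchr = rep("chr1", 6),
#                   pstart = c(10, 20, 400, 420, 900, 920),
#                   pend = c(11, 21, 401, 421, 901, 921),
#                   ppvalue = c(0.01, 0.02, 0.03, 0.04, 0.05, 0.5))
# getPeaks(toy, pcutoff = 0.1, dist = 100)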
|
/R/getPeaks.R
|
permissive
|
Shicheng-Guo/edmr
|
R
| false | false | 774 |
r
|
library(shiny)
shinyUI(fluidPage(
titlePanel("download"),
fluidRow(
column(6, plotOutput("plot", brush = brushOpts(id = "brush")),
downloadButton('download_data', 'Download')),
column(6, DT::dataTableOutput("brushed_point"))
)
))
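# Sketch of a matching server.R (a separate file, not part of this ui.R), shown
# only so the inputs/outputs referenced above are self-explanatory; the data frame
# 'df' and its columns x/y are placeholders.
# library(shiny)
# shinyServer(function(input, output) {
#   df <- data.frame(x = rnorm(100), y = rnorm(100))
#   output$plot <- renderPlot(plot(df$x, df$y))
#   brushed <- reactive(brushedPoints(df, input$brush, xvar = "x", yvar = "y"))
#   output$brushed_point <- DT::renderDataTable(brushed())
#   output$download_data <- downloadHandler(
#     filename = function() "brushed_points.csv",
#     content = function(file) write.csv(brushed(), file, row.names = FALSE)
#   )
# })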
|
/0ShinyBook-梅津/ShinyBook-master/chapter03/31-download/ui.R
|
no_license
|
luka3117/JcShiny
|
R
| false | false | 261 |
r
|
\name{cmccm}
\alias{cmccm}
\alias{cmccmpredfunc}
\title{CMCCM}
\usage{
cmccmpredfunc(input)
}
\description{
cmccmpredfunc
}
\examples{
cmccmpredfunc(input)
}
|
/man/cmccm.Rd
|
no_license
|
dy-r/cmccm
|
R
| false | false | 136 |
rd
|
codedir = "~/Desktop/github/COVID_LIC"
datadir = "~/paultangerusda drive/2020_Sync/COVID analysis (Paul Tanger)/data/"
setwd(codedir)
source('functions.R')
source('load_libs.R')
setwd(datadir)
crops_cal = read.csv("GEOGLAM_crop_calendars.csv")
# try with revised file
crops_cal = read.csv("GEOGLAM_crop_calendars_v2.csv")
# this version, we changed zero out of season dates to one day before the plant date (as per Brian B. email)
crops_cal = read.csv("GEOGLAM_crop_calendars_v3.csv")
# the second set of cols we can just recalculate in R and do it correctly - spill over to next yr
crops_cal = crops_cal[,c(1:8)]
################################
origin = as.Date("2018-12-31")
originPlusOne = origin + years(1)
# if planting day-of-year is below 180 (roughly before July 2019), push to 2020; otherwise keep in 2019
crops_cal$plant_date = ifelse(crops_cal$planting < 180,
as.Date(crops_cal$planting, origin=originPlusOne),
as.Date(crops_cal$planting, origin=origin))
class(crops_cal$plant_date) = "Date"
# get diffs
# how is there not a simple package for this?!!!
# get number of days in origin year
interval = interval(origin, originPlusOne)
days_in_yr = time_length(as.duration(interval), "day")
# specific for plant_date year
crops_cal$plant_date_yr = as.Date(paste0(year(crops_cal$plant_date), "-12-31"))
crops_cal$plant_date_yrPlus1 = crops_cal$plant_date_yr + years(1)
crops_cal$plant_date_yr_interval = interval(crops_cal$plant_date_yr, crops_cal$plant_date_yrPlus1)
crops_cal$days_in_plant_yr = time_length(as.duration(crops_cal$plant_date_yr_interval), "day")
# get plant veg diff
crops_cal$veg_plant_diff = ifelse(crops_cal$vegetative < crops_cal$planting,
(crops_cal$days_in_plant_yr - crops_cal$planting) + crops_cal$vegetative,
crops_cal$vegetative - crops_cal$planting)
# get veg date
crops_cal$veg_date = crops_cal$plant_date + days(crops_cal$veg_plant_diff)
# repeat for others...
# TODO - melt into one col and then sort smallest number to figure out what to add to what
#######################################
crops_cal$veg_date_yr = as.Date(paste0(year(crops_cal$veg_date), "-12-31"))
crops_cal$veg_date_yrPlus1 = crops_cal$veg_date_yr + years(1)
crops_cal$veg_date_yr_interval = interval(crops_cal$veg_date_yr, crops_cal$veg_date_yrPlus1)
crops_cal$days_in_veg_yr = time_length(as.duration(crops_cal$veg_date_yr_interval), "day")
# get harv veg yr diff
crops_cal$harv_veg_diff = ifelse(crops_cal$harvest < crops_cal$vegetative,
(crops_cal$days_in_veg_yr - crops_cal$vegetative) + crops_cal$harvest,
crops_cal$harvest - crops_cal$vegetative)
# get harv date
crops_cal$harv_date = crops_cal$veg_date + days(crops_cal$harv_veg_diff)
#######################################
crops_cal$harv_date_yr = as.Date(paste0(year(crops_cal$harv_date), "-12-31"))
crops_cal$harv_date_yrPlus1 = crops_cal$harv_date_yr + years(1)
crops_cal$harv_date_yr_interval = interval(crops_cal$harv_date_yr, crops_cal$harv_date_yrPlus1)
crops_cal$days_in_harv_yr = time_length(as.duration(crops_cal$harv_date_yr_interval), "day")
# get end harv yr diff
crops_cal$end_harv_diff = ifelse(crops_cal$endofseaso < crops_cal$harvest,
(crops_cal$days_in_harv_yr - crops_cal$harvest) + crops_cal$endofseaso,
crops_cal$endofseaso - crops_cal$harvest)
# get harv date
crops_cal$end_date = crops_cal$harv_date + days(crops_cal$end_harv_diff)
#######################################
crops_cal$end_date_yr = as.Date(paste0(year(crops_cal$end_date), "-12-31"))
crops_cal$end_date_yrPlus1 = crops_cal$end_date_yr + years(1)
crops_cal$end_date_yr_interval = interval(crops_cal$end_date_yr, crops_cal$end_date_yrPlus1)
crops_cal$days_in_end_yr = time_length(as.duration(crops_cal$end_date_yr_interval), "day")
# get out end yr diff
crops_cal$out_end_diff = ifelse(crops_cal$outofseaso < crops_cal$endofseaso,
(crops_cal$days_in_end_yr - crops_cal$endofseaso) + crops_cal$outofseaso,
crops_cal$outofseaso - crops_cal$endofseaso)
# get out date
crops_cal$out_date = crops_cal$end_date + days(crops_cal$out_end_diff)
# for winter wheat, add a new part 7 weeks after start of planting as nonactivity
# just split veg date into two for winter wheat
crops_cal$winter_wheat = crops_cal$veg_date - days(49)
# or maybe as a percent of plant period
# TODO: try as percent
# make NA for all but wheat
crops_cal$winter_wheat[crops_cal$crop != "Winter Wheat"] = NA
# export to check
setwd(datadir)
filename = addStampToFilename("CropCalv4", "csv")
filename = addStampToFilename("CropCalv5", "csv")
filename = addStampToFilename("CropCalv6", "csv")
#write.csv(crops_cal, filename, row.names = F)
# just keep dates
as.data.frame(colnames(crops_cal))
cropcalv3 = crops_cal[,c(1:3, 9, 34, 15, 21, 27, 33)]
filename = addStampToFilename("CropCalv3_just_dates", "csv")
#write.csv(cropcalv3, filename, row.names = F)
# add another year.. will use as new segments
cropcalv3$plant_date2 = cropcalv3$plant_date + dyears(1)
cropcalv3$winter_wheat2 = cropcalv3$winter_wheat + dyears(1)
cropcalv3$veg_date2 = cropcalv3$veg_date + dyears(1)
cropcalv3$harv_date2 = cropcalv3$harv_date + dyears(1)
cropcalv3$end_date2 = cropcalv3$end_date + dyears(1)
cropcalv3$out_date2 = cropcalv3$out_date + dyears(1)
filename = addStampToFilename("CropCalv5_just_dates", "csv")
write.csv(cropcalv3, filename, row.names = F)
#######################################
# for now, let's try to plot this
# first save it
setwd(datadir)
filename = addStampToFilename("CropCalv2", "csv")
#write.csv(crops_cal, filename, row.names = F)
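#######################################
# Possible refactor (sketch only, not used above): the repeated "next stage date"
# blocks all follow the same pattern, so a helper like this could replace them.
# It assumes lubridate is attached, as the calculations above already require.
next_stage_date = function(prev_date, prev_doy, next_doy) {
  yr_end = as.Date(paste0(year(prev_date), "-12-31"))
  days_in_yr = time_length(as.duration(interval(yr_end, yr_end + years(1))), "day")
  doy_diff = ifelse(next_doy < prev_doy, (days_in_yr - prev_doy) + next_doy, next_doy - prev_doy)
  prev_date + days(doy_diff)
}
# e.g. crops_cal$veg_date could equivalently be computed as:
# next_stage_date(crops_cal$plant_date, crops_cal$planting, crops_cal$vegetative)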
|
/organize_crop_calendar.R
|
no_license
|
paultanger/COVID_LIC
|
R
| false | false | 5,798 |
r
|
# Matching C balance of the entire experiment considering C inputs and outputs
C.balance = data.frame(matrix(ncol = 13, nrow = length(treat.group)))
names(C.balance) = c("Treatment","GPP","Ra","Rm_root","Rg_root","Cs_foliage","Cs_wood","Cs_root","Clit_foliage","Cn","Clit_root","C.output","C.imbalance")
C.balance$Treatment = treat.group
for (v in 1:length(treat.group)) {
data.set = subset(data.all,(Treatment %in% treat.group[v]))
# data.set[nrow(data.set),c(10:17)] = data.set[nrow(data.set)-1,c(10:17)]
data.set[,c("LM","WM","RM","litter")] = na.spline(data.set[,c("LM","WM","RM","litter")])
# plot(data.set$Date, data.set$LM)
C.balance$GPP[v] = sum(data.set$GPP)
C.balance$Ra[v] = sum(data.set$Ra)
C.balance$Cs_foliage[v] = data.set$LM[nrow(data.set)] - data.set$LM[1]
C.balance$Cs_wood[v] = data.set$WM[nrow(data.set)] - data.set$WM[1]
C.balance$Cs_root[v] = data.set$RM[nrow(data.set)] - data.set$RM[1]
C.balance$Rm_root[v] = sum(data.set$Rd.fineroot.mean*data.set$RM*data.set$FRratio + data.set$Rd.intermediateroot.mean*data.set$RM*data.set$IRratio +
data.set$Rd.coarseroot.mean*data.set$RM*data.set$CRratio + data.set$Rd.boleroot.mean*data.set$RM*data.set$BRratio)
C.balance$Rg_root[v] = 0.3 * C.balance$Cs_root[v]
C.balance$Clit_foliage[v] = data.set$litter[nrow(data.set)] - data.set$litter[1]
C.balance$Cn[v] = data.set$TNC_tot[max(which(complete.cases(data.set$TNC_tot)))] - data.set$TNC_tot[min(which(complete.cases(data.set$TNC_tot)))]
C.balance$Clit_root[v] = 0.1 * C.balance$Clit_foliage[v]
# C.balance$Cexudate[v] = 0.005 * sum(data.set$RM)
C.balance$C.output[v] = C.balance$Ra[v] + C.balance$Cs_foliage[v] + C.balance$Cs_wood[v] + C.balance$Cs_root[v] + C.balance$Rm_root[v] +
C.balance$Rg_root[v] + C.balance$Clit_foliage[v] + C.balance$Cn[v] + C.balance$Clit_root[v]
C.balance$C.imbalance[v] = C.balance$GPP[v] - C.balance$C.output[v]
}
C.balance.fraction = C.balance[, c(10,3:9,11)]
C.balance.fraction[,] = C.balance.fraction[,] / C.balance[,2] * 100
row.names(C.balance.fraction) <- treat.group
row.names(C.balance.fraction) <- c("amb-dry","amb-wet","warm-dry","warm-wet")
# C.balance.fraction = abs(C.balance.fraction)
C.balance = C.balance[,-c(12,13)]
colnames(C.balance) <- c("Treatment", "GPP (g C)", "Ra (g C)", "Rm_root (g C)", "Rg_root (g C)", "Cs_foliage (g C)", "Cs_wood (g C)", "Cs_root (g C)", "Clit_foliage (g C)", "Cn (g C)", "Clit_root (g C)")
# C.balance = C.balance[,c(10,1,2,3,4,7,5,6,8,9)]
write.csv(C.balance, file = "output/C_partitioning_wtc3.csv", row.names = FALSE)
cbPalette = c("gray", "orange", "skyblue", "green3", "#009E73", "yellow3", "#0072B2", "#D55E00", "black")
png("output/Figure_1a_C_balance_wtc3.png", units="px", width=1200, height=1000, res=200)
par(mfrow = c(1, 1), mar=c(5, 4, 2, 6))
# bb = barplot(as.matrix(t(Ct.fraction.group)), ylim=c(0, 107), ylab = "C Partitioning (%)", xlab = "Treatments (Container size)",
# col = rainbow(20),legend = colnames(Ct.fraction.group),
# args.legend = list(x = "topright", bty = "n", inset=c(-0.15, 0)))
C.balance.fraction1 = C.balance.fraction2 = C.balance.fraction
C.balance.fraction1[C.balance.fraction1<0] <- 0
C.balance.fraction2[C.balance.fraction2>0] <- 0
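# Splitting the fractions into non-negative and non-positive parts lets the stacked
# bars be drawn above and below zero separately: fraction1 is plotted first, then
# fraction2 (e.g. a net TNC drawdown) is overlaid with add=TRUE below.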
# myrange <- c(min(rowSums(C.balance.fraction2)),max(rowSums(C.balance.fraction1)))
bb = barplot(as.matrix(t(C.balance.fraction1)), ylim=c(-2, 100), ylab = "C Partitioning (%)", xlab = "Treatment",
col = cbPalette,legend = c(expression(C[n]),expression(R[a]),expression(R["m,root"]),expression(R["g,root"]),expression(C["s,foliage"]),
expression(C["s,wood"]),expression(C["s,root"]),expression(C["lit,foliage"]),expression(C["lit,root"])),
args.legend = list(x = "topright", bty = "n", inset=c(-0.22, 0)))
text( bb, rowSums(C.balance.fraction1)+0.5, labels = round(C.balance[,2],1), pos = 3, cex=1, col="red")
bb = bb + barplot(as.matrix(t(C.balance.fraction2)), add=TRUE, col = cbPalette)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]+Ct.fraction.group[,5]+Ct.fraction.group[,6]+Ct.fraction.group[,7]-1, labels = round(Ct.group[,9],1), cex=.9)
dev.off()
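# Optional check (sketch only): percentage of GPP left unexplained per treatment,
# i.e. 100 minus the sum of the plotted partition fractions.
# round(100 - rowSums(C.balance.fraction), 1)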
|
/R/C_balance_wtc3.R
|
no_license
|
kashifmahmud/DA_WTC3_temp_drought
|
R
| false | false | 4,232 |
r
|
\name{GUILDS-internal}
\title{Internal Guilds functions}
\alias{rho}
\alias{polyaeggenberger}
\alias{logLikguilds}
\alias{localComm}
\alias{getpx}
\alias{local_esf}
\alias{evaluateLogLik}
\alias{conditional.LogLik}
\alias{calc_sum_kda}
\alias{calc_conditional}
\alias{evaluate_cond_lik}
\alias{calcKDA}
\alias{pm_sad}
\alias{pm_sadaux}
\alias{draw_local_cond}
\alias{draw_local}
\alias{sort_aux}
\alias{generate.ZSM}
\alias{octave_index}
\alias{preston_sort}
\description{Internal Guilds functions}
\details{These are not to be called by the user}
\keyword{internal}
|
/man/GUILDS-internal.Rd
|
no_license
|
thijsjanzen/GUILDS
|
R
| false | false | 566 |
rd
|
## Initialisation
N = 20
h = 0.1
X1 = numeric(N)
for(i in 1:N){
X1[i] = 0.5 + (i-1)*0.95/(N-1)
}
## Implementation of the criterion for minimisation ##
##------------------------------------------------------------##
## function which returns residuals for given theta
e <- function(theta){
return(Y1 - model(theta,X1))
}
## function to calculate kernel, which we assume
## to be gaussian with mean 0 and standard deviation 1
kern <- function(u){
return(1/sqrt(2*pi) * exp( - u^2 / 2))
}
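# Added sanity check (not part of the original analysis): the Gaussian kernel
# should integrate to one over the real line
integrate(kern, -Inf, Inf)$value  # expected to be approximately 1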
# We created a function to return the values
# at which we evaluate the kernel, for each
# index i and j
# We do this for each side of our symmetrised estimator
a <- function(theta,ind1,ind2){ ## component of kernel argument
return((e(theta)[ind1] - e(theta)[ind2]) / h )
}
b <- function(theta,ind1,ind2){ ## component of kernel argument
return((e(theta)[ind1] + e(theta)[ind2]) / h )
}
# This function returns the f_{n,h} defined in our project:
# f_{n,h}(e_i) = 1/(2Nh) * sum_j [ K((e_i - e_j)/h) + K((e_i + e_j)/h) ],
# i.e. the symmetrised kernel estimate of the residual density evaluated at residual i
F <- function(theta,ind){ ## function to return 1/2Nh * kernel
s = 0
for(i in 1:N){
s = s + kern(a(theta,ind,i)) + kern(b(theta,ind,i))
}
s = 1/(2*N*h) * s
return(s)
}
# This returns the actual criterion function to be
# minimised as defined in our report
J <- function(theta){
s1 = 0
for( j in 1:N){
s1 = s1 + log(F(theta,j))
}
s1 = -1/N * s1
return(s1)
}
## Example 1 ##
thbar = 1 ## true parameter value used to simulate the data
model <- function(theta,x){ ## function to simulate model of interest
return(exp(-theta*x))
}
Y1 = model(thbar,X1) + rnorm(N, mean=0, sd=0.1) ## first case where errors ~ N(0, sigma^2)
plot(X1, Y1, pch=20, main="data",col='indianred') ## visual assessment of data points
points(X1,model(thbar,X1),col='skyblue',pch=20) ## model fit
## optimisation of minimum entropy criterion
th.start = 0.5
opt.out <- optim(par=c(th.start),
fn=J,method='Brent',lower =c(0.1),upper=c(2))
## tests whether values are reasonable
(opt.out$par)
## Run a monte carlo simulation for entropy estimation
ans = numeric(1000)
for( i in 1:1000){
Y1 = model(thbar,X1) + rnorm(N, mean=0, sd=0.1)
opt.out <- optim(par=c(th.start),fn=J,method='Brent',lower =c(-0.5),upper=c(3))
ans[i] = opt.out$par
}
## Least Squares Estimation
crit <- function(theta,x,y){
# must have theta as 1st parameter and return a single value...
return( sum( (y-model(theta,x))^2 ) )
}
## Run a monte carlo simulation for least squares
t = numeric(1000)
thinit = 0.005
for( i in 1:1000){
Y1 = model(thbar,X1) + rnorm(N,m=0,sd=0.1)
optim.out <- optim(par=c(thinit), fn=crit, x=X1, y=Y1,method='Brent',lower=c(0.1),upper=c(2))
t[i] = optim.out$par
}
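# Added for reference (not in the original report): a quick numeric comparison of
# the two Monte Carlo samples, complementing the density plot in Figure 1 below
c(sd_entropy = sd(ans), sd_LS = sd(t))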
# Figure 1
plot(density(t), bty='n', ann=FALSE, ylim=c(0,7))
lines(density(ans), lty=2)
legend("topright", legend = c(expression(hat(theta)[ML]), expression(hat(theta)[e])), lty = c(1,2), col = 'black', ncol = 2)
#---------------------------------#
# Now try with uniform errors (Fig.2)
# Here we just repeat what we had already done.
Y2 = model(thbar,X1) + runif(N,-0.4,0.4)
e <- function(theta){
return(Y2 - model(theta,X1))
}
thu.start = 0.5
Y2 = model(thbar,X1) + runif(N,-0.4,0.4)
unif.out <- optim(par=c(thu.start),fn=J,method='Brent',lower =c(0.1),upper=c(2))
(unif.out$par)
# Monte Carlo
ansu = numeric(1000)
thu.start = 0.5
for( i in 1:1000){
Y2= model(thbar,X1) + runif(N,-0.4,0.4)
u.out <- optim(par=c(thu.start),fn=J,method='Brent',lower =c(-0.5),upper=c(3))
ansu[i] = u.out$par
}
plot(density(ansu))
# LS coincides with the MLE under the (here incorrect) assumption that the errors are normal
tr = numeric(1000)
thinit = 0.005
for( i in 1:1000){
Y2 = model(thbar,X1) + runif(N,-0.4,0.4)
o.out <- optim(par=c(thinit), fn=crit, x=X1, y=Y2,method='Brent',lower =c(-0.5),upper=c(3))
tr[i] = o.out$par
}
# Figure 2
plot(density(ansu), bty='n', ann=FALSE, ylim=c(0,4))
lines(density(tr), lty=2)
legend("topright", legend = c(expression(hat(theta)[e]), expression(hat(theta)[LS])), lty = c(1,2), col = 'black', ncol = 2)
sd(ansu)
sd(tr)
#-----------------------------------------#
# Inverse transform method to generate a
# random sample from the laplace distribution
rlaplace <- function(N,sigma){
  u <- runif(N)
  b <- sigma/sqrt(2)  # scale parameter chosen so that the sample has standard deviation sigma
  r <- numeric(N)
  for(i in 1:N){
    if(u[i] < 0.5){
      r[i] = b * log(2*u[i])
    }
    else{
      r[i] = - b * log(2*(1 - u[i]))
    }
}
return(r)
}
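# Added sanity check (not part of the original analysis): with scale sigma/sqrt(2)
# the Laplace sample should have a standard deviation close to sigma
sd(rlaplace(1e5, 0.15))  # expected to be roughly 0.15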
# we then added an outlier to our sample to
# generate figure 3.
thl.start = 0.5
Y3 = model(thbar,X1) + rlaplace(N,0.15)
Y3[N] = 5
e <- function(theta){
return(Y3 - model(theta,X1))
}
lap.out <- optim(par=c(thl.start),fn=J,method='Brent',lower =c(0.1),upper=c(2))
(lap.out$par)
ansl = numeric(1000)
thl.start = 0.5
for( i in 1:1000){
Y3 = model(thbar,X1) + rlaplace(N,0.15)
Y3[N] = 5
l.out <- optim(par=c(thl.start),fn=J,method='Brent',lower =c(-0.5),upper=c(3))
ansl[i] = l.out$par
}
tl = numeric(1000)
thl.start = 0.5
for( i in 1:1000){
Y3 = model(thbar,X1) + rlaplace(N,0.15)
Y3[N] = 5
o.out <- optim(par=c(thl.start), fn=crit, x=X1, y=Y3,method='Brent',lower =c(-0.5),upper=c(3))
tl[i] = o.out$par
}
plot(X1,Y3)
lines(X1,model(l.out$par,X1))
plot(density(tl))
var(ansl)
var(tl)
curve(exp(-x), 0.5, 1.45, ylim=c(0,1), ann=FALSE, lty=4, col='indianred', lwd=2)
points(X1,Y3,pch=4,col='skyblue')
points(sort(X1),model(o.out$par,sort(X1)),type='l',lwd=2,col='darkorange')
points(sort(X1),model(l.out$par,sort(X1)),type='l',lwd=2,col='forestgreen')
# Example 5
# Import data
dat <- read.csv("/Users/cianscannell/Desktop/Final_Year/Stats/group6-data.csv",header=FALSE)
head(dat)
X2 <- dat[,1]
Y4 <-dat[,2]
N<-length(X2)
h = 0.1
# linear regression model
lin <- function(th){
return(th[1] + th[2]*X2)
}
# Implement our unsymmetrised criterion
eu <- function(theta){
return(Y4 - lin(theta))
}
au <- function(theta,ind1,ind2){ ## component of kernel argument
return((eu(theta)[ind1] - eu(theta)[ind2]) / h )
}
F_unsym <- function(theta,ind){
s = 0
for(i in 1:N){
s = s + kern(au(theta,ind,i))
}
s = 1/(N*h) * s
return(s)
}
J_unsym <- function(theta){ ## function to return criterion of interest
s1 = 0
for( j in 1:N){
s1 = s1 + log(F_unsym(theta,j))
}
s1 = -1/N * s1
return(s1)
}
plot(X2,Y4)
abline(lm(Y4 ~ X2))
t.st <- c(0.0,0.0)
t.out <- optim(par=t.st,fn=J_unsym)
(t.out$par)
## Least Squares Estimation
crit_u <- function(theta){
# must have theta as 1st parameter and return a single value...
return( sum( (Y4-lin(theta))^2 ) )
}
e <- function(theta){
return(Y4 - lin(theta))
}
th.start = c(0,0)
unsym.out <- optim(par=c(th.start),fn=J_unsym)
sym.out <- optim(par=c(th.start),fn=J)
ls.out <- optim(par=c(th.start),fn=crit_u)
(unsym.out$par)
(sym.out$par)
(ls.out$par)
|
/min-ent-est.R
|
permissive
|
thalesshannonwatson/minimum_entropy_estimation
|
R
| false | false | 14,701 |
r
|
# Dependencies --------------------------------------------------------------------------------
library(caret)
library(readr) # read/write
library(dplyr) # manipulate data
library(tidyr) # tidy data
library(purrr) # functional programming
library(stringr) # text manipulation
library(qdapRegex) # easy regex
library(tm) # text mining
library(tidytext) # text mining
library(ggplot2)
library(patchwork)
# devtools::install("../input/r-textfeatures-package/textfeatures/")
library(textfeatures)
library(doParallel)
library(foreach)
# library(h2o)
theme_set(theme_bw()) # set theme
# h2o.shutdown()
# Functions -----------------------------------------------------------------------------------
jaccard <- function(str1, str2) {
# r version for: https://www.kaggle.com/c/tweet-sentiment-extraction/overview/evaluation
a <- unlist(strsplit(tolower(str1), split = " "))
b <- unlist(strsplit(tolower(str2), split = " "))
c <- intersect(a, b)
length(c) / (length(a) + length(b) - length(c))
}
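# Illustrative check (added; example strings are made up): the score is the
# word-level intersection over union
jaccard("good morning world", "good morning")  # 2 shared words / 3 distinct words = 0.667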
logit <- function(x) {
x <- case_when(x == 0 ~.Machine$double.eps,
x == 1 ~ 1-.Machine$double.eps^(.4),
x > 0 & x < 1 ~ x)
log(x / (1 - x))
}
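# Illustrative check (added): 0 and 1 are clamped to just inside (0, 1) before the
# log-odds transform, so the result stays finite at the boundaries
logit(c(0, 0.5, 1))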
clean_text <- function(x, stem = F) {
x %>%
str_replace_all("(\\')(\\w)", "\\2") %>%
str_remove_all("\\n") %>%
    str_remove_all('\\"\\;') %>%  # remove stray escaped-quote/semicolon artefacts
str_remove_all("(RT|via)((?:\\b\\W*@\\w+)+)") %>%
rm_date() %>%
rm_dollar() %>%
rm_angle() %>%
rm_email() %>%
rm_endmark() %>%
rm_hash() %>%
rm_number() %>%
rm_percent() %>%
rm_phone() %>%
rm_tag() %>%
rm_time() %>%
str_to_lower() %>%
str_remove_all("(http://.*?\\s)|(http://.*)") %>%
str_remove_all("@\\w+") %>%
str_replace_all('([[:alpha:]])\\1{2,}', "\\1") %>%
str_remove_all("[[:digit:]]") %>%
str_replace_all("(?! )[^[:alnum:]]", " ") %>%
str_remove_all("\\bh[aeiou]h[aeiou]{1,}\\b") %>%
removeWords(stopwords() %>% .[!. %in% c('no', 'nor', 'not')]) %>%
# str_remove_all("\\b\\w{1,2}\\b") %>%
stringi::stri_trans_general(id = "Latin-ASCII") %>%
str_remove_all("'|\"|'|“|”|\"|\n|,|\\.|…|\\?|\\+|\\-|\\/|\\=|\\(|\\)|‘") %>%
str_trim() %>%
str_squish()
}
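# Illustrative example (added; the input tweet is made up): handles, URLs, digits,
# hashtags, stopwords and repeated letters should all be stripped or collapsed
clean_text("@user I loooove this!!! http://t.co/xyz 2020 #happy")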
get_metadata <- function(x, verbose = F){
if(verbose == T){
t0 <- Sys.time() # to print time
cat("Getting metadata, please wait ..\n")
}
# get metadata with `textfeatures`
metadata <- textfeatures::textfeatures(x, normalize = F, word_dims = 0,verbose = verbose)
  # discard default n_words and n_uq_words
metadata <- metadata %>% select(-n_words, -n_uq_words)
# more features
  # how many ngrams are possible?
  # which ngram comes before and which comes after
metadata <-
tibble(text = x) %>%
rowwise() %>%
mutate(
n_words = length(str_split(text, pattern = " ")[[1]]),
n_uq_words = length(unique(str_split(text, pattern = " ")[[1]]))) %>%
ungroup() %>%
transmute(
n_vogals = str_count(str_to_lower(text), "[aeiou]"),
n_consonants = str_count(str_to_lower(text), "[bcdfghjklmnpqrstvwxyz]"),
n_str = str_length(text),
# n_upper = str_count(text, "[A-Z]"), # n_caps
n_neg = str_count(str_to_lower(text), "(\\bno+\\b|\\bnor+\\b|\\bnot+\\b|n\\'t\\b)"), # negatives
n_atpeople = str_count(text, "@\\w+"),
n_question = str_count(text, "\\?+"),
# n_dot = str_count(text, "\\.+"), # n_period
n_retweet = str_count(text, "(RT|via)((?:\\b\\W*@\\w+)+)")
) %>%
bind_cols(metadata)
# combine plural person in metadata
metadata <- metadata %>%
mutate(n_first_person = n_first_person + n_first_personp,
n_second_person = n_second_person + n_second_personp) %>%
select(-n_first_personp, -n_second_personp)
if(verbose == T){
cat(paste0("Metadata successfully obtained!\nThe process took: ",
round(difftime(Sys.time(), t0, units = "mins")) ," min\n")) # Yeah!
}
return(metadata)
}
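# Example call (added for illustration only; the input strings are made up, so it
# is left commented out):
# get_metadata(c("I am not happy with this", "best day ever!!!"), verbose = FALSE)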
plot_model <- function(results){
# library(patchwork)
# (
results %>%
gather(key, value) %>%
ggplot(aes(x = value, fill = key))+
geom_density(alpha = .5)
# +
# results %>%
# gather(key, value) %>%
# ggplot(aes(y = value, x = key))+
# geom_boxplot(alpha = .5)
# ) /
# results %>%
# ggplot(aes(y = predict, x = observed))+
# geom_point()+
# geom_smooth(method = "loess")+
# geom_abline(intercept=0, slope = 1, color="red", linetype="dashed")
}
results_cross_validation <- function(h2o_model) {
h2o_model@model$cross_validation_metrics_summary %>%
as.data.frame() %>%
select(-mean, -sd) %>%
t() %>%
as.data.frame() %>%
mutate_all(as.character) %>%
mutate_all(as.numeric) %>%
select(mae = mae ,
mean_residual_deviance = mean_residual_deviance,
mse = mse,
r2 = r2,
residual_deviance = residual_deviance,
rmse = rmse) %>%
return()
}
plot_cross_validation <- function(df_results) {
df_results %>%
gather(Metrics, Values) %>%
ggplot(aes(Metrics, Values, fill = Metrics, color = Metrics)) +
geom_boxplot(alpha = 0.3, show.legend = FALSE) +
theme(plot.margin = unit(c(1, 1, 1, 1), "cm")) +
facet_wrap(~ Metrics, scales = "free") +
labs(title = "Model Performance by Some Criteria Selected", y = NULL)
}
xgboost_model <- function(hyper_xgb,
search_criteria,
training_frame = training_frame,
validation_frame = NULL,
distribution = "AUTO",
nfolds = 5){
# n models
n_models <- map_dbl(hyper_xgb, length) %>% prod()
print(glue::glue("Will train {n_models} models"))
# model grid search
grid_xgb <- h2o.grid(algorithm = "xgboost",
x = x, y = y,
hyper_params = hyper_xgb,
search_criteria = search_criteria,
training_frame = training_frame,
distribution = distribution,
# validation_frame = validation_frame,
seed = 1, nfolds = nfolds)
# Get the grid results, sorted by validation r2
gridperf_xgb <- h2o.getGrid(grid_id = grid_xgb@grid_id,
# sort_by = "r2",
decreasing = TRUE)
# get model
model_xgb <- h2o.getModel(gridperf_xgb@model_ids[[1]])
# evaluate
model_perf_xgb <- h2o.performance(model = model_xgb, newdata = validation_frame)
return(list(
n_models = n_models,
grid_xgb = grid_xgb,
gridperf_xgb = gridperf_xgb,
model_xgb = model_xgb,
model_perf_xgb = model_perf_xgb
))
}
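# Example call (added for illustration only; it is not run here because x, y and the
# H2O frames are only created further down, and the hyper-parameter values are assumptions):
# hyper_xgb <- list(ntrees = c(50, 100), max_depth = c(3, 6), learn_rate = c(0.05, 0.1))
# search_criteria <- list(strategy = "RandomDiscrete", max_models = 5, seed = 1)
# xgb_fit <- xgboost_model(hyper_xgb, search_criteria,
#                          training_frame = vecs_train_h2o,
#                          validation_frame = vecs_valid_h2o)
# xgb_fit$model_perf_xgb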
# some interaction columns
parse_metadata <- function(metadata){
metadata %>%
transmute(
textID, text, sel_text, ngram_text, dif_text, sentiment, jaccard,
# text stats
text_n_words = n_words,
# text_n_lowersp,
# text_n_capsp,
# text_n_charsperword,
# sel_text stats
sel_text_n_words = map_dbl(ngram_text, ~length(str_split(.x, pattern = " ")[[1]])),
# sel_text_n_lowersp,
# sel_text_n_capsp,
# sel_text_n_charsperword,
# interaction sel_text x text
sd_sel_text_sent_afinn = text_sent_afinn - sel_text_sent_afinn,
sd_sel_text_sent_bing = text_sent_bing - sel_text_sent_bing,
sd_sel_text_sent_syuzhet = text_sent_syuzhet - sel_text_sent_syuzhet,
sd_sel_text_sent_vader = text_sent_vader - sel_text_sent_vader,
sd_sel_text_n_polite = text_n_polite - sel_text_n_polite,
prop_sel_text_n_vogals = if_else(text_n_vogals == 0, 0, sel_text_n_vogals / text_n_vogals),
prop_sel_text_n_consonants = if_else(text_n_consonants == 0, 0, sel_text_n_consonants / text_n_consonants),
prop_sel_text_n_str = if_else(text_n_str == 0, 0, sel_text_n_str / text_n_str),
prop_sel_text_len = text_n_words / sel_text_n_words,
prop_sel_text_n_chars = if_else(text_n_chars == 0, 0, sel_text_n_chars / text_n_chars),
prop_sel_text_n_uq_chars = if_else(text_n_uq_chars == 0, 0, sel_text_n_uq_chars / text_n_uq_chars),
prop_sel_text_n_lowers = if_else(text_n_lowers == 0, 0, sel_text_n_lowers / text_n_lowers),
prop_sel_text_n_caps = if_else(text_n_caps == 0, 0, sel_text_n_caps / text_n_caps),
prop_sel_text_n_periods = if_else(text_n_periods == 0, 0, sel_text_n_periods / text_n_periods),
prop_sel_text_n_commas = if_else(text_n_commas == 0, 0, sel_text_n_commas / text_n_commas),
prop_sel_text_n_exclaims = if_else(text_n_exclaims == 0, 0, sel_text_n_exclaims / text_n_exclaims),
prop_sel_text_n_puncts = if_else(text_n_puncts == 0, 0, sel_text_n_puncts / text_n_puncts),
prop_sel_text_n_prepositions = if_else(text_n_prepositions == 0, 0, sel_text_n_prepositions / text_n_prepositions),
cat_sel_text_n_neg = if_else(sel_text_n_neg == 0, 0, 1),
cat_sel_text_n_question = if_else(sel_text_n_question == 0, 0, 1),
cat_sel_text_n_digits = if_else(sel_text_n_digits == 0, 0, 1),
cat_sel_text_n_extraspaces = if_else(sel_text_n_extraspaces == 0, 0, 1),
cat_sel_text_n_tobe = if_else(sel_text_n_tobe == 0, 0, 1),
cat_sel_text_n_first_person = if_else(sel_text_n_first_person == 0, 0, 1),
cat_sel_text_n_second_person = if_else(sel_text_n_second_person == 0, 0, 1),
cat_sel_text_n_third_person = if_else(sel_text_n_third_person == 0, 0, 1),
# dif_text stats
dif_text_n_words = map_dbl(dif_text, ~length(str_split(.x, pattern = " ")[[1]])),
# dif_text_n_lowersp,
# dif_text_n_capsp,
# dif_text_n_charsperword,
# interaction dif_text x text
sd_dif_text_sent_afinn = text_sent_afinn - dif_text_sent_afinn,
sd_dif_text_sent_bing = text_sent_bing - dif_text_sent_bing,
sd_dif_text_sent_syuzhet = text_sent_syuzhet - dif_text_sent_syuzhet,
sd_dif_text_sent_vader = text_sent_vader - dif_text_sent_vader,
sd_dif_text_n_polite = text_n_polite - dif_text_n_polite,
prop_dif_text_n_vogals = if_else(text_n_vogals == 0, 0, dif_text_n_vogals / text_n_vogals),
prop_dif_text_n_consonants = if_else(text_n_consonants == 0, 0, dif_text_n_consonants / text_n_consonants),
prop_dif_text_n_str = if_else(text_n_str == 0, 0, dif_text_n_str / text_n_str),
prop_dif_text_len = dif_text_n_words / text_n_words,
prop_dif_text_n_chars = if_else(text_n_chars == 0, 0, dif_text_n_chars / text_n_chars),
prop_dif_text_n_uq_chars = if_else(text_n_uq_chars == 0, 0, dif_text_n_uq_chars / text_n_uq_chars),
prop_dif_text_n_lowers = if_else(text_n_lowers == 0, 0, dif_text_n_lowers / text_n_lowers),
prop_dif_text_n_caps = if_else(text_n_caps == 0, 0, dif_text_n_caps / text_n_caps),
prop_dif_text_n_periods = if_else(text_n_periods == 0, 0, dif_text_n_periods / text_n_periods),
prop_dif_text_n_commas = if_else(text_n_commas == 0, 0, dif_text_n_commas / text_n_commas),
prop_dif_text_n_exclaims = if_else(text_n_exclaims == 0, 0, dif_text_n_exclaims / text_n_exclaims),
prop_dif_text_n_puncts = if_else(text_n_puncts == 0, 0, dif_text_n_puncts / text_n_puncts),
prop_dif_text_n_prepositions = if_else(text_n_prepositions == 0, 0, dif_text_n_prepositions / text_n_prepositions),
cat_dif_text_n_neg = if_else(dif_text_n_neg == 0, 0, 1),
cat_dif_text_n_question = if_else(dif_text_n_question == 0, 0, 1),
cat_dif_text_n_digits = if_else(dif_text_n_digits == 0, 0, 1),
cat_dif_text_n_extraspaces = if_else(dif_text_n_extraspaces == 0, 0, 1),
cat_dif_text_n_tobe = if_else(dif_text_n_tobe == 0, 0, 1),
cat_dif_text_n_first_person = if_else(dif_text_n_first_person == 0, 0, 1),
cat_dif_text_n_second_person = if_else(dif_text_n_second_person == 0, 0, 1),
cat_dif_text_n_third_person = if_else(dif_text_n_third_person == 0, 0, 1),
)
}
to_search <- function(x){
str_replace_all(x, "([[:punct:]]|\\*|\\+|\\.{1,}|\\:|\\$|\\:|\\^|\\?|\\|)", "\\\\\\1")
}
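# Illustrative check (added): regex metacharacters in an ngram are escaped so that
# the ngram can be matched literally by str_remove()
to_search("so good :)")  # returns the pattern with ':' and ')' escaped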
# Load data -----------------------------------------------------------------------------------
train_data <- read_csv("data/train.csv") %>%
rename(sel_text = selected_text)
# remove missing or empty texts
train_data <- train_data %>% filter(!is.na(text), text != "")
# extract neutral
# train_neutral <- train_data %>% filter(sentiment == "neutral")
# train_data <- train_data %>% filter(sentiment != "neutral")
# remove bad texts
{
bad_text <- train_data %>%
mutate(texts = map(text, ~str_split(.x, " ")[[1]]),
sel_texts = map(sel_text, ~str_split(.x, " ")[[1]]),
bad_text = map2_lgl(texts,sel_texts, ~ sum(.x %in% .y)==0) ) %>%
pull(bad_text)
train_data <- train_data[!bad_text,]
}
# collect all possible ngrams and dif_text
train_ngrams <-
train_data %>%
  mutate(n_words = map_dbl(text, ~str_split(.x, pattern = " ")[[1]] %>% length())) %>%
mutate(ngram_text = map2(text, n_words, function(text, n_words){
map(1:n_words,
~ tau::textcnt(text, method = "string", split = " ", n = .x, tolower = FALSE) %>% names() %>% unlist()
) } )) %>%
mutate(ngram_text = map(ngram_text, unlist)) %>%
unnest(cols = c(ngram_text)) %>%
mutate(sel = ngram_text == sel_text) %>%
mutate(dif_text = str_remove(text, to_search(ngram_text)))
# Remove text without ngrams located
{
to_remove <-
train_ngrams %>%
nest(-textID) %>%
mutate(sel = map_lgl(data, ~any(.x$ngram_text == .x$sel_text))) %>%
filter(sel != T) %>%
pull(textID)
train_ngrams <- train_ngrams %>% filter(!textID %in% to_remove)
}
# create y
train_ngrams <- train_ngrams %>%
mutate(jaccard = map2_dbl(sel_text, ngram_text, ~jaccard(.x, .y)))
# Remove texts for which no candidate ngram reaches jaccard == 1
{
to_remove <-
train_ngrams %>%
nest(-textID) %>%
mutate(sel = map_lgl(data, ~any(.x$jaccard == 1))) %>%
filter(sel != T) %>%
pull(textID)
train_ngrams <- train_ngrams %>% filter(!textID %in% to_remove)
}
g1 <-
train_ngrams %>%
ggplot(aes(x = jaccard, fill = sentiment))+
geom_density(alpha = .5)+
labs(title = "before random sample")
train_ngrams %>%
mutate(jaccard = case_when(jaccard == 1 ~ 1,
jaccard == 0 ~ 0,
T ~ NaN)) %>%
filter(!is.na(jaccard)) %>%
count(jaccard) %>% mutate(prop = n/sum(n))
# fast report
# DataExplorer::create_report(parsed_metadata, y = "jaccard")
set.seed(1)
# select one random ngram per textID and jaccard value
train_ngrams <-
train_ngrams %>%
nest(-textID) %>%
mutate(data = map(data, ~.x %>%
group_by(jaccard) %>%
sample_n(1))) %>%
unnest()
g2 <-
train_ngrams %>%
ggplot(aes(x = jaccard, fill = sentiment))+
geom_density(alpha = .5)+
labs(title = "after random sample")
g1 / g2
# get text metadata
text_metadata <-
bind_cols(tibble(textID = train_data$textID), get_metadata(train_data$text, verbose = T) %>%
`colnames<-`(paste0("text_",colnames(.))))
# get sel_text metadata
sel_text_metadata <-
bind_cols(tibble(textID = train_ngrams$textID), get_metadata(train_ngrams$ngram_text, verbose = T) %>%
`colnames<-`(paste0("sel_text_",colnames(.))))
# saveRDS(sel_text_metadata, "sel_text_metadata.rds")
# get dif_text metadata
dif_text_metadata <-
bind_cols(tibble(textID = train_ngrams$textID), get_metadata(train_ngrams$dif_text, verbose = T) %>%
`colnames<-`(paste0("dif_text_",colnames(.))))
# saveRDS(dif_text_metadata , "dif_text_metadata.rds")
# join all in metadata
metadata <-
left_join(
bind_cols(sel_text_metadata, select(dif_text_metadata, -textID)),
bind_cols(train_data, select(text_metadata, -textID)),
by = "textID"
) %>%
bind_cols(select(train_ngrams, ngram_text, dif_text, jaccard, n_words)) %>%
select(textID, text, sel_text, ngram_text, dif_text, sentiment, n_words, jaccard, everything())
# unique colnames
colnames(metadata) %>% str_remove("(text_|sel_text_|dif_text_)") %>% unique()
# Check point ---------------------------------------------------------------------------------
parsed_metadata <- parse_metadata(metadata)
saveRDS(parsed_metadata, "parsed_metadata.rds")
# parsed_metadata <- readRDS("parsed_metadata.rds")
# Model ---------------------------------------------------------------------------------------
# parsed_metadata <- parsed_metadata %>% filter(jaccard != 0 & jaccard != 1)
# split valid data
parsed_metadata <- parsed_metadata %>% group_by(textID) %>% nest() %>% ungroup()
samp <- sample(1:2,nrow(parsed_metadata), T, c(0.8, 0.2))
train_data <-
parsed_metadata[samp == 1,]
valid_data <-
parsed_metadata[samp == 2,]
g1 <-
train_data %>%
unnest() %>%
ggplot(aes(x = jaccard, fill = sentiment))+
geom_density(alpha = .5)+
labs(title = "train random sample")
g2 <-
valid_data %>%
unnest() %>%
ggplot(aes(x = jaccard, fill = sentiment))+
geom_density(alpha = .5)+
labs(title = "valid random sample")
g1 / g2
# h2o model
library(h2o)
h2o.init(nthreads=-1, max_mem_size="8g")
# h2o.no_progress() # Turn off progress bars
n_cores = NULL
# h2o.shutdown()
train_data_h2o <-
train_data %>%
unnest(cols = c(data)) %>%
mutate(text_clean = clean_text(ngram_text))%>%
select(-textID, -text, -sel_text, -ngram_text, -dif_text) %>%
mutate(sentiment = case_when(sentiment == "positive"~1,
sentiment == "neutral"~0,
sentiment == "negative"~-1)) %>%
as.h2o()
valid_data_h2o <-
valid_data %>%
unnest(cols = c(data)) %>%
mutate(text_clean = clean_text(ngram_text))%>%
select(-text, -sel_text, -dif_text) %>%
mutate(sentiment = case_when(sentiment == "positive"~1,
sentiment == "neutral"~0,
sentiment == "negative"~-1)) %>%
as.h2o()
# Word2vec ------------------------------------------------------------------------------------
words_train_h2o <- h2o.tokenize(train_data_h2o$text_clean, " ")
words_valid_h2o <- h2o.tokenize(valid_data_h2o$text_clean, " ")
set.seed(1)
w2v.model <- h2o.word2vec(words_train_h2o,vec_size = 20, sent_sample_rate = 0, epochs = 50)
vecs_train_h2o <- h2o.transform(w2v.model, words_train_h2o, aggregate_method = "AVERAGE")
vecs_valid_h2o <- h2o.transform(w2v.model, words_valid_h2o, aggregate_method = "AVERAGE")
ind_ok <- !is.na(vecs_train_h2o$C1) # remove na for train_h2o
vecs_train_h2o <- h2o.cbind(train_data_h2o[ind_ok, setdiff(colnames(train_data_h2o), c("text_clean"))], vecs_train_h2o[ind_ok,])
vecs_valid_h2o <- h2o.cbind(valid_data_h2o[, setdiff(colnames(valid_data_h2o), c("text_clean"))], vecs_valid_h2o)
# XGBoost -------------------------------------------------------------------------------------
x <- setdiff(colnames(vecs_train_h2o), c("jaccard"))
y <- "jaccard"
xgb0 <- h2o.automl(x, y,
training_frame = vecs_train_h2o,
nfolds = 5,
seed = 1)
h2o.r2(xgb0@leader)
# Plot predict x observed
pred <- predict(xgb0@leader, vecs_valid_h2o)
results <-
valid_data %>%
unnest(cols = c(data)) %>%
bind_cols(as_tibble(pred)) %>%
select(textID, text, sel_text, ngram_text, predict) %>%
group_by(textID) %>%
top_n(1, predict) %>%
rowwise() %>%
mutate(jaccard = jaccard(sel_text, ngram_text)) %>%
ungroup()
mean(results$jaccard)
results %>%
mutate(predict = if_else(predict > 1, 1, predict)) %>%
select(predict, observed = jaccard) %>%
plot_model()
|
/script_ngram_model.R
|
permissive
|
gomesfellipe/kaggle_tweet_sentiment_extraction
|
R
| false | false | 20,041 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert.R
\name{underscoreToPercent}
\alias{underscoreToPercent}
\title{Replace underscore with percent sign}
\usage{
underscoreToPercent(x)
}
\arguments{
\item{x}{character vector containing underscores}
}
\description{
Replace underscore with percent sign. May be used to define time format
strings as defaults in function declarations which are not supported by
inlinedocs.
}
|
/man/underscoreToPercent.Rd
|
permissive
|
KWB-R/kwb.utils
|
R
| false | true | 458 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tex_helper.R
\name{matLowerTri}
\alias{matLowerTri}
\title{Lower triangular matrix from a base element}
\usage{
matLowerTri(psBaseElement, pnNrRow, pnNrCol, pvecDiag = NULL)
}
\arguments{
\item{psBaseElement}{constant prefix of each matrix element}
\item{pnNrRow}{number of rows}
\item{pnNrCol}{number of columns}
\item{pvecDiag}{vector specifying diagonal elements}
}
\description{
Lower triangular matrix from a base element
}
|
/man/matLowerTri.Rd
|
permissive
|
charlotte-ngs/rmdhelp
|
R
| false | true | 510 |
rd
|
loadRequired <- function(pkg){
new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
if (length(new.pkg))
install.packages(new.pkg, dependencies = TRUE)
sapply(pkg, require, character.only = TRUE)
}
packages <- c("data.table", "caret", "randomForest", "foreach", "rpart", "rpart.plot", "doParallel", "corrplot")
loadRequired(packages)
|
/rawcode/loadRequiredPkg.R
|
no_license
|
slothdev/capstone
|
R
| false | false | 364 |
r
|
\name{BAtensorfn}
\alias{BAtensorfn}
\title{
Compute the four-way tensors corresponding to pairs of terms in the homogeneous
portions of systems of linear differential equations.
}
\description{
A linear differential equation involves a set of terms consisting of the product
of a coefficient function that must be estimated and a derivative (including a
derivative order 0) of one of the variables in the system.
We call this portion of the equation the homogeneous part of the equation,
as opposed to the part consisting of forcing terms involving known forcing functions.
When both of the functions in either a homogeneous term or a forcing term are defined
by B-splines, the product involves an inner product of two B-spline basis systems.
When a product of a homogeneous term and a forcing term is required,
as is usual in the use of the Data2LD package, a great improvement in computational
efficiency can be achieved by an initial computation of the four-way array or
tensor that results from taking the inner products of all possible quadruples of
the B-spline basis functions involved.
Memoization is the process of storing these tensors in memory so that they do
not need to be re-computed each time the Data2LD.R function is called.
Memoization is taken care of automatically in the code using the R.cache package,
and is activated the first time a new \code{modelList} object is encountered.
Normally the user does not have to worry about the memoization procedure. It is
possible, however, to manually re-activate the memoization.
However, users may also want to construct these four-way tensors
manually for debugging and other purposes, and this function is made available
for this reason.
}
\usage{
BAtensorfn(XbasisList, modelList, coefList)
}
\arguments{
\item{XbasisList}{A list object of length equal to the number of equations
in the system. Each member of this list is in turn a list specifying the
structure of the equation.}
\item{modelList}{A list object containing the specification of a Data2LD model.
Each member of this list contains a list object that defines a single linear
differential equation.}
\item{coefList}{A list object containing the specifications of one or more
coefficient functions.}
}
\details{
A coefficient specification can be made manually, or can set up in a by a
single invocation of function \code{make.coef}.
Variable specifications can also be set manually, or by an invocation of function
\code{make.variable} for each linear differential equation in the system.
}
\value{
A list object of length equal to the number of variables in the system.
Each of the members of this list is a two-dimensional list object, and the members
of this list are the four-way tensors set up as vectors for each of the possible
pairs of forcing terms. All levels of this list structure are designed to be
accessed numerically by a call like \code{myBAtensor[[ivar]][[ntermj]][[ntermk]]}.
}
\references{
J. O. Ramsay and G. Hooker (2017) \emph{Dynamic Data Analysis}. Springer.
}
|
/MSc-SimSci/ACM41000-Uncertainty-Quantification/assignement3/Data2LD_Fix/Data2LD_Fix/man/BAtensorfn.Rd
|
no_license
|
iantowey/sandbox
|
R
| false | false | 3,189 |
rd
|
\name{BAtensorfn}
\alias{BAtensorfn}
\title{
Compute the four-way tensors corresponding to pairs of terms in the homogeneous
portions of systems of linear differential equations.
}
\description{
A linear differential equation involves a set of terms consisting of the product
of a coefficient function that must be estimated and a derivative (including a
derivative order 0) of one of the variables in the system.
We call this portion of the equation the homogeneous part of the equation,
as opposed to the part consisting of forcing terms involving known forcing functions.
When both of the functions in either a homogeneous term or a forcing term are defined
by B-splines, the product involves an inner product of two B-spline basis systems.
When a product of a homogeneous term and a forcing term are required,
as is usual in the the use of the Data2LD package, a great improvement in efficiency
of computation can be acheived by an initial computation of the four-way array or
tensor resulting by taking the inner products of all possible quadruples of
of the B-spline basis functions involved.
Memoization is the process of storing these tensors in memory so that they do
not need to be re-computed each time the Data2LD.R function is called.
Memoization is taken care of automatically in the code using the R.cache package,
and is activated the first time a new \code{modelList} object is encountered.
Normally the user does not have to worry about the memorization procedure. It is
possible, however, to manually re-activate the memoization.
However, users may also want to construct these four-way tensors
manually for debugging and other purposes, and this function is made available
for this reason.
}
\usage{
BAtensorfn(XbasisList, modelList, coefList)
}
\arguments{
\item{XbasisList}{A list object of length equal to the number of equations
in the system. Each member of this list is in turn a list specifying the
structure of the equation.}
\item{modelList}{A list object containing the specification of a Data2LD model.
Each member of this list contains a list object that defines a single linear
differential equation.}
\item{coefList}{A list object containing the specifications of one or more
coefficient functions.}
}
\details{
A coefficient specification can be made manually, or can be set up by a
single invocation of function \code{make.coef}.
Variable specifications can also be set manually, or by an invocation of function
\code{make.variable} for each linear differential equation in the system.
}
\value{
A list object of length equal to the number of variables in the system.
Each of the members of this list is a two-dimensional list object, and the members
of this list are the four-way tensors set up as vectors for each of the possible
pairs of forcing terms. All levels of this list structure are designed to be
accessed numerically by a call like \code{myBAtensor[[ivar]][[ntermj]][[ntermk]]}.
}
\references{
J. O. Ramsay and G. Hooker (2017) \emph{Dynamic Data Analysis}. Springer.
}
|
\name{sort}
\alias{sort,db.obj-method}
\title{Sort a table or view by a set of columns}
\description{
This function is used to sort a table or view in the database.
}
\usage{
\S4method{sort}{db.obj}(x, decreasing = FALSE, INDICES, ...)
}
\arguments{
\item{x}{
The signature of the method.
A \code{db.obj} (includes \code{db.table} and \code{db.view})
object, which points to a table or view in the database.
}
\item{decreasing}{
A logical, with default value as FALSE. Should the sort be increasing or decreasing?
}
\item{INDICES}{
A list of \code{db.Rquery} objects. Each element of the list selects one or
multiple columns of \code{x}. \code{NULL} to order by random().
}
\item{\dots}{
Further arguments passed to or from other methods.
This is currently not implemented.
}
}
\value{
A \code{db.Rquery} object. It is the query object used to sort the \code{db.obj} in the
database.
}
\author{
Author: Predictive Analytics Team at Pivotal Inc. \email{user@madlib.net}
Maintainer: Hai Qian \email{hqian@gopivotal.com}, Predictive Analytics Team at Pivotal
Inc. \email{user@madlib.net}
}
\seealso{
\code{\link{by}} has similar syntax to this function.
\code{\link{preview}} to view a portion of the data table
}
\examples{
\dontrun{
# Suppose that a valid connection with ID 1 exists
x <- db.data.frame("madlibtestdata.lin_ornstein")
preview(x, 10)
y <- sort(x, decreasing = FALSE, list(x$nation, x$sector) )
# get the SQL query to be run
content(y)
# get the sorted output
preview(y)
}
}
\keyword{database}
\keyword{methods}
\keyword{utility}
|
/man/sort-methods.Rd
|
no_license
|
hooi/PivotalR
|
R
| false | false | 1,638 |
rd
|
\name{sort}
\alias{sort,db.obj-method}
\title{Sort a table or view by a set of columns}
\description{
This function is used to sort a table or view in the database.
}
\usage{
\S4method{sort}{db.obj}(x, decreasing = FALSE, INDICES, ...)
}
\arguments{
\item{x}{
The signature of the method.
A \code{db.obj} (includes \code{db.table} and \code{db.view})
object, which points to a table or view in the database.
}
\item{decreasing}{
A logical, with default value as FALSE. Should the sort be increasing or decreasing?
}
\item{INDICES}{
A list of \code{db.Rquery} objects. Each element of the list selects one or
multiple columns of \code{x}. \code{NULL} to order by random().
}
\item{\dots}{
Further arguments passed to or from other methods.
This is currently not implemented.
}
}
\value{
A \code{db.Rquery} object. It is the query object used to sort the \code{db.obj} in the
database.
}
\author{
Author: Predictive Analytics Team at Pivotal Inc. \email{user@madlib.net}
Maintainer: Hai Qian \email{hqian@gopivotal.com}, Predictive Analytics Team at Pivotal
Inc. \email{user@madlib.net}
}
\seealso{
\code{\link{by}} has similar syntax to this function.
\code{\link{preview}} to view a portion of the data table
}
\examples{
\dontrun{
# Suppose that a valid connection with ID 1 exists
x <- db.data.frame("madlibtestdata.lin_ornstein")
preview(x, 10)
y <- sort(x, decreasing = FALSE, list(x$nation, x$sector) )
# get the SQL query to be run
content(y)
# get the sorted output
preview(y)
}
}
\keyword{database}
\keyword{methods}
\keyword{utility}
|
library(rstan)
eggs <- c(0, 1, 1, 2, 0,
3, 2, 3, 2, 0)
model_data <- list(
J = length(eggs),
eggs = eggs
)
model <- stan_model('model.stan')
fit <- sampling(model, data = model_data, iter = 10000, cores = 4)
post <- as.data.frame(fit)
p_post <- mean(post$p)
dbinom(0, 12, p_post)
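# Hedged addition (not part of the original script): dbinom(0, 12, p_post) above plugs in
# the posterior mean of p. Averaging the binomial probability over the posterior draws
# instead gives the posterior predictive probability of seeing 0 eggs in 12 trials;
# the "12" is simply carried over from the line above.
mean(dbinom(0, 12, post$p))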
|
/code/corner-room/corner-room.R
|
permissive
|
timbook/uscots-bayes-workshop
|
R
| false | false | 298 |
r
|
library(rstan)
eggs <- c(0, 1, 1, 2, 0,
3, 2, 3, 2, 0)
model_data <- list(
J = length(eggs),
eggs = eggs
)
model <- stan_model('model.stan')
fit <- sampling(model, data = model_data, iter = 10000, cores = 4)
post <- as.data.frame(fit)
p_post <- mean(post$p)
dbinom(0, 12, p_post)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_operations.R
\name{apigatewayv2_delete_stage}
\alias{apigatewayv2_delete_stage}
\title{Deletes a Stage}
\usage{
apigatewayv2_delete_stage(ApiId, StageName)
}
\arguments{
\item{ApiId}{[required] The API identifier.}
\item{StageName}{[required] The stage name. Stage names can only contain alphanumeric characters,
hyphens, and underscores. Maximum length is 128 characters.}
}
\description{
Deletes a Stage.
}
\section{Request syntax}{
\preformatted{svc$delete_stage(
ApiId = "string",
StageName = "string"
)
}
}
\keyword{internal}
|
/cran/paws.networking/man/apigatewayv2_delete_stage.Rd
|
permissive
|
johnnytommy/paws
|
R
| false | true | 628 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_operations.R
\name{apigatewayv2_delete_stage}
\alias{apigatewayv2_delete_stage}
\title{Deletes a Stage}
\usage{
apigatewayv2_delete_stage(ApiId, StageName)
}
\arguments{
\item{ApiId}{[required] The API identifier.}
\item{StageName}{[required] The stage name. Stage names can only contain alphanumeric characters,
hyphens, and underscores. Maximum length is 128 characters.}
}
\description{
Deletes a Stage.
}
\section{Request syntax}{
\preformatted{svc$delete_stage(
ApiId = "string",
StageName = "string"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/engine_kernlab.R
\name{new_fit_param_specs_kernlab_rbf}
\alias{new_fit_param_specs_kernlab_rbf}
\title{FitParamSpecs Class Constructor for kernlab Engine with RBF Kernel}
\usage{
new_fit_param_specs_kernlab_rbf()
}
\value{
A FitParamSpecs class object.
}
\description{
FitParamSpecs Class Constructor for kernlab Engine with RBF Kernel
}
|
/man/new_fit_param_specs_kernlab_rbf.Rd
|
permissive
|
five-dots/ml4e
|
R
| false | true | 416 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/engine_kernlab.R
\name{new_fit_param_specs_kernlab_rbf}
\alias{new_fit_param_specs_kernlab_rbf}
\title{FitParamSpecs Class Constructor for kernlab Engine with RBF Kernel}
\usage{
new_fit_param_specs_kernlab_rbf()
}
\value{
A FitParamSpecs class object.
}
\description{
FitParamSpecs Class Constructor for kernlab Engine with RBF Kernel
}
|
w = 0.1
mu = mean(log(x))
tau = sd(log(x))
lambda = 20/mean(x)
cc = sample(1:2, n, TRUE, c(1/2, 1/2))
# Full conditional for cc
v = rep(0,2)
for(i in 1:n){
v[1] = log(w) + dexp(x[i], lambda, log=TRUE)
v[2] = log(1-w) + dlnorm(x[i], mu, tau, log=TRUE)
v = exp(v - max(v))/sum(exp(v - max(v)))
cc[i] = sample(1:2, 1, replace=TRUE, prob=v)
}
# Full conditional for w
w = rbeta(1, 1+sum(cc==1), 1+n-sum(cc==1))
# Equivalent full conditional for w (n - sum(cc==1) equals sum(cc==2))
w = rbeta(1, 1+sum(cc==1), 1+sum(cc==2))
# Full conditional for lambda
lambda = rgamma(1, 1 + sum(cc==1), 1 + sum(x[cc==1]))
# Full conditional for mu
mean.post = (sum(log(x[cc==2]))/tau^2 + 0)/(sum(cc==2)/tau^2 + 1)
std.post = sqrt(1/(sum(cc==2)/tau^2 + 1))
mu = rnorm(1, mean.post, std.post)
# Full conditional for tau
tau = sqrt(1/rgamma(1, 2 + sum(cc==2), 1 + sum((log(x[cc==2]) - mu)^2)))
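# Hedged sketch (added; not part of the original reference answer): the updates above are a
# single sweep. Below, purely for illustration, data are simulated (the generating values
# are assumptions) and the same full conditionals are repeated inside a loop to give a
# complete Gibbs sampler for the exponential / log-normal mixture.
set.seed(42)
n <- 200
z <- rbinom(n, 1, 0.3)                                  # latent component indicator
x <- ifelse(z == 1, rexp(n, rate = 1), rlnorm(n, meanlog = 1.5, sdlog = 0.5))
n.iter <- 2000
draws  <- matrix(NA, n.iter, 4, dimnames = list(NULL, c("w", "lambda", "mu", "tau")))
w <- 0.1; mu <- mean(log(x)); tau <- sd(log(x)); lambda <- 20/mean(x)
cc <- sample(1:2, n, TRUE, c(1/2, 1/2))
for (iter in 1:n.iter) {
  # component memberships
  for (i in 1:n) {
    v <- c(log(w) + dexp(x[i], lambda, log = TRUE),
           log(1 - w) + dlnorm(x[i], mu, tau, log = TRUE))
    v <- exp(v - max(v))/sum(exp(v - max(v)))
    cc[i] <- sample(1:2, 1, prob = v)
  }
  # mixture weight, exponential rate, log-normal mean and sd (same conditionals as above)
  w      <- rbeta(1, 1 + sum(cc == 1), 1 + sum(cc == 2))
  lambda <- rgamma(1, 1 + sum(cc == 1), 1 + sum(x[cc == 1]))
  mean.post <- (sum(log(x[cc == 2]))/tau^2 + 0)/(sum(cc == 2)/tau^2 + 1)
  std.post  <- sqrt(1/(sum(cc == 2)/tau^2 + 1))
  mu  <- rnorm(1, mean.post, std.post)
  tau <- sqrt(1/rgamma(1, 2 + sum(cc == 2), 1 + sum((log(x[cc == 2]) - mu)^2)))
  draws[iter, ] <- c(w, lambda, mu, tau)
}
colMeans(draws[-(1:500), ])                              # posterior means after burn-in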
|
/chr3_MCMC/GibbsSampling-expo-logGaussian-mixture_reference_answer.R
|
permissive
|
SZJShuffle/BayesianMixtureModels
|
R
| false | false | 895 |
r
|
w = 0.1
mu = mean(log(x))
tau = sd(log(x))
lambda = 20/mean(x)
cc = sample(1:2, n, TRUE, c(1/2, 1/2))
# Full conditional for cc
v = rep(0,2)
for(i in 1:n){
v[1] = log(w) + dexp(x[i], lambda, log=TRUE)
v[2] = log(1-w) + dlnorm(x[i], mu, tau, log=TRUE)
v = exp(v - max(v))/sum(exp(v - max(v)))
cc[i] = sample(1:2, 1, replace=TRUE, prob=v)
}
# Full conditional for w
w = rbeta(1, 1+sum(cc==1), 1+n-sum(cc==1))
# Equivalent full conditional for w (n - sum(cc==1) equals sum(cc==2))
w = rbeta(1, 1+sum(cc==1), 1+sum(cc==2))
# Full conditional for lambda
lambda = rgamma(1, 1 + sum(cc==1), 1 + sum(x[cc==1]))
# Full conditional for mu
mean.post = (sum(log(x[cc==2]))/tau^2 + 0)/(sum(cc==2)/tau^2 + 1)
std.post = sqrt(1/(sum(cc==2)/tau^2 + 1))
mu = rnorm(1, mean.post, std.post)
# Full conditional for tau
tau = sqrt(1/rgamma(1, 2 + sum(cc==2), 1 + sum((log(x[cc==2]) - mu)^2)))
|
\name{gpd}
\alias{dgpd}
\alias{pgpd}
\alias{qgpd}
\alias{rgpd}
\title{The Generalized Pareto Distribution}
\description{
Density function, distribution function, quantile function and
random generation for the generalized Pareto distribution (GPD)
with location, scale and shape parameters.
}
\usage{
dgpd(x, loc=0, scale=1, shape=0, log = FALSE)
pgpd(q, loc=0, scale=1, shape=0, lower.tail = TRUE)
qgpd(p, loc=0, scale=1, shape=0, lower.tail = TRUE)
rgpd(n, loc=0, scale=1, shape=0)
}
\arguments{
\item{x, q}{Vector of quantiles.}
\item{p}{Vector of probabilities.}
\item{n}{Number of observations.}
\item{loc, scale, shape}{Location, scale and shape parameters; the
\code{shape} argument cannot be a vector (must have length one).}
\item{log}{Logical; if \code{TRUE}, the log density is returned.}
\item{lower.tail}{Logical; if \code{TRUE} (default), probabilities
are P[X <= x], otherwise, P[X > x]}
}
\details{
The generalized Pareto distribution function (Pickands, 1975) with
parameters \eqn{\code{loc} = a}, \eqn{\code{scale} = b} and
\eqn{\code{shape} = s} is
\deqn{G(z) = 1 - \{1+s(z-a)/b\}^{-1/s}}{
G(z) = 1 - {1+s(z-a)/b}^(-1/s)}
for \eqn{1+s(z-a)/b > 0} and \eqn{z > a}, where \eqn{b > 0}.
If \eqn{s = 0} the distribution is defined by continuity.
}
\value{
\code{dgpd} gives the density function, \code{pgpd} gives the
distribution function, \code{qgpd} gives the quantile function,
and \code{rgpd} generates random deviates.
}
\references{
Pickands, J. (1975)
Statistical inference using extreme order statistics.
\emph{Annals of Statistics}, \bold{3}, 119--131.
}
\seealso{\code{\link{fpot}}, \code{\link{rgev}}}
\examples{
dgpd(2:4, 1, 0.5, 0.8)
pgpd(2:4, 1, 0.5, 0.8)
qgpd(seq(0.9, 0.6, -0.1), 2, 0.5, 0.8)
rgpd(6, 1, 0.5, 0.8)
p <- (1:9)/10
pgpd(qgpd(p, 1, 2, 0.8), 1, 2, 0.8)
## [1] 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9
}
\keyword{distribution}
|
/man/gpd.Rd
|
no_license
|
cran/evd
|
R
| false | false | 1,926 |
rd
|
\name{gpd}
\alias{dgpd}
\alias{pgpd}
\alias{qgpd}
\alias{rgpd}
\title{The Generalized Pareto Distribution}
\description{
Density function, distribution function, quantile function and
random generation for the generalized Pareto distribution (GPD)
with location, scale and shape parameters.
}
\usage{
dgpd(x, loc=0, scale=1, shape=0, log = FALSE)
pgpd(q, loc=0, scale=1, shape=0, lower.tail = TRUE)
qgpd(p, loc=0, scale=1, shape=0, lower.tail = TRUE)
rgpd(n, loc=0, scale=1, shape=0)
}
\arguments{
\item{x, q}{Vector of quantiles.}
\item{p}{Vector of probabilities.}
\item{n}{Number of observations.}
\item{loc, scale, shape}{Location, scale and shape parameters; the
\code{shape} argument cannot be a vector (must have length one).}
\item{log}{Logical; if \code{TRUE}, the log density is returned.}
\item{lower.tail}{Logical; if \code{TRUE} (default), probabilities
are P[X <= x], otherwise, P[X > x]}
}
\details{
The generalized Pareto distribution function (Pickands, 1975) with
parameters \eqn{\code{loc} = a}, \eqn{\code{scale} = b} and
\eqn{\code{shape} = s} is
\deqn{G(z) = 1 - \{1+s(z-a)/b\}^{-1/s}}{
G(z) = 1 - {1+s(z-a)/b}^(-1/s)}
for \eqn{1+s(z-a)/b > 0} and \eqn{z > a}, where \eqn{b > 0}.
If \eqn{s = 0} the distribution is defined by continuity.
}
\value{
\code{dgpd} gives the density function, \code{pgpd} gives the
distribution function, \code{qgpd} gives the quantile function,
and \code{rgpd} generates random deviates.
}
\references{
Pickands, J. (1975)
Statistical inference using extreme order statistics.
\emph{Annals of Statistics}, \bold{3}, 119--131.
}
\seealso{\code{\link{fpot}}, \code{\link{rgev}}}
\examples{
dgpd(2:4, 1, 0.5, 0.8)
pgpd(2:4, 1, 0.5, 0.8)
qgpd(seq(0.9, 0.6, -0.1), 2, 0.5, 0.8)
rgpd(6, 1, 0.5, 0.8)
p <- (1:9)/10
pgpd(qgpd(p, 1, 2, 0.8), 1, 2, 0.8)
## [1] 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9
}
\keyword{distribution}
|
# Pre-processing script
roster.kams<-data.table(roster.kams)
# Rename columns to lowercase letters and shorter names where possible
# (i.e. GRADE_LEVEL to grade)
setnames(roster.kams, c("student.name", "ps.id", "grade", "date"))
# retype calendar date with lubridate. The database returns dates in
# YYYY-MM-DD hh:mm:ss format where the times are all 00:00:00, so we can
# dispense with the time in the date field
roster.kams[, date:=ymd_hms(date)]
# set keys for data.table. The date and PowerSchool ID should be sufficient for
# uniqueness
setkey(roster.kams, ps.id)
# add month and year columns for easier joins
roster.kams[, month:=month(date)]
roster.kams[, year:=year(date)]
cache('roster.kams')
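# Hedged illustration (added; not part of the original munge script): a quick sanity check
# that the new month/year columns behave as join keys -- roster counts per month.
# Assumes roster.kams was built as above.
roster.kams[, .N, by = .(year, month)]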
|
/Board Report Attrition/munge/01-KAMS_Prep.R
|
no_license
|
kippchicago/Data_Analysis
|
R
| false | false | 721 |
r
|
# Pre-processing script
roster.kams<-data.table(roster.kams)
# Rename columns to lowercase letters and shorter names where possible
# (i.e. GRADE_LEVEL to grade)
setnames(roster.kams, c("student.name", "ps.id", "grade", "date"))
# retype calendar date with lubridate. The database returns dates in
# YYYY-MM-DD hh:mm:ss format where the times are all 00:00:00, so we can
# dispense with the time in the date field
roster.kams[, date:=ymd_hms(date)]
# set keys for data.table. The date and PowerSchool ID should be sufficient for
# uniqueness
setkey(roster.kams, ps.id)
# add month and year columns for easier joins
roster.kams[, month:=month(date)]
roster.kams[, year:=year(date)]
cache('roster.kams')
|
# install.packages("usethis")
#
# usethis::create_project()
# The goal of categorical
|
/Old Setups/organization.R
|
no_license
|
noahholubow/competition_regression
|
R
| false | false | 87 |
r
|
# install.packages("usethis")
#
# usethis::create_project()
# The goal of categorical
|
library(ecoforecastR)
#making a forecast and having fun
##HERE IS WHAT GOES IN:
#IC=initial conditions, from j.pheno.out$params, see below
#tempcast=max temp forecast from NOAA ensembles
#beta=slope of temp data (assessed from daymet data?)
#q=process error tau_add
#Nmc=# of mcmc runs
#gmin=default value min gcc
#gmax=default value max gcc
##STILL NEED TO SAVE DATA VALUES EACH TIME TO GET GMIN AND GMAX FOR EACH SITE
#the timestep is 16 days:
NT=16
#the number of Monte Carlo ensemble members (Nmc):
Nmc=1000
# #we set gcc min and max values, they are different for each run/site and they are here:
# load(file=paste0(as.character(siteID[i]),".data.Rdata"))
# gmin=data$gmin
# gmax=data$gmax
#load gcc data into list:
site.gcc<-list()
site.gcc$BART<-BART
site.gcc$CLBJ<-CLBJ
site.gcc$DELA<-DELA
site.gcc$GRSM<-GRSM
site.gcc$HARV<-HARV
site.gcc$SCBI<-SCBI
site.gcc$STEI<-STEI
site.gcc$UKFS<-UKFS
#getting all IC's for each site:
IC.ens<-list()
for (s in siteID){
IC.ens[[s]]<-rnorm(Nmc,tail(site.gcc[[s]]$gcc_90,1),tail(site.gcc[[s]]$gcc_sd,1))
}
#FORECAST FUNCTION
phenoforecast <- function(IC,tempcast,beta,Q,n=Nmc,gmin,gmax){
N <- matrix(NA,n,NT)
Nprev <- IC
for(t in 1:NT){
mu = Nprev + beta*tempcast[t,] #or [,t] depending on dim
N[,t] <- pmax(pmin(rnorm(n,mu,Q),gmax),gmin) #ensuring we are btw min and max we set
Nprev <- N[,t]
}
return(N)
}
#finding mean temp from NOAA ensembles
#WAIT! do unit conversions first because it's in Kelvin!
#make function to convert from kelvin to celsius (like daymet data we used to calibrate the model)
k.to.c<-function(k){
return(k-273.15)
}
#noaa temp data in celsius
#df1.c<-apply(df1,2,k.to.c)
df1.c <- lapply(df1,k.to.c)
###now we need to group them by site
# df1.BART<-df1.c[1:31,]
# df1.CLBJ<-df1.c[32:62,]
# df1.DELA<-df1.c[63:93,]
# df1.GRSM<-df1.c[94:124,]
# df1.HARV<-df1.c[125:155,]
# df1.SCBI<-df1.c[156:186,]
# df1.STEI<-df1.c[187:217,]
# df1.UKFS<-df1.c[218:248,]
#findmaxtemp<-function(x){
# return(max(x))
#}
#BART.temp.test<-tapply(df1.BART,day,max)
findmaxtemp<-function(x){
try=as.vector(x)
return(tapply(try, rep(1:16, each=24), max))
}
#MUST DO FOR ALL SITES
#temp.max <- matrix(findmaxtemp(df1.BART[1,-1]),ncol=1) #drops the 1st observation (analysis)
#temp.max <- apply(df1.c$BART[,-1],1,findmaxtemp) #days vs ensemble members
#temp.max.mean<-matrix(apply(temp.max,1,mean),ncol=1)
#FINDS MAX TEMP ENSEMBLE MEAN FOR EACH SITE:
#temp.max.mean<-list()
#for (s in siteID){
# temp.max<-apply(df1.c[[s]][,-1],1,findmaxtemp)
# temp.max.mean[[s]]<-matrix(apply(temp.max,1,mean),ncol=1)
#}
temp.max<-list()
temp.max.mean<-list()
for (s in siteID){
temp.max[[s]]<-matrix(apply(df1.c[[s]][,-1],1,findmaxtemp),nrow=NT)
temp.max.mean[[s]]<-matrix(apply(temp.max[[s]],1,mean),ncol=1)
}
## parameters
params <- as.matrix(j.pheno.out)
param.mean <- apply(params,2,mean)
beta<-param.mean["betaTemp"]
q<-1/sqrt(param.mean["tau_add"])
## initial conditions
IC <-data$mu_ic ##we don't have this? START @ END OF GCC TIME SERIES AND ITS UNCERTAINTY(sd) FOR EACH SITE
#phiend<-phenoforecast(IC,temp.max,beta,q,Nmc,gmin,gmax)
#next steps: compute confidence intervals, add in uncertainties 1 by one, do for 35 not 16, then set up for all sites,THEN assess where we're at
time=1:NT
#------THE FORECAST LOOP----------
site.pheno<-list()
#forecast loop
for (s in siteID){
#uncertainties for each forecast
prow<-sample.int(nrow(params),Nmc,replace=TRUE)
Qmc<-1/sqrt(params[prow,"tau_add"])
drow<-sample.int(ncol(temp.max[[s]]),Nmc,replace=TRUE)
#forecast step
site.pheno[[s]]<-phenoforecast(IC=IC.ens[[s]],
tempcast=temp.max[[s]][,drow],
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=min(site.gcc[[s]]$gcc_90,na.rm=T),
gmax=max(site.gcc[[s]]$gcc_90,na.rm=T))
}
##end forecast loop
#next steps: plotting each site with confidence intervals
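#Hedged sketch of that plotting step (added; not part of the original script): summarise
#each site's forecast ensemble into 95% intervals and draw them with ciEnvelope().
#Assumes site.pheno, siteID, NT and time exist as defined above; col.alpha() is used
#exactly as it is elsewhere in this script.
for (s in siteID){
  ci.s <- apply(site.pheno[[s]], 2, quantile, c(0.025, 0.5, 0.975))
  plot(0, 0, xlim = c(0, NT), ylim = range(ci.s), xlab = "days ahead", ylab = "gcc", main = s)
  ecoforecastR::ciEnvelope(time, ci.s[1,], ci.s[3,], col = col.alpha("lightBlue", 0.6))
  lines(time, ci.s[2,], lwd = 2)
}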
##EVERYTHING BELOW THIS LINE IS OUR BART FORECAST PRACTICE
##########################################################
#---------------trying the deterministic---------
if(FALSE){
PhF.BART<-phenoforecast(IC=IC,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
plot(0,0, xlim=c(0,NT),ylim=range(PhF.BART))
for (p in 1:Nmc){
points(PhF.BART[p,],type="l",col=p)
}
#this will make confidence intervals
time.f<-1:NT
ci.PHF.BART <- apply(as.matrix(PhF.BART),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART[1,],ci.PHF.BART[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#initial condition ensemble created from last gcc observation point & sd
IC.ens<-rnorm(Nmc,tail(BART$gcc_90,1),tail(BART$gcc_sd,1))
PhF.BART.IC<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
time.f<-1:NT
ci.PHF.BART.IC <- apply(as.matrix(PhF.BART.IC),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IC))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#parameter uncertainty for beta
prow <- sample.int(nrow(params),Nmc,replace=TRUE)
PhF.BART.IP<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IP <- apply(as.matrix(PhF.BART.IP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("lightBlue",0.6))
#---------------driver uncertainty
drow<-sample.int(ncol(temp.max$BART),Nmc,replace=TRUE)
PhF.BART.IPT<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPT <- apply(as.matrix(PhF.BART.IPT),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPT))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("lightBlue",0.6))
#----------------process error
Qmc <- 1/sqrt(params[prow,"tau_add"])
PhF.BART.IPTP<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPTP <- apply(as.matrix(PhF.BART.IPTP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("green",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("thistle3",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("red2",0.6))
#ecoforecastR::ciEnvelope(time.f,ci.PhF.BART[1,],ci.PhF.BART[3,],col=col.alpha("thistle3"))
}
|
/Milestone6_phenoforecast.R
|
permissive
|
hubbardd109/PhenoPhriends
|
R
| false | false | 8,089 |
r
|
library(ecoforecastR)
#making a forecast and having fun
##HERE IS WHAT GOES IN:
#IC=initial conditions, from j.pheno.out$params, see below
#tempcast=max temp forecast from NOAA ensembles
#beta=slope of temp data (assessed from daymet data?)
#q=process error tau_add
#Nmc=# of mcmc runs
#gmin=default value min gcc
#gmax=default value max gcc
##STILL NEED TO SAVE DATA VALUES EACH TIME TO GET GMIN AND GMAX FOR EACH SITE
#the timestep is 16 days:
NT=16
#the number of Monte Carlo ensemble members (Nmc):
Nmc=1000
# #we set gcc min and max values, they are different for each run/site and they are here:
# load(file=paste0(as.character(siteID[i]),".data.Rdata"))
# gmin=data$gmin
# gmax=data$gmax
#load gcc data into list:
site.gcc<-list()
site.gcc$BART<-BART
site.gcc$CLBJ<-CLBJ
site.gcc$DELA<-DELA
site.gcc$GRSM<-GRSM
site.gcc$HARV<-HARV
site.gcc$SCBI<-SCBI
site.gcc$STEI<-STEI
site.gcc$UKFS<-UKFS
#getting all IC's for each site:
IC.ens<-list()
for (s in siteID){
IC.ens[[s]]<-rnorm(Nmc,tail(site.gcc[[s]]$gcc_90,1),tail(site.gcc[[s]]$gcc_sd,1))
}
#FORECAST FUNCTION
phenoforecast <- function(IC,tempcast,beta,Q,n=Nmc,gmin,gmax){
N <- matrix(NA,n,NT)
Nprev <- IC
for(t in 1:NT){
mu = Nprev + beta*tempcast[t,] #or [,t] depending on dim
N[,t] <- pmax(pmin(rnorm(n,mu,Q),gmax),gmin) #ensuring we are btw min and max we set
Nprev <- N[,t]
}
return(N)
}
#finding mean temp from NOAA ensembles
#WAIT! do unit conversions first because it's in Kelvin!
#make function to convert from kelvin to celsius (like daymet data we used to calibrate the model)
k.to.c<-function(k){
return(k-273.15)
}
#noaa temp data in celsius
#df1.c<-apply(df1,2,k.to.c)
df1.c <- lapply(df1,k.to.c)
###now we need to group them by site
# df1.BART<-df1.c[1:31,]
# df1.CLBJ<-df1.c[32:62,]
# df1.DELA<-df1.c[63:93,]
# df1.GRSM<-df1.c[94:124,]
# df1.HARV<-df1.c[125:155,]
# df1.SCBI<-df1.c[156:186,]
# df1.STEI<-df1.c[187:217,]
# df1.UKFS<-df1.c[218:248,]
#findmaxtemp<-function(x){
# return(max(x))
#}
#BART.temp.test<-tapply(df1.BART,day,max)
findmaxtemp<-function(x){
try=as.vector(x)
return(tapply(try, rep(1:16, each=24), max))
}
#MUST DO FOR ALL SITES
#temp.max <- matrix(findmaxtemp(df1.BART[1,-1]),ncol=1) #drops the 1st observation (analysis)
#temp.max <- apply(df1.c$BART[,-1],1,findmaxtemp) #days vs ensemble members
#temp.max.mean<-matrix(apply(temp.max,1,mean),ncol=1)
#FINDS MAX TEMP ENSEMBLE MEAN FOR EACH SITE:
#temp.max.mean<-list()
#for (s in siteID){
# temp.max<-apply(df1.c[[s]][,-1],1,findmaxtemp)
# temp.max.mean[[s]]<-matrix(apply(temp.max,1,mean),ncol=1)
#}
temp.max<-list()
temp.max.mean<-list()
for (s in siteID){
temp.max[[s]]<-matrix(apply(df1.c[[s]][,-1],1,findmaxtemp),nrow=NT)
temp.max.mean[[s]]<-matrix(apply(temp.max[[s]],1,mean),ncol=1)
}
## parameters
params <- as.matrix(j.pheno.out)
param.mean <- apply(params,2,mean)
beta<-param.mean["betaTemp"]
q<-1/sqrt(param.mean["tau_add"])
## initial conditions
IC <-data$mu_ic ##we don't have this? START @ END OF GCC TIME SERIES AND ITS UNCERTAINTY(sd) FOR EACH SITE
#phiend<-phenoforecast(IC,temp.max,beta,q,Nmc,gmin,gmax)
#next steps: compute confidence intervals, add in uncertainties 1 by one, do for 35 not 16, then set up for all sites,THEN assess where we're at
time=1:NT
#------THE FORECAST LOOP----------
site.pheno<-list()
#forecast loop
for (s in siteID){
#uncertainties for each forecast
prow<-sample.int(nrow(params),Nmc,replace=TRUE)
Qmc<-1/sqrt(params[prow,"tau_add"])
drow<-sample.int(ncol(temp.max[[s]]),Nmc,replace=TRUE)
#forecast step
site.pheno[[s]]<-phenoforecast(IC=IC.ens[[s]],
tempcast=temp.max[[s]][,drow],
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=min(site.gcc[[s]]$gcc_90,na.rm=T),
gmax=max(site.gcc[[s]]$gcc_90,na.rm=T))
}
##end forecast loop
#next steps: plotting each site with confidence intervals
##EVERYTHING BELOW THIS LINE IS OUR BART FORECAST PRACTICE
##########################################################
#---------------trying the deterministic---------
if(FALSE){
PhF.BART<-phenoforecast(IC=IC,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
plot(0,0, xlim=c(0,NT),ylim=range(PhF.BART))
for (p in 1:Nmc){
points(PhF.BART[p,],type="l",col=p)
}
#this will make confidence intervals
time.f<-1:NT
ci.PHF.BART <- apply(as.matrix(PhF.BART),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART[1,],ci.PHF.BART[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#initial condition ensemble created from last gcc observation point & sd
IC.ens<-rnorm(Nmc,tail(BART$gcc_90,1),tail(BART$gcc_sd,1))
PhF.BART.IC<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=param.mean["betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
time.f<-1:NT
ci.PHF.BART.IC <- apply(as.matrix(PhF.BART.IC),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IC))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("lightBlue",0.6))
#-----------------
#parameter uncertainty for beta
prow <- sample.int(nrow(params),Nmc,replace=TRUE)
PhF.BART.IP<-phenoforecast(IC=IC.ens,
tempcast=temp.max.mean$BART,
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IP <- apply(as.matrix(PhF.BART.IP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("lightBlue",0.6))
#---------------driver uncertainty
drow<-sample.int(ncol(temp.max$BART),Nmc,replace=TRUE)
PhF.BART.IPT<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=0,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPT <- apply(as.matrix(PhF.BART.IPT),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPT))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("lightBlue",0.6))
#----------------process error
Qmc <- 1/sqrt(params[prow,"tau_add"])
PhF.BART.IPTP<-phenoforecast(IC=IC.ens,
tempcast=temp.max$BART[,drow], #this is not working
beta=params[prow,"betaTemp"],
Q=Qmc,
n=Nmc,
gmin=gmin,
gmax=gmax)
ci.PhF.BART.IPTP <- apply(as.matrix(PhF.BART.IPTP),2,quantile,c(0.025,0.5,0.975))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
plot(0,0,xlim=c(0,NT),ylim=range(PhF.BART.IPTP))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPTP[1,],ci.PhF.BART.IPTP[3,],col=col.alpha("lightBlue",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IP[1,],ci.PhF.BART.IP[3,],col=col.alpha("green",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PhF.BART.IPT[1,],ci.PhF.BART.IPT[3,],col=col.alpha("thistle3",0.6))
ecoforecastR::ciEnvelope(time.f,ci.PHF.BART.IC[1,],ci.PHF.BART.IC[3,],col=col.alpha("red2",0.6))
#ecoforecastR::ciEnvelope(time.f,ci.PhF.BART[1,],ci.PhF.BART[3,],col=col.alpha("thistle3"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Func_Get.R
\name{getRiverNodes}
\alias{getRiverNodes}
\title{Get the From/To nodes of the river.
\code{getRiverNodes}}
\usage{
getRiverNodes(spr = readriv.sp())
}
\arguments{
\item{spr}{SpatialLine* of river streams.}
}
\value{
a list, c(points, FT_ID)
}
\description{
Get the From/To nodes of the river.
\code{getRiverNodes}
}
|
/man/getRiverNodes.Rd
|
permissive
|
SHUD-System/rSHUD
|
R
| false | true | 406 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Func_Get.R
\name{getRiverNodes}
\alias{getRiverNodes}
\title{Get the From/To nodes of the river.
\code{getRiverNodes}}
\usage{
getRiverNodes(spr = readriv.sp())
}
\arguments{
\item{spr}{SpatialLine* of river streams.}
}
\value{
a list, c(points, FT_ID)
}
\description{
Get the From/To nodes of the river.
\code{getRiverNodes}
}
|
### How to use the load function
(mydat1 <- subset(airquality, Ozone > 120)) # create a data frame
load(file="mydata.rdata") # read the .RData file
mydat1 # loaded back under the name used at save time, overwriting the existing object
mydat2
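# Hedged note (added; not in the original example): mydata.rdata must have been created
# earlier with save(), e.g. (hypothetical objects and file name):
# save(mydat1, mydat2, file = "mydata.rdata")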
|
/docs/autumn/example/data-load.r
|
no_license
|
noboru-murata/sda
|
R
| false | false | 237 |
r
|
### How to use the load function
(mydat1 <- subset(airquality, Ozone > 120)) # create a data frame
load(file="mydata.rdata") # read the .RData file
mydat1 # loaded back under the name used at save time, overwriting the existing object
mydat2
|
#' @title Historical Reporting Limits
#'
#' @description Imports the historical reporting limits for all methods for a
#'parameter code.
#'
#' @importFrom XML readHTMLTable
#' @importFrom lubridate today
#' @param parm_cd the parameter code.
#' @return A data frame of the analyte, methods, begin and end date for each
#'reporting level change, the reporting level type used, the long-term
#'detection limit and the reporting level.
#' @note This function works only within the internal USGS network.
#' @seealso \code{\link{qw-class}}
#' @references Lorenz, D.L., 2014, USGSqw OFR.
#' @keywords IO
#' @examples
#'
#'\dontrun{
#'readNWQLdl("00608")
#'}
#'
#' @export
readNWQLdl <- function(parm_cd) {
## Coding history:
## 2012Sep21 DLLorenz original Coding
## 2012Dec28 DLLorenz Roxygenized
## 2012Dec28 This version
##
if(missing(parm_cd))
stop("parm_cd is required")
parm_cd <- zeroPad(parm_cd, 5)
myurl <- paste("http://nwql.cr.usgs.gov/usgs/limits/limits.cfm?st=p&ss=",
parm_cd, sep="")
retval <- readHTMLTable(myurl, stringsAsFactors=FALSE)[[3]] # that is the one
names(retval) <- gsub(" ", "", names(retval)) # remove spaces
## Fix the columns
warn <- options("warn")
options(warn=-1) # Suppress 'NAs introduced by coercion' messages
retval$StartDate=as.Date(retval$StartDate, format="%Y%m%d")
retval$EndDate=as.Date(retval$EndDate, format="%Y%m%d")
retval$ReportLevelCode=toupper(retval$ReportLevelCode)
retval$DetectionLevel=as.numeric(retval$DetectionLevel)
retval$ReportingLevel=as.numeric(retval$ReportingLevel)
# Fix NAs in the date--to be able to make range comparisons
retval$StartDate[is.na(retval$StartDate)] <- as.Date("1900-01-01")
retval$EndDate[is.na(retval$EndDate)] <- today()
## Restore warning and return data
options(warn)
return(retval)
}
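# Hedged usage sketch (added; not part of the package source): fetch the limits table for a
# parameter code and keep only the rows whose date range covers a given sample date.
# Runs only inside the USGS network (see @note); "00608" and the date are example values.
if (FALSE) {
  lims <- readNWQLdl("00608")
  smp_date <- as.Date("2010-06-15")
  lims[lims$StartDate <= smp_date & lims$EndDate >= smp_date, ]
}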
|
/R/readNWQLdl.R
|
permissive
|
Zhenglei-BCS/smwrQW
|
R
| false | false | 1,839 |
r
|
#' @title Historical Reporting Limits
#'
#' @description Imports the historical reporting limits for all methods for a
#'parameter code.
#'
#' @importFrom XML readHTMLTable
#' @importFrom lubridate today
#' @param parm_cd the parameter code.
#' @return A data frame of the analyte, methods, begin and end date for each
#'reporting level change, the reporting level type used, the long-term
#'detection limit and the reporting level.
#' @note This function works only within the internal USGS network.
#' @seealso \code{\link{qw-class}}
#' @references Lorenz, D.L., 2014, USGSqw OFR.
#' @keywords IO
#' @examples
#'
#'\dontrun{
#'readNWQLdl("00608")
#'}
#'
#' @export
readNWQLdl <- function(parm_cd) {
## Coding history:
## 2012Sep21 DLLorenz original Coding
## 2012Dec28 DLLorenz Roxygenized
## 2012Dec28 This version
##
if(missing(parm_cd))
stop("parm_cd is required")
parm_cd <- zeroPad(parm_cd, 5)
myurl <- paste("http://nwql.cr.usgs.gov/usgs/limits/limits.cfm?st=p&ss=",
parm_cd, sep="")
retval <- readHTMLTable(myurl, stringsAsFactors=FALSE)[[3]] # that is the one
names(retval) <- gsub(" ", "", names(retval)) # remove spaces
## Fix the columns
warn <- options("warn")
options(warn=-1) # Suppress 'NAs introduced by coercion' messages
retval$StartDate=as.Date(retval$StartDate, format="%Y%m%d")
retval$EndDate=as.Date(retval$EndDate, format="%Y%m%d")
retval$ReportLevelCode=toupper(retval$ReportLevelCode)
retval$DetectionLevel=as.numeric(retval$DetectionLevel)
retval$ReportingLevel=as.numeric(retval$ReportingLevel)
# Fix NAs in the date--to be able to make range comparisons
retval$StartDate[is.na(retval$StartDate)] <- as.Date("1900-01-01")
retval$EndDate[is.na(retval$EndDate)] <- today()
## Restore warning and return data
options(warn)
return(retval)
}
|
`.sourceCpp_1_DLLInfo` <- dyn.load('C:/Users/Windows/Documents/knapply.com/content/post/advent-of-code-2018/index_cache/html/unnamed-chunk-9_sourceCpp/sourceCpp-x86_64-w64-mingw32-1.0.0/sourcecpp_4246ed55014/sourceCpp_2.dll')
cpp_cumsum_last2 <- Rcpp:::sourceCppFunction(function(x) {}, FALSE, `.sourceCpp_1_DLLInfo`, 'sourceCpp_1_cpp_cumsum_last2')
rm(`.sourceCpp_1_DLLInfo`)
|
/blogdown/post/advent-of-code-2018/index_cache/html/unnamed-chunk-9_sourceCpp/sourceCpp-x86_64-w64-mingw32-1.0.0/sourcecpp_4246ed55014/file42440cd3986.cpp.R
|
no_license
|
knapply/knapply.com
|
R
| false | false | 379 |
r
|
`.sourceCpp_1_DLLInfo` <- dyn.load('C:/Users/Windows/Documents/knapply.com/content/post/advent-of-code-2018/index_cache/html/unnamed-chunk-9_sourceCpp/sourceCpp-x86_64-w64-mingw32-1.0.0/sourcecpp_4246ed55014/sourceCpp_2.dll')
cpp_cumsum_last2 <- Rcpp:::sourceCppFunction(function(x) {}, FALSE, `.sourceCpp_1_DLLInfo`, 'sourceCpp_1_cpp_cumsum_last2')
rm(`.sourceCpp_1_DLLInfo`)
|
# load reqd. library
library(tidyverse)
library(lubridate)
# download file from remote and unzip
if(!file.exists("household_power_consumption.txt")) {
message("Downloading data")
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
file="household_power_consumption.zip"
download.file(fileURL, destfile=file)
unzip(file)
}
# read file
data <- read_delim("household_power_consumption.txt", ";")
# convert type to date and add Datetime variable.
data1 <- data %>% mutate(Date = dmy(Date))
data2 <- data1 %>% mutate(Datetime = ymd_hms(paste(as.character(Date), as.character(Time))))
# filter to the reqd. dates.
data_filt <- data2 %>% filter(Date %in% dmy(c("1/2/2007", "2/2/2007")))
# open png file to export the plot.
png(file="plot2.png", height=480, width=480, units="px")
# plot line chart.
with(data_filt, plot(Datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
# close the png device.
dev.off ()
|
/ExData_Plotting1/plot2.R
|
no_license
|
ekkal/datascience_coursera_jhu
|
R
| false | false | 1,006 |
r
|
# load reqd. library
library(tidyverse)
library(lubridate)
# download file from remote and unzip
if(!file.exists("household_power_consumption.txt")) {
message("Downloading data")
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
file="household_power_consumption.zip"
download.file(fileURL, destfile=file)
unzip(file)
}
# read file
data <- read_delim("household_power_consumption.txt", ";")
# convert type to date and add Datetime variable.
data1 <- data %>% mutate(Date = dmy(Date))
data2 <- data1 %>% mutate(Datetime = ymd_hms(paste(as.character(Date), as.character(Time))))
# filter to the reqd. dates.
data_filt <- data2 %>% filter(Date %in% dmy(c("1/2/2007", "2/2/2007")))
# open png file to export the plot.
png(file="plot2.png", height=480, width=480, units="px")
# plot line chart.
with(data_filt, plot(Datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
# close the png device.
dev.off ()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwiseComparisons.R
\name{localDiscrepancyMatrix}
\alias{localDiscrepancyMatrix}
\title{Local discrepancy}
\usage{
localDiscrepancyMatrix(matrix, mju)
}
\arguments{
\item{matrix}{- matrix}
\item{mju}{- ranking of matrix}
}
\value{
matrix with local discrepancies
}
\description{
Compute matrix with entries d_ij = max{e_ij-1, 1/e_ij-1}
}
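% Added illustrative example (hedged): the error-matrix values below are hypothetical and
% serve only to show the d_ij = max(e_ij - 1, 1/e_ij - 1) computation in base R.
\examples{
e <- matrix(c(1, 1.25, 0.8, 1), nrow = 2)   # hypothetical local error matrix
pmax(e - 1, 1/e - 1)                        # entries d_ij = max(e_ij - 1, 1/e_ij - 1)
}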
|
/man/localDiscrepancyMatrix.Rd
|
no_license
|
katal24/PairwiseComparisons_work
|
R
| false | true | 419 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwiseComparisons.R
\name{localDiscrepancyMatrix}
\alias{localDiscrepancyMatrix}
\title{Local discrepancy}
\usage{
localDiscrepancyMatrix(matrix, mju)
}
\arguments{
\item{matrix}{- matrix}
\item{mju}{- ranking of matrix}
}
\value{
matrix with local discrepancies
}
\description{
Compute matrix with entries d_ij = max{e_ij-1, 1/e_ij-1}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/meta.R
\name{meta}
\alias{meta}
\title{Constructor function for metadata nodes}
\usage{
meta(property = character(0), content = character(0), rel = character(0),
href = character(0), datatype = character(0), id = character(0),
type = character(0), children = list())
}
\arguments{
\item{property}{specify the ontological definition together with its namespace, e.g. dc:title}
\item{content}{content of the metadata field}
\item{rel}{Ontological definition of the reference provided in href}
\item{href}{A link to some reference}
\item{datatype}{optional RDFa field}
\item{id}{optional id element (otherwise id will be automatically generated).}
\item{type}{optional xsi:type. If not given, will use either "LiteralMeta" or "ResourceMeta" as
determined by the presence of either a property or a href value.}
\item{children}{Optional element containing any valid XML block (XMLInternalElementNode class, see the XML package for details).}
}
\description{
Constructor function for metadata nodes
}
\details{
User must either provide property+content or rel+href. Mixing these will result in potential garbage.
The datatype attribute will be detected automatically from the class of the content argument. Maps from R class
to schema datatypes are as follows:
character - xs:string,
Date - xs:date,
integer - xs:integer,
numeric - xs:decimal,
logical - xs:boolean
}
\examples{
meta(content="example", property="dc:title")
}
\seealso{
\code{\link{nexml_write}}
}
|
/man/meta.Rd
|
permissive
|
vanderphylum/RNeXML
|
R
| false | false | 1,559 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/meta.R
\name{meta}
\alias{meta}
\title{Constructor function for metadata nodes}
\usage{
meta(property = character(0), content = character(0), rel = character(0),
href = character(0), datatype = character(0), id = character(0),
type = character(0), children = list())
}
\arguments{
\item{property}{specify the ontological definition together with its namespace, e.g. dc:title}
\item{content}{content of the metadata field}
\item{rel}{Ontological definition of the reference provided in href}
\item{href}{A link to some reference}
\item{datatype}{optional RDFa field}
\item{id}{optional id element (otherwise id will be automatically generated).}
\item{type}{optional xsi:type. If not given, will use either "LiteralMeta" or "ResourceMeta" as
determined by the presence of either a property or a href value.}
\item{children}{Optional element containing any valid XML block (XMLInternalElementNode class, see the XML package for details).}
}
\description{
Constructor function for metadata nodes
}
\details{
User must either provide property+content or rel+href. Mixing these will result in potential garbage.
The datatype attribute will be detected automatically from the class of the content argument. Maps from R class
to schema datatypes are as follows:
character - xs:string,
Date - xs:date,
integer - xs:integer,
numeric - xs:decimal,
logical - xs:boolean
}
\examples{
meta(content="example", property="dc:title")
}
\seealso{
\code{\link{nexml_write}}
}
|
library(CDM)
### Name: gdina.wald
### Title: Wald Statistic for Item Fit of the DINA and ACDM Rule for GDINA
### Model
### Aliases: gdina.wald summary.gdina.wald
### Keywords: Wald test GDINA model
### ** Examples
## Not run:
##D #############################################################################
##D # EXAMPLE 1: Wald test for DINA simulated data sim.dina
##D #############################################################################
##D
##D data(sim.dina, package="CDM")
##D data(sim.qmatrix, package="CDM")
##D
##D # Model 1: estimate GDINA model
##D mod1 <- CDM::gdina( sim.dina, q.matrix=sim.qmatrix, rule="GDINA")
##D summary(mod1)
##D
##D # perform Wald test
##D res1 <- CDM::gdina.wald( mod1 )
##D summary(res1)
##D # -> results show that all but one item fit according to the DINA rule
##D
##D # select some output
##D summary(res1, vars=c("wgtdist", "p") )
## End(Not run)
|
/data/genthat_extracted_code/CDM/examples/gdina.wald.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 913 |
r
|
library(CDM)
### Name: gdina.wald
### Title: Wald Statistic for Item Fit of the DINA and ACDM Rule for GDINA
### Model
### Aliases: gdina.wald summary.gdina.wald
### Keywords: Wald test GDINA model
### ** Examples
## Not run:
##D #############################################################################
##D # EXAMPLE 1: Wald test for DINA simulated data sim.dina
##D #############################################################################
##D
##D data(sim.dina, package="CDM")
##D data(sim.qmatrix, package="CDM")
##D
##D # Model 1: estimate GDINA model
##D mod1 <- CDM::gdina( sim.dina, q.matrix=sim.qmatrix, rule="GDINA")
##D summary(mod1)
##D
##D # perform Wald test
##D res1 <- CDM::gdina.wald( mod1 )
##D summary(res1)
##D # -> results show that all but one item fit according to the DINA rule
##D
##D # select some output
##D summary(res1, vars=c("wgtdist", "p") )
## End(Not run)
|
#-------------------------------
# ESM 206 Lab 1 - Meet RStudio, wrangling & viz basics
#-------------------------------
#-------------------------------
# Lab 1 objectives: Testing 06/17/2021 again
#-------------------------------
# By the end of Lab 1, you should be able to...
# Create a new .Rproj and add files to working directory
# Attach packages with library()
# Read in a CSV with readr::read_csv()
# Explore data with base functions (e.g. View, names, head, tail, etc.)
# Do some basic wrangling with dplyr (select, filter, mutate, group_by, summarize)
# Use the pipe operator
# Create basic graphs in ggplot2
# Step 0: make a pathway on our computer that you'll put all of your ESM 206 labs in. Maybe Documents > Bren courses > ESM 206 > Labs
# Step 1: Open RStudio, describe pieces of the RStudio environment
# Step 2: Create a new project called 'esm206_lab_1'. Why does this matter? Working directory = no file path/broken path issues. Notice that a folder now appears wherever you saved this project with the same name, and it contains a .Rproj file.
# Step 3: Create a few variables in the console (variable_name <- value/"string"). Notice they show up in the Environment. How would you store everything you did in the Console? And all errors/messages/etc. show up there. That's a problem for reproducibility & collaboration - hard to follow, no clear history, etc. We want to have a clear, reproducible story of our entire data analysis. One way: working in scripts.
# Step 4: Open a new R script. Introduce basics. What is a comment? Organizing sections? If I try to create some variables and press Enter after each line, nothing happens - how is this different from the Console?
#-------------------------
# 1. Attach packages
#-------------------------
# First, attach packages with library(package_name)
# You can only attach packages that are installed
# Run code with Command + Return, or Command + Shift + Return
# Ask: why does it make sense to include this in a script, versus running this in the Console?
# install.packages('tidyverse')
# install.packages('janitor')
library(tidyverse)
library(janitor)
#-------------------------
# 2. Read in data from a .csv file
#-------------------------
# We'll use readr::read_csv() to get data from a comma separated value (CSV) file. The general structure is:
# store_as_this <- read_csv("file_path/file_name.csv")
# If the file is in your working directory (i.e. in the project folder), then you don't need to add a file_path/ to find it because it's already where R is pointed to.
# Data: Harry Potter aggression by character, book & number of mentions!
# Collected and made available by Andrew Heiss (https://www.andrewheiss.com/, https://github.com/andrewheiss/Harry-Potter-aggression)
hp_data <- read_csv("lab_1_materials/hp_aggression.csv")
# How do I know it worked? No errors, + look in 'Environment'...then...
#--------------------------
# 3. Check it out (LOOK AT YOUR DATA)
#--------------------------
# How to view your data frame in "spreadsheet" format?
# You can click on the df name in the 'Environment' tab to run the View() function (see Console after clicking on the name - notice that this actually runs View). Others: names, head, tail, dim, summary, stringr, etc.
View(hp_data)
names(hp_data)
summary(hp_data)
ls() # to show all objects in environment
# This format: tidy data
# Each variable is a column. Each row is an observation. This df is in great shape - not all will be. But for Week 1, we'll use fairly tidy data (for lab and Assignment 1).
#-------------------------
# 4. dplyr::select() - subset COLUMNS
#-------------------------
# dplyr::select(): choose/exclude/reorder *COLUMNS*
# Example: select only the columns 'character' and 'book'
# Way 1: No pipe
hp_ex_1 <- select(hp_data, character, book)
# Way 2: Meet the pipe operator %>% (does the same thing)
# Shortcut for pipe: Command + Shift + M
hp_ex_2 <- hp_data %>%
select(character, book)
# The pipe is really nice for sequential operations (avoids excessive intermediate data frames, nesting functions, etc.). Think of adding the pipe as saying "and then..."
# Example: Select columns 'abb' through 'aggressions'
hp_ex_3 <- hp_data %>%
select(abb:aggressions)
# Example: select columns 'character' through 'aggressions', excluding 'book':
hp_ex_4 <- hp_data %>%
select(character:aggressions, -book)
# Example: Select book, character, and aggressions, in that order:
hp_ex_5 <- hp_data %>%
select(book, character, aggressions)
#---------------------------
# 5. dplyr::filter() - conditionally subset ROWS
#---------------------------
# Use filter to set conditions that will decide which rows are kept/excluded in a new subset
# Example: only keep observations from the book "The Goblet of Fire"
hp_ex_6 <- hp_data %>%
filter(book == "The Goblet of Fire")
# Some notes to keep in mind: (1) Case sensitive when trying to match words! (2) Note the double = (==) when looking for word matching
# Example: keep rows where the character abbreviation (abb) matches 'harr', 'herm', 'vold', OR 'ronw.' One way: use the vertical line '|' to indicate 'OR' within a filter statement:
hp_ex_7 <- hp_data %>%
filter(abb == "harr" | abb == "herm" | abb == "vold" | abb == "ronw")
# Or, a less tedious way: look for matches within a string series:
hp_ex_8 <- hp_data %>%
filter(abb %in% c("harr", "herm", "vold","ronw"))
# See ?"%in%" to see more details. It's basically a special operator for finding matches (binary - match? yes or no...if yes, keep it)
# Ex: Only keep rows where the book is "The Deathly Hallows" AND aggressions is greater than 5:
hp_ex_9 <- hp_data %>%
filter(book == "The Deathly Hallows", aggressions > 5)
# Other operators also work: >=, <=, >, <, or if a value, use a single '='. Note: for 'AND' statements, you can either just use a comma, or use an ampersand (&), or do them as separate filtering steps
#----------------------
# 6. dplyr::mutate() - add columns, keep existing
#----------------------
# Use dplyr::mutate() to add variables to a data frame, while keeping existing (unless you explicitly overwrite)
# Example: Let's add a column that contains an 'aggression per mention' ratio (call new column 'apm').
hp_ex_10 <- hp_data %>%
mutate(apm = aggressions/mentions)
#----------------------
# 7. dplyr::group_by() + dplyr::summarize()
#----------------------
# Use dplyr::group_by() to create 'groupings' by variable, then dplyr::summarize() to calculate a single value for each group & report a table
# Example: we want to group by character abbreviation, then find the total number of aggressions for all characters across all books.
np_ex_11 <- hp_data %>%
group_by(abb) %>%
summarize(tot_agg = sum(aggressions))
# Other summary statistics: mean, median, sd, var, max, min, etc.
#----------------------
# 8. Linking multiple wrangling steps with the pipe
#----------------------
# Example: We want to only keep rows that contain observations for Harry Potter (Harry), Voldemort, Hermione Granger, and Severus Snape . We also only want to keep the columns for character, book, and mentions. Then, create groups by character abbreviation and find the total number of mentions.
np_ex_12 <- hp_data %>%
filter(character %in% c("Harry","Voldemort","Hermione Granger","Severus Snape")) %>%
select(character, book, mentions) %>%
group_by(character) %>%
summarize(
total = sum(mentions)
)
#------------------------
# 9. Basic graphs with ggplot2
#------------------------
# A ggplot2 graph requires 3 things: (1) that you're using ggplot; (2) what data to plot, including what's x and y as relevant; (3) what type of graph (geom) to create
ggplot(data = np_ex_12, aes(x = character, y = total)) +
geom_col() +
labs(x = "Character",
y = "Total mentions",
title = "My Title!") +
coord_flip()
# Let's make a scatterplot of aggressions v. mentions (across all characters, books, etc.)
ggplot(data = hp_data, aes(x = mentions, y = aggressions)) +
geom_point(color = "purple") +
theme_bw()
# Let's make a histogram of all aggression counts to see how they're distributed
ggplot(data = hp_data, aes(x = aggressions)) +
geom_histogram(bins = 10)
# Now, a jitterplot of the number of aggressions by book:
ggplot(data = hp_data, aes(x = book, y = aggressions)) +
geom_jitter(width = 0.1, alpha = 0.5, aes(color = book), show.legend = FALSE) +
coord_flip()
#-----------------------
# 10. Shutting down
#-----------------------
# All of the code you need to reproduce everything you've done should exist in your script. That means that if your script is saved (press save now), then you can close the project without saving the workspace, graphs, etc.
# Then, if you want to open it again, just double click on the .Rproj file, notice that all your files are right in the 'Files' tab (including your script), click on the script to open it, then run the entire thing with Command + Shift + Enter to recreate all of your great work!
# ---------------------
# Shortcuts & goodies
# ---------------------
# ALT/option key + (minus) to add an arrow <-
# Command + Shift + C for multiple lines commenting out/in
# RStudio Cheatsheets
#-----------------------
# END LAB 1
#-----------------------
|
/lab_1_materials/lab_1_key.R
|
no_license
|
katleyq/esm-206-labs-2019
|
R
| false | false | 9,305 |
r
|
#-------------------------------
# ESM 206 Lab 1 - Meet RStudio, wrangling & viz basics
#-------------------------------
#-------------------------------
# Lab 1 objectives: Testing 06/17/2021 again
#-------------------------------
# By the end of Lab 1, you should be able to...
# Create a new .Rproj and add files to working directory
# Attach packages with library()
# Read in a CSV with readr::read_csv()
# Explore data with base functions (e.g. View, names, head, tail, etc.)
# Do some basic wrangling with dplyr (select, filter, mutate, group_by, summarize)
# Use the pipe operator
# Create basic graphs in ggplot2
# Step 0: make a pathway on our computer that you'll put all of your ESM 206 labs in. Maybe Documents > Bren courses > ESM 206 > Labs
# Step 1: Open RStudio, describe pieces of the RStudio environment
# Step 2: Create a new project called 'esm206_lab_1'. Why does this matter? Working directory = no file path/broken path issues. Notice that a folder now appears wherever you saved this project with the same name, and it contains a .Rproj file.
# Step 3: Create a few variables in the console (variable_name <- value/"string"). Notice they show up in the Environment. How would you store everything you did in the Console? And all errors/messages/etc. show up there. That's a problem for reproducibility & collaboration - hard to follow, no clear history, etc. We want to have a clear, reproducible story of our entire data analysis. One way: working in scripts.
# Step 4: Open a new R script. Introduce basics. What is a comment? Organizing sections? If I try to create some variables and press Enter after each line, nothing happens - how is this different from the Console?
#-------------------------
# 1. Attach packages
#-------------------------
# First, attach packages with library(package_name)
# You can only attach packages that are installed
# Run code with Command + Return, or Command + Shift + Return
# Ask: why does it make sense to include this in a script, versus running this in the Console?
# install.packages('tidyverse')
# install.packages('janitor')
library(tidyverse)
library(janitor)
#-------------------------
# 2. Read in data from a .csv file
#-------------------------
# We'll use readr::read_csv() to get data from a comma separated value (CSV) file. The general structure is:
# store_as_this <- read_csv("file_path/file_name.csv")
# If the file is in your working directory (i.e. in the project folder), then you don't need to add a file_path/ to find it because it's already where R is pointed to.
# Data: Harry Potter aggression by character, book & number of mentions!
# Collected and made available by Andrew Heiss (https://www.andrewheiss.com/, https://github.com/andrewheiss/Harry-Potter-aggression)
hp_data <- read_csv("lab_1_materials/hp_aggression.csv")
# How do I know it worked? No errors, + look in 'Environment'...then...
#--------------------------
# 3. Check it out (LOOK AT YOUR DATA)
#--------------------------
# How to view your data frame in "spreadsheet" format?
# You can click on the df name in the 'Environment' tab to run the View() function (see Console after clicking on the name - notice that this actually runs View). Others: names, head, tail, dim, summary, stringr, etc.
View(hp_data)
names(hp_data)
summary(hp_data)
ls() # to show all objects in environment
# This format: tidy data
# Each variable is a column. Each row is an observation. This df is in great shape - not all will be. But for Week 1, we'll use fairly tidy data (for lab and Assignment 1).
#-------------------------
# 4. dplyr::select() - subset COLUMNS
#-------------------------
# dplyr::select(): choose/exclude/reorder *COLUMNS*
# Example: select only the columns 'character' and 'book'
# Way 1: No pipe
hp_ex_1 <- select(hp_data, character, book)
# Way 2: Meet the pipe operator %>% (does the same thing)
# Shortcut for pipe: Command + Shift + M
hp_ex_2 <- hp_data %>%
select(character, book)
# The pipe is really nice for sequential operations (avoids excessive intermediate data frames, nesting functions, etc.). Think of adding the pipe as saying "and then..."
# Example: Select columns 'abb' through 'aggressions'
hp_ex_3 <- hp_data %>%
select(abb:aggressions)
# Example: select columns 'character' through 'aggressions', excluding 'book':
hp_ex_4 <- hp_data %>%
select(character:aggressions, -book)
# Example: Select book, character, and aggressions, in that order:
hp_ex_5 <- hp_data %>%
select(book, character, aggressions)
#---------------------------
# 5. dplyr::filter() - conditionally subset ROWS
#---------------------------
# Use filter to set conditions that will decide which rows are kept/excluded in a new subset
# Example: only keep observations from the book "The Goblet of Fire"
hp_ex_6 <- hp_data %>%
filter(book == "The Goblet of Fire")
# Some notes to keep in mind: (1) Case sensitive when trying to match words! (2) Note the double = (==) when looking for word matching
# Example: keep rows where the character abbreviation (abb) matches 'harr', 'herm', 'vold', OR 'ronw.' One way: use the vertical line '|' to indicate 'OR' within a filter statement:
hp_ex_7 <- hp_data %>%
filter(abb == "harr" | abb == "herm" | abb == "vold" | abb == "ronw")
# Or, a less tedious way: look for matches within a string series:
hp_ex_8 <- hp_data %>%
filter(abb %in% c("harr", "herm", "vold","ronw"))
# See ?"%in%" to see more details. It's basically a special operator for finding matches (binary - match? yes or no...if yes, keep it)
# Ex: Only keep rows where the book is "The Deathly Hallows" AND aggressions is greater than 5:
hp_ex_9 <- hp_data %>%
filter(book == "The Deathly Hallows", aggressions > 5)
# Other comparison operators also work: >=, <=, >, <; to match an exact value, use a double equals (==), not a single '='. Note: for 'AND' statements, you can either just use a comma, use an ampersand (&), or do them as separate filtering steps (both shown in the added example below)
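# (Added example, not part of the original lab: the same AND condition as hp_ex_9, written once with '&' and once as two separate filter() steps.)
hp_ex_9b <- hp_data %>%
  filter(book == "The Deathly Hallows" & aggressions > 5)
hp_ex_9c <- hp_data %>%
  filter(book == "The Deathly Hallows") %>%
  filter(aggressions > 5)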
#----------------------
# 6. dplyr::mutate() - add columns, keep existing
#----------------------
# Use dplyr::mutate() to add variables to a data frame, while keeping existing (unless you explicitly overwrite)
# Example: Let's add a column that contains an 'aggression per mention' ratio (call new column 'apm').
hp_ex_10 <- hp_data %>%
mutate(apm = aggressions/mentions)
#----------------------
# 7. dplyr::group_by() + dplyr::summarize()
#----------------------
# Use dplyr::group_by() to create 'groupings' by variable, then dplyr::summarize() to calculate a single value for each group & report a table
# Example: we want to group by character abbreviation, then find the total number of aggressions for all characters across all books.
hp_ex_11 <- hp_data %>%
group_by(abb) %>%
summarize(tot_agg = sum(aggressions))
# Other summary statistics: mean, median, sd, var, max, min, etc.
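# (Added example, not part of the original lab: the same grouping with a few of the other summary statistics mentioned above.)
hp_ex_11b <- hp_data %>%
  group_by(abb) %>%
  summarize(mean_agg = mean(aggressions),
            max_agg = max(aggressions),
            n_rows = n())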
#----------------------
# 8. Linking multiple wrangling steps with the pipe
#----------------------
# Example: We want to only keep rows that contain observations for Harry Potter (Harry), Voldemort, Hermione Granger, and Severus Snape. We also only want to keep the columns for character, book, and mentions. Then, create groups by character and find the total number of mentions.
hp_ex_12 <- hp_data %>%
filter(character %in% c("Harry","Voldemort","Hermione Granger","Severus Snape")) %>%
select(character, book, mentions) %>%
group_by(character) %>%
summarize(
total = sum(mentions)
)
#------------------------
# 9. Basic graphs with ggplot2
#------------------------
# A ggplot2 graph requires 3 things: (1) that you're using ggplot; (2) what data to plot, including what's x and y as relevant; (3) what type of graph (geom) to create
ggplot(data = hp_ex_12, aes(x = character, y = total)) +
geom_col() +
labs(x = "Character",
y = "Total mentions",
title = "My Title!") +
coord_flip()
# Let's make a scatterplot of aggressions vs. mentions (across all characters, books, etc.)
ggplot(data = hp_data, aes(x = mentions, y = aggressions)) +
geom_point(color = "purple") +
theme_bw()
# Let's make a histogram of all aggression counts to see how they're distributed
ggplot(data = hp_data, aes(x = aggressions)) +
geom_histogram(bins = 10)
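# (Added variant, not part of the original lab: you can set binwidth instead of bins; the value 2 here is just an illustrative choice.)
ggplot(data = hp_data, aes(x = aggressions)) +
  geom_histogram(binwidth = 2)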
# Now, a jitterplot of the number of aggressions by book:
ggplot(data = hp_data, aes(x = book, y = aggressions)) +
geom_jitter(width = 0.1, alpha = 0.5, aes(color = book), show.legend = FALSE) +
coord_flip()
#-----------------------
# 10. Shutting down
#-----------------------
# All of the code you need to reproduce everything you've done should exist in your script. That means that if your script is saved (press save now), then you can close the project without saving the workspace, graphs, etc.
# Then, if you want to open it again, just double-click on the .Rproj file. All your files will be right there in the 'Files' tab (including your script); click on the script to open it, then run the entire thing with Command + Shift + Return to recreate all of your great work!
# ---------------------
# Shortcuts & goodies
# ---------------------
# Alt/Option + - (minus) to insert the assignment arrow <-
# Command + Shift + C to comment or uncomment multiple lines at once
# RStudio Cheatsheets
#-----------------------
# END LAB 1
#-----------------------
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_brainnet.R
\name{write.brainnet}
\alias{write.brainnet}
\title{Write files to be used for visualization with BrainNet Viewer}
\usage{
write.brainnet(g, node.color = c("none", "comm", "comm.wt", "lobe", "comp",
"network"), node.size = "constant", edge.wt = NULL, file.prefix = "")
}
\arguments{
\item{g}{The \code{igraph} graph object of interest}
\item{node.color}{Character string indicating whether to color the vertices or
not; can be 'none', 'lobe', 'comm', 'comm.wt', 'comp', or 'network'}
\item{node.size}{Character string indicating what size the vertices should be;
can be any vertex-level attribute (default: 'constant')}
\item{edge.wt}{Character string indicating the edge attribute to use to
return a weighted adjacency matrix}
\item{file.prefix}{Character string for the basename of the \emph{.node} and
\emph{.edge} files that are written}
}
\description{
This function will write the \emph{.node} and \emph{.edge} files necessary
for visualization with the BrainNet Viewer software (see Reference below).
}
\details{
For the \emph{.node} file, there are 6 columns:
\itemize{
\item \emph{Column 1}: x-coordinates
\item \emph{Column 2}: y-coordinates
\item \emph{Column 3}: z-coordinates
\item \emph{Column 4}: Vertex color
\item \emph{Column 5}: Vertex size
\item \emph{Column 6}: Vertex label
}
The \emph{.edge} file is the graph's associated adjacency matrix; a weighted
adjacency matrix can be returned by using the \code{edge.wt} argument.
}
\examples{
\dontrun{
write.brainnet(g, node.color='comm', node.size='degree',
edge.wt='t.stat')
}
}
\author{
Christopher G. Watson, \email{cgwatson@bu.edu}
}
\references{
Xia M, Wang J, He Y (2013). \emph{BrainNet Viewer: a network
visualization tool for human brain connectomics}. PLoS One, 8(7):e68910.
}
Source: /man/write.brainnet.Rd from repo nagyistge/brainGraph (no license specified; language: R)
# Gestantes_matrizcorrelacoes.R
library(readxl)
library(GGally)
Gestantes <- read_excel("Gestantes.xlsx")
mc <- data.frame(Gestantes$IDADE,
Gestantes$HT,
Gestantes$HB,
Gestantes$HEM,
Gestantes$LEUC,
Gestantes$FOLICO,
Gestantes$B12)
names(mc) <- c("Idade","HT","HB","HEM","LEUC","FOLICO","B12")
print(head(mc))
cat("\n...\n")
print(tail(mc, addrownums = FALSE, n=2L))
cat("\nMatriz de correlacoes:\n")
print(cor(mc)) # correlation matrix
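# (Added note, not part of the original script: cor() defaults to Pearson correlations and returns NA for a pair of columns with missing values; alternatives such as cor(mc, method = "spearman") or cor(mc, use = "complete.obs") could be used if needed.)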
# plot the correlation matrix
print(GGally::ggcorr(mc,
nbreaks = 6,
label = TRUE,
label_size = 4,
color = "#888888"))
Source: /Aula06/Aula_Correlacao_e_Regressao/Gestantes_matrizcorrelacoes.R from repo yadevi/2020 (no license specified; language: R)
library(dplyr)
library(tidyverse)
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("stats"))
#Read in the individual flatFiles
option_list <- list(
make_option(c("-v", "--verbose"), action="store_true", default=TRUE,
help="Print output [default]"),
make_option(c("-q", "--quietly"), action="store_false",
dest="verbose", help="Print little output"),
make_option(c("-o", "--outfile"), action="store", default="combinedIgTxCall.txt",
help="output results file"),
make_option(c("-s", "--specimen"),action="store", default="SAMPLE"),
make_option(c("-p", "--pairoscope_file"),action="store",
help="Flat file containing Ig Calls using Pairoscope"),
make_option(c("-m", "--manta_file"), action="store",
help="Flat file containing Ig Calls using manta"),
make_option(c("-g", "--gammit_file"), action="store",
help="Flat file containing Ig Calls using Gammit"),
make_option(c("-c", "--count"), action="store",type="integer", default=2,
help="Minimum caller count [default %default]",
metavar="number")
)
opt <- parse_args(OptionParser(option_list=option_list))
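# (Added usage sketch, not part of the original pipeline; the script and file names below are illustrative only:)
#   Rscript summarize_Ig.R -s MY_SAMPLE -p pairoscope_calls.txt -m manta_calls.txt \
#     -g gammit_calls.txt -o MY_SAMPLE_IgTxCalls.txt -c 2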
write("Check input files...\n", stderr())
if(!is.null(opt$pairoscope_file) && !file.exists(as.character(opt$pairoscope_file)))
{
write("Pairoscope Ig Tx file not found...\n", stderr())
}
if (!is.null(opt$manta_file) && !file.exists(as.character(opt$manta_file)))
{
write("Manta Ig Tx file not found...\n", stderr())
}
if (!is.null(opt$gammit_file) && !file.exists(as.character(opt$gammit_file)))
{
write("Gammit Ig Tx file not found...\n", stderr())
}
write("Processing Data...\n", stderr())
specimen = tibble(Specimen=opt$specimen)
combined_calls<-NULL
call_list <- list(specimen)
#pairoscope
if(!is.null(opt$pairoscope_file) && file.exists(as.character(opt$pairoscope_file)))
{
pairoscope= read.table(file=opt$pairoscope_file, header = TRUE,sep = '\t')
pair_calls=pairoscope %>% select(ends_with("Call"))
pair_calls <- pair_calls %>% rename_all(list(~ str_replace(., "CALL", "CALL_Pairoscope")))
pair_source = pairoscope %>% select(ends_with("IGSOURCE"))
pair_source <- pair_source %>% rename_all(list(~ str_replace(.,"IGSOURCE", "IgSource")))
pair_source$NSD2_IgSource = ifelse(pair_calls$NSD2_CALL_Pairoscope==1, pair_source$NSD2_IgSource,0)
pair_source$CCND1_IgSource = ifelse(pair_calls$CCND1_CALL_Pairoscope==1, pair_source$CCND1_IgSource,0)
pair_source$CCND2_IgSource = ifelse(pair_calls$CCND2_CALL_Pairoscope==1, pair_source$CCND2_IgSource,0)
pair_source$CCND3_IgSource = ifelse(pair_calls$CCND3_CALL_Pairoscope==1, pair_source$CCND3_IgSource,0)
pair_source$MYC_IgSource = ifelse(pair_calls$MYC_CALL_Pairoscope==1, pair_source$MYC_IgSource,0)
pair_source$MAF_IgSource = ifelse(pair_calls$MAF_CALL_Pairoscope==1, pair_source$MAF_IgSource,0)
pair_source$MAFA_IgSource = ifelse(pair_calls$MAFA_CALL_Pairoscope==1, pair_source$MAFA_IgSource,0)
pair_source$MAFB_IgSource = ifelse(pair_calls$MAFB_CALL_Pairoscope==1, pair_source$MAFB_IgSource,0)
call_list <- append(call_list, pair_calls)
}
#manta
if(!is.null(opt$manta_file) && file.exists(as.character(opt$manta_file)))
{
manta=read.table(file=opt$manta_file,header = TRUE,sep = '\t')
manta_calls = manta %>% select(ends_with("Called"))
manta_calls <- manta_calls %>% rename_all(list(~ str_replace(.,"Target_Called", "CALL_Manta")))
manta_source = manta %>% select(ends_with("Ig_Loci"))
manta_source <- manta_source %>% rename_all(list(~ str_replace(.,"Ig_Loci", "IgSource")))
call_list <- append(call_list, manta_calls)
}
#gammit
if(!is.null(opt$gammit_file) && file.exists(as.character(opt$gammit_file)))
{
gammit = read.table(file=opt$gammit_file,header = TRUE,sep = '\t')
gammit_calls = gammit %>% select(ends_with("Call"))
gammit_calls <- gammit_calls %>% rename_all(list(~ str_replace(.,"Call", "CALL_Gammit")))
gammit_source = gammit %>% select(ends_with("Ig_Loci"))
gammit_source <- gammit_source %>% rename_all(list(~ str_replace(.,"Ig_Loci", "IgSource")))
call_list <- append(call_list, gammit_calls)
}
#merge
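# (Added note: vctrs::vec_cbind() splices the Specimen tibble and the per-caller 0/1 call columns into a single data frame; the CALLER_COUNT columns below sum those 0/1 flags, which assumes each input flat file holds a single row for this specimen.)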
combined_calls=vctrs::vec_cbind(!!!call_list)
combined_calls= combined_calls %>%
mutate (NSD2_CALLER_COUNT = combined_calls %>% select(starts_with("NSD2_")) %>% sum(),
NSD2_Summary_CALL = if_else(NSD2_CALLER_COUNT >= opt$count, 1, 0),
MAF_CALLER_COUNT = combined_calls %>% select(starts_with("MAF_")) %>% sum(),
MAF_Summary_CALL = if_else(MAF_CALLER_COUNT >= opt$count, 1, 0),
MAFA_CALLER_COUNT = combined_calls %>% select(starts_with("MAFA_")) %>% sum(),
MAFA_Summary_CALL = if_else(MAFA_CALLER_COUNT >= opt$count, 1, 0),
MAFB_CALLER_COUNT = combined_calls %>% select(starts_with("MAFB_")) %>% sum(),
MAFB_Summary_CALL = if_else(MAFB_CALLER_COUNT >= opt$count, 1, 0),
MYC_CALLER_COUNT = combined_calls %>% select(starts_with("MYC_")) %>% sum(),
MYC_Summary_CALL = if_else(MYC_CALLER_COUNT >= opt$count, 1, 0),
CCND1_CALLER_COUNT = combined_calls %>% select(starts_with("CCND1_")) %>% sum(),
CCND1_Summary_CALL = if_else(CCND1_CALLER_COUNT >= opt$count, 1, 0),
CCND2_CALLER_COUNT = combined_calls %>% select(starts_with("CCND2_")) %>% sum(),
CCND2_Summary_CALL = if_else(CCND2_CALLER_COUNT >= opt$count, 1, 0),
CCND3_CALLER_COUNT = combined_calls %>% select(starts_with("CCND3_")) %>% sum(),
CCND3_Summary_CALL = if_else(CCND3_CALLER_COUNT >= opt$count, 1, 0)
)
# Add Ig source if it matches across all callers
NSD2_IgSource <- c(if(exists("gammit_source")){ gammit_source$NSD2_IgSource },
if(exists("manta_source")){ manta_source$NSD2_IgSource },
if(exists("pair_source")){ pair_source$NSD2_IgSource })
CCND1_IgSource <- c(if(exists("gammit_source")){ gammit_source$CCND1_IgSource },
if(exists("manta_source")){ manta_source$CCND1_IgSource },
if(exists("pair_source")){ pair_source$CCND1_IgSource })
CCND2_IgSource <- c(if(exists("gammit_source")){ gammit_source$CCND2_IgSource },
if(exists("manta_source")){ manta_source$CCND2_IgSource },
if(exists("pair_source")){ pair_source$CCND2_IgSource })
CCND3_IgSource <- c(if(exists("gammit_source")){ gammit_source$CCND3_IgSource },
if(exists("manta_source")){ manta_source$CCND3_IgSource },
if(exists("pair_source")){ pair_source$CCND3_IgSource })
MYC_IgSource <- c(if(exists("gammit_source")){ gammit_source$MYC_IgSource },
if(exists("manta_source")){ manta_source$MYC_IgSource },
if(exists("pair_source")){ pair_source$MYC_IgSource })
MAF_IgSource <- c(if(exists("gammit_source")){ gammit_source$MAF_IgSource },
if(exists("manta_source")){ manta_source$MAF_IgSource },
if(exists("pair_source")){ pair_source$MAF_IgSource })
MAFA_IgSource <- c(if(exists("gammit_source")){ gammit_source$MAFA_IgSource },
if(exists("manta_source")){ manta_source$MAFA_IgSource },
if(exists("pair_source")){ pair_source$MAFA_IgSource })
MAFB_IgSource <- c(if(exists("gammit_source")){ gammit_source$MAFB_IgSource },
if(exists("manta_source")){ manta_source$MAFB_IgSource },
if(exists("pair_source")){ pair_source$MAFB_IgSource })
NSD2_IgSource <- NSD2_IgSource[NSD2_IgSource != 0]
CCND1_IgSource <- CCND1_IgSource[CCND1_IgSource != 0]
CCND2_IgSource <- CCND2_IgSource[CCND2_IgSource != 0]
CCND3_IgSource <- CCND3_IgSource[CCND3_IgSource != 0]
MYC_IgSource <- MYC_IgSource[MYC_IgSource != 0]
MAF_IgSource <- MAF_IgSource[MAF_IgSource != 0]
MAFA_IgSource <- MAFA_IgSource[MAFA_IgSource != 0]
MAFB_IgSource <- MAFB_IgSource[MAFB_IgSource != 0]
combined_calls$NSD2_IgSource = ifelse(length(unique(NSD2_IgSource))==1, unique(NSD2_IgSource),0)
combined_calls$CCND1_IgSource = ifelse(length(unique(CCND1_IgSource))==1, unique(CCND1_IgSource),0)
combined_calls$CCND2_IgSource = ifelse(length(unique(CCND2_IgSource))==1, unique(CCND2_IgSource),0)
combined_calls$CCND3_IgSource = ifelse(length(unique(CCND3_IgSource))==1, unique(CCND3_IgSource),0)
combined_calls$MYC_IgSource = ifelse(length(unique(MYC_IgSource))==1, unique(MYC_IgSource),0)
combined_calls$MAF_IgSource = ifelse(length(unique(MAF_IgSource))==1, unique(MAF_IgSource),0)
combined_calls$MAFA_IgSource = ifelse(length(unique(MAFA_IgSource))==1, unique(MAFA_IgSource),0)
combined_calls$MAFB_IgSource = ifelse(length(unique(MAFB_IgSource))==1, unique(MAFB_IgSource),0)
combined_calls=combined_calls[,order(colnames(combined_calls), decreasing = TRUE)]
# combined_calls=combined_calls %>% relocate(Specimen)
write("Save results...\n", stderr())
write_tsv(combined_calls, opt$outfile, append = FALSE, na="NA")
write("Done.\n", stderr())
Source: /required_scripts/summarize_Ig_4b93aee.R from repo tgen/coyote (permissive license; language: R)