Dataset schema (one row per source file):
  content       large_string   (length 0 to 6.46M)
  path          large_string   (length 3 to 331)
  license_type  large_string   (2 values)
  repo_name     large_string   (length 5 to 125)
  language      large_string   (1 value)
  is_vendor     bool           (2 classes)
  is_generated  bool           (2 classes)
  length_bytes  int64          (4 to 6.46M)
  extension     large_string   (75 values)
  text          string         (length 0 to 6.46M)
###Function to get model fit diagnostics given a STBDwDM object
#'
#' diagnostics
#'
#' Calculates diagnostic metrics using output from the \code{\link{STBDwDM}} model.
#'
#' @param obj A \code{\link{STBDwDM}} model object for which diagnostics are desired.
#'
#' @param diags A vector of character strings indicating the diagnostics to compute.
#'  Options include: Deviance Information Criterion ("dic"), d-infinity ("dinf") and
#'  Watanabe-Akaike information criterion ("waic"). At least one option must be included.
#'  Note: The probit model cannot compute the DIC or WAIC diagnostics due to computational
#'  issues with computing the multivariate normal CDF.
#'
#' @param keepDeviance A logical indicating whether the posterior deviance distribution
#'  is returned (default = FALSE).
#'
#' @param keepPPD A logical indicating whether the posterior predictive distribution
#'  at each observed location is returned (default = FALSE).
#'
#' @details To assess model fit, DIC, d-infinity and WAIC are used. DIC is based on the
#'  deviance statistic and penalizes for the complexity of a model with an effective
#'  number of parameters estimate pD (Spiegelhalter et al 2002). The d-infinity posterior
#'  predictive measure is an alternative diagnostic tool to DIC, where d-infinity = P + G.
#'  The G term decreases as goodness of fit increases, and P, the penalty term, inflates
#'  as the model becomes over-fit, so small values of both of these terms and, thus, small
#'  values of d-infinity are desirable (Gelfand and Ghosh 1998). WAIC is invariant to
#'  parametrization and is asymptotically equal to Bayesian cross-validation
#'  (Watanabe 2010). WAIC = -2 * (lppd - p_waic_2), where lppd is the log pointwise
#'  predictive density and p_waic_2 is the estimated effective number of parameters
#'  based on the variance estimator from Vehtari et al. 2016 (p_waic_1 is the mean
#'  estimator).
#'
#' @return \code{diagnostics} returns a list containing the diagnostics requested and
#'  possibly the deviance and/or posterior predictive distribution objects.
#'
#' @author Samuel I. Berchuck
#'
#' @references Gelfand, A. E., & Ghosh, S. K. (1998). Model choice: a minimum posterior predictive loss approach. Biometrika, 1-11.
#' @references Spiegelhalter, D. J., Best, N. G., Carlin, B. P., & Van Der Linde, A. (2002). Bayesian measures of model complexity and fit. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 64(4), 583-639.
#' @references Vehtari, A., Gelman, A., & Gabry, J. (2016). Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC. Statistics and Computing, 1-20.
#' @references Watanabe, S. (2010). Asymptotic equivalence of Bayes cross validation and widely applicable information criterion in singular learning theory. Journal of Machine Learning Research, 11(Dec), 3571-3594.
#'
#' @export
diagnostics <- function(obj, diags = c("dic", "dinf", "waic"),
                        keepDeviance = FALSE, keepPPD = FALSE) {

  ###Check inputs
  if (missing(obj)) stop('"obj" is missing')
  if (!is.STBDwDM(obj)) stop('"obj" must be of class STBDwDM')
  if (sum(!diags %in% c("dic", "dinf", "waic")) > 0) stop('"diags" must contain at least one of "dic", "dinf" or "waic"')
  if (!is.logical(keepDeviance)) stop('"keepDeviance" must be a logical')
  if (!is.logical(keepPPD)) stop('"keepPPD" must be a logical')

  ###Unload STBDwDM objects
  DatObj <- obj$datobj
  DatAug <- obj$dataug

  ###Check inputs again
  if ((DatObj$FamilyInd == 1) & (sum(diags %in% c("dic", "waic")) > 0))
    stop('"probit" model cannot be used with "dic" or "waic"') #probit model can't do likelihood diagnostics

  ###Set seed for reproducibility
  set.seed(54)

  ###Set data objects
  M <- DatObj$M
  Z <- DatObj$Z
  AdjacentEdgesBoolean <- DatObj$AdjacentEdgesBoolean
  W <- DatObj$W
  EyeM <- DatObj$EyeM
  Rho <- DatObj$Rho
  FamilyInd <- DatObj$FamilyInd
  Nu <- DatObj$Nu
  YObserved <- DatObj$YObserved
  WeightsInd <- DatObj$WeightsInd

  ###Construct parameter object
  Para <- list()
  Para$Mu <- obj$mu
  Para$Tau2 <- obj$tau2
  Para$Alpha <- obj$alpha
  MuMean <- apply(obj$mu, 2, mean)
  Tau2Mean <- apply(obj$tau2, 2, mean)
  AlphaMean <- apply(obj$alpha, 2, mean)
  CovMean <- JointCovarianceCube(WAlphaCube(AlphaMean, Z, W, M, Nu, WeightsInd), Tau2Mean, EyeM, Rho, M, Nu)
  Para$MuMean <- MuMean
  Para$CovMean <- CovMean

  ###Set mcmc object
  NKeep <- dim(obj$phi)[1]

  ###Compute log-likelihood using Rcpp function GetLogLik
  LogLik <- NULL
  if (("dic" %in% diags) | ("waic" %in% diags)) {

    ###Compute log-likelihood
    requireNamespace("mvtnorm", quietly = TRUE) #Required for pmvnorm in Rcpp function
    if (DatObj$FamilyInd == 0) {
      NBelowCount <- c(0, 0)
      YStarNonZero <- list()
      for (i in 1:DatObj$Nu) YStarNonZero[[i]] <- i
      DatAug$NBelowCount <- NBelowCount
      DatAug$YStarNonZero <- YStarNonZero
    }
    LogLik <- GetLogLik(DatObj, Para, DatAug, NKeep)

  }

  ###Compute DIC diagnostics
  dic <- NULL
  if ("dic" %in% diags) {

    ###Compute mean log-likelihood (same call for all supported families)
    if (FamilyInd == 0) LogLikMean <- GetLogLikMean(DatObj, Para, DatAug)
    if (FamilyInd == 1) LogLikMean <- GetLogLikMean(DatObj, Para, DatAug)
    if (FamilyInd == 2) LogLikMean <- GetLogLikMean(DatObj, Para, DatAug)

    ###Calculate DIC objects
    DBar <- -2 * mean(LogLik)
    DHat <- -2 * LogLikMean
    pD <- DBar - DHat
    DIC <- DBar + pD
    dic <- list(dic = DIC, pd = pD)

  }

  ###Compute PPD diagnostics
  ppd <- PPD <- NULL
  if ("dinf" %in% diags) {

    ###Get PPD
    PPD <- SamplePPD(DatObj, Para, NKeep)

    ###Compute PPD diagnostics
    PPDMean <- apply(PPD, 1, mean)
    PPDVar <- apply(PPD, 1, var)
    P <- sum(PPDVar)
    G <- sum((PPDMean - YObserved) ^ 2)
    DInf <- G + P
    ppd <- list(p = P, g = G, dinf = DInf)

  }

  ###Compute WAIC diagnostics
  waic <- NULL
  if ("waic" %in% diags) {

    ###Get WAIC
    # The calculation of WAIC: returns lppd, p_waic_1, p_waic_2, and waic, which we define
    # as -2 * (lppd - p_waic_2), as recommended in BDA
    lppd <- log(apply(exp(LogLik), 2, mean))
    p_waic_1 <- 2 * (lppd - apply(LogLik, 2, mean))
    p_waic_2 <- apply(LogLik, 2, var)
    waic <- -2 * lppd + 2 * p_waic_2
    waic <- list(waic = waic, p_waic = p_waic_2, lppd = lppd, p_waic_1 = p_waic_1)

  }

  ###Output diagnostics
  if (!keepDeviance & !keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic)
  if (!keepDeviance & keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic, PPD = t(PPD))
  if (keepDeviance & !keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic, deviance = -2 * LogLik)
  if (keepDeviance & keepPPD) diags <- list(dic = dic, dinf = ppd, waic = waic, deviance = -2 * LogLik, PPD = t(PPD))
  return(diags)

}
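The DIC, d-infinity and WAIC bookkeeping in the roxygen block and function above can be illustrated on their own. The following is a minimal sketch under the assumption of a toy normal model: the log-likelihood matrix is simulated directly rather than produced by GetLogLik(), and the predictive draws stand in for SamplePPD(), so none of it reflects the actual STBDwDM likelihood.

## Sketch of DIC, WAIC and d-infinity on a simulated posterior (NKeep iterations x M observations)
set.seed(1)
NKeep <- 500; M <- 40
YObserved <- rnorm(M, mean = 2, sd = 1)
MuDraws <- rnorm(NKeep, mean = 2, sd = 0.1)    # posterior draws of a single mean parameter
LogLik <- t(sapply(MuDraws, function(mu) dnorm(YObserved, mu, 1, log = TRUE)))

## DIC: DBar is the posterior mean deviance, DHat the deviance at the posterior mean
DBar <- -2 * mean(rowSums(LogLik))
DHat <- -2 * sum(dnorm(YObserved, mean(MuDraws), 1, log = TRUE))
pD   <- DBar - DHat
DIC  <- DBar + pD

## WAIC: log pointwise predictive density minus the variance-based penalty p_waic_2
lppd     <- sum(log(apply(exp(LogLik), 2, mean)))
p_waic_2 <- sum(apply(LogLik, 2, var))
WAIC     <- -2 * (lppd - p_waic_2)

## d-infinity: P (sum of predictive variances) plus G (sum of squared bias)
PPD  <- sapply(MuDraws, function(mu) rnorm(M, mu, 1))   # M x NKeep predictive draws
P    <- sum(apply(PPD, 1, var))
G    <- sum((apply(PPD, 1, mean) - YObserved)^2)
DInf <- P + G

c(DIC = DIC, pD = pD, WAIC = WAIC, DInf = DInf)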
/fuzzedpackages/womblR/R/DIAG_diagnostics.R
no_license
akhikolla/testpackages
R
false
false
6,775
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icon_sets.R
\name{icon_sets}
\alias{icon_sets}
\title{Add colored icons to cells in a column}
\usage{
icon_sets(
  data,
  icons = c("circle"),
  colors = c("#67a9cf", "#808080", "#ef8a62"),
  opacity = 1,
  icon_position = "right",
  icon_ref = NULL,
  icon_size = 16,
  icon_color_ref = NULL,
  number_fmt = NULL,
  animation = "1s ease"
)
}
\arguments{
\item{data}{Dataset containing at least one numeric column.}

\item{icons}{A vector of three icons from the Font Awesome library (via shiny).
Icons should be given in order from low values to high values.
Default icons are circles.}

\item{colors}{A vector of three colors to color the icons.
Colors should be given in order from low values to high values.
Default colors provided are blue-grey-orange: c("#67a9cf", "#808080", "#ef8a62").
Can use R's built-in colors or other color packages.}

\item{opacity}{A value between 0 and 1 that adjusts the opacity in colors.
A value of 0 is fully transparent, a value of 1 is fully opaque.
Default is 1.}

\item{icon_position}{Position of icon relative to numbers.
Options are "left", "right", "above", "below", or "over".
Default is "right".}

\item{icon_ref}{Optionally assign icons from another column by providing the name of the
column containing the icons in quotes. Only one icon can be provided per cell.
Default is NULL.}

\item{icon_size}{A value representing the size of the icon in px. Default is 16.}

\item{icon_color_ref}{Optionally assign color to the icons from another column by providing
the name of the column containing the icon colors in quotes. Only one color can be
provided per cell. Default is NULL.}

\item{number_fmt}{Optionally format numbers using formats from the scales package.
Default is set to NULL.}

\item{animation}{Control the duration and timing function of the animation when
sorting/updating values shown on a page. See
[CSS transitions](https://developer.mozilla.org/en-US/docs/Web/CSS/transition)
for available timing functions and examples.
Animation can be turned off by setting to "none". Default is "1s ease".}
}
\value{
a function that applies an icon to a column of numeric values.
}
\description{
The `icon_sets()` function conditionally adds an icon from the Font Awesome library
(via shiny) to each cell of a column and assigns a color depending on their value in
relation to other values in that particular column.
Any number of icons and any number of colors can be used.
The number of icons and colors determines how the values are shown from low values to high values.
The icons can be positioned over, above, below, or to the right or left of the values.
The size of the icon can be adjusted.
Icons and icon colors can be provided via another reference column in the dataset,
which is useful when assigning icons/colors to particular occurrences.
It should be placed within the cell argument in reactable::colDef.
}
\examples{
data <- MASS::Cars93[20:49, c("Make", "MPG.city", "MPG.highway")]

## By default, icon_sets() assigns blue circles to the lowest-third values,
## grey circles to the middle-third values,
## and orange circles to the top-third values
reactable(data,
  defaultColDef = colDef(cell = icon_sets(data)))

## Assign custom colors
reactable(data,
  defaultColDef = colDef(cell = icon_sets(data,
    colors = c("tomato", "grey", "dodgerblue"))))

## Assign icons from Font Awesome's icon library
reactable(data,
  defaultColDef = colDef(cell = icon_sets(data,
    icons = c("arrow-down", "minus", "arrow-up"))))

## Use number_fmt to format numbers using the scales package
car_prices <- MASS::Cars93[20:49, c("Make", "Price")]

reactable(car_prices,
  defaultColDef = colDef(cell = icon_sets(car_prices,
    number_fmt = scales::dollar)))

## Position icons relative to the numbers. Options are left, right, above, below, or over.
reactable(car_prices,
  defaultColDef = colDef(cell = icon_sets(car_prices,
    icon_position = "above")))
}
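The low/middle/high bucketing that the description above refers to (values split from low to high across however many icons and colors are supplied) can be mimicked in base R. This is a sketch of the bucketing idea only, using the same MASS::Cars93 data as the examples; it is not the internal implementation of icon_sets().

## Sketch: split a numeric column into as many ordered groups as there are icons,
## then look up an icon and color per group (rank-based terciles here).
icons  <- c("arrow-down", "minus", "arrow-up")
colors <- c("#67a9cf", "#808080", "#ef8a62")

x   <- MASS::Cars93$MPG.city[20:49]
grp <- cut(rank(x, ties.method = "first"), breaks = length(icons), labels = FALSE)

data.frame(value = x, icon = icons[grp], color = colors[grp])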
/man/icon_sets.Rd
no_license
Arrendi/reactablefmtr
R
false
true
3,994
rd
## ---- out.width = "700px"------------------------------------------------ knitr::include_graphics("https://raw.githubusercontent.com/AlexiaJM/LEGIT/master/images/GxE_testing_strong.png") ## ---- out.width = "700px"------------------------------------------------ knitr::include_graphics("https://raw.githubusercontent.com/AlexiaJM/LEGIT/master/images/GxE_testing_weak.png") ## ------------------------------------------------------------------------ set.seed(777) library(LEGIT) ex_dia = example_with_crossover(N=250, c=0, coef_main = c(3,1,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_dia$data, genes=ex_dia$G, env=ex_dia$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ------------------------------------------------------------------------ GxE_test_BIC_ = GxE_interaction_test(data=ex_dia$data, genes=ex_dia$G, env=ex_dia$E, formula_noGxE = y ~ 1, crossover = c(0, 10), criterion="BIC") GxE_test_BIC_$results ## ----fig1, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$vantage_sensitivity_WEAK, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5) ## ------------------------------------------------------------------------ ex_dia_s = example_with_crossover(N=250, c=0, coef_main = c(3,0,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_dia_s$data, genes=ex_dia_s$G, env=ex_dia_s$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ----fig2, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$vantage_sensitivity_STRONG, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5) ## ------------------------------------------------------------------------ ex_ds = example_with_crossover(N=250, c=5, coef_main = c(3+5,1,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_ds$data, genes=ex_ds$G, env=ex_ds$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ----fig3, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$diff_suscept_WEAK, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5) ## ------------------------------------------------------------------------ ex_ds_s = example_with_crossover(N=250, c=5, coef_main = c(3+5,0,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_ds_s$data, genes=ex_ds_s$G, env=ex_ds_s$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ----fig4, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$diff_suscept_STRONG, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5)
/data/genthat_extracted_code/LEGIT/vignettes/GxE_testing.R
no_license
surayaaramli/typeRrh
R
false
false
2,803
r
## ---- out.width = "700px"------------------------------------------------ knitr::include_graphics("https://raw.githubusercontent.com/AlexiaJM/LEGIT/master/images/GxE_testing_strong.png") ## ---- out.width = "700px"------------------------------------------------ knitr::include_graphics("https://raw.githubusercontent.com/AlexiaJM/LEGIT/master/images/GxE_testing_weak.png") ## ------------------------------------------------------------------------ set.seed(777) library(LEGIT) ex_dia = example_with_crossover(N=250, c=0, coef_main = c(3,1,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_dia$data, genes=ex_dia$G, env=ex_dia$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ------------------------------------------------------------------------ GxE_test_BIC_ = GxE_interaction_test(data=ex_dia$data, genes=ex_dia$G, env=ex_dia$E, formula_noGxE = y ~ 1, crossover = c(0, 10), criterion="BIC") GxE_test_BIC_$results ## ----fig1, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$vantage_sensitivity_WEAK, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5) ## ------------------------------------------------------------------------ ex_dia_s = example_with_crossover(N=250, c=0, coef_main = c(3,0,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_dia_s$data, genes=ex_dia_s$G, env=ex_dia_s$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ----fig2, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$vantage_sensitivity_STRONG, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5) ## ------------------------------------------------------------------------ ex_ds = example_with_crossover(N=250, c=5, coef_main = c(3+5,1,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_ds$data, genes=ex_ds$G, env=ex_ds$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ----fig3, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$diff_suscept_WEAK, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5) ## ------------------------------------------------------------------------ ex_ds_s = example_with_crossover(N=250, c=5, coef_main = c(3+5,0,2), sigma=1, seed=7) GxE_test_BIC = GxE_interaction_test(data=ex_ds_s$data, genes=ex_ds_s$G, env=ex_ds_s$E, formula_noGxE = y ~ 1, crossover = c("min","max"), criterion="BIC") GxE_test_BIC$results ## ----fig4, fig.height = 5, fig.width = 5--------------------------------- plot(GxE_test_BIC$fits$diff_suscept_STRONG, xlim=c(0,10), ylim=c(3,13),cex.leg=1.4, cex.axis=1.5, cex.lab=1.5)
library(qqman)
library(ggplot2)
library(grid)

#qq plot bayes factors with multiple window sizes:

setLayout <- function(xdim=1,ydim=1){
  initmat <- c(0,0,2,0,1,3)
  imat3 <- c(0,1,1,1,1,1,1,1)
  imat2 <- c(0,1,1,1,1,0,0,0)
  imat1 <- imat2
  row1mat <- c(0,4,4,4,4,0,0,0)
  row2mat <- c(0,2,2,2,2,0,0,0)
  row3mat <- c(0,1,1,1,1,3,3,3)
  layoutmat <- matrix(rep(0,length(row1mat)*xdim*3*ydim),ncol=xdim*length(row1mat))
  for (i in 1:ydim){
    r1=i*3 - 2
    r2=i*3 - 1
    r3=i*3
    for (j in 1:xdim) {
      curx = j-1
      cury = i-1
      curpos <- curx + cury*xdim
      cols=seq(length(row1mat)*j-(length(row1mat)-1),length(row1mat)*j)
      layoutmat[r1,cols] <- row1mat + (curpos * imat1*4)
      layoutmat[r2,cols] <- row2mat + (curpos * imat2*4)
      layoutmat[r3,cols] <- row3mat + (curpos * imat3*4)
    }
  }
  mywidths <- rep(c(1/100,20/100,20/100,20/100,20/100,19/3/100,19/3/100,19/3/100),xdim*length(row1mat))
  myheights <- rep(c(1.5/10,1/10,7.5/10),ydim)
  layout(layoutmat,widths=mywidths,heights=myheights)
}

scatterBarLay <- function(x, dcol="blue", fit=NA, lhist=20, num.dnorm=5*lhist, bigmain="", ...){
  ## check input
  stopifnot(ncol(x)==2)
  ## set up layout and graphical parameters
  ospc <- 0.5 # outer space
  pext <- 4   # par extension down and to the left
  bspc <- 1   # space between scatter plot and bar plots
  #par. <- par(mar=c(pext, pext, bspc, bspc),
  #            oma=rep(ospc, 4)) # plot parameters
  ## scatter plot
  par(mar = c(5.1,4.1,0,0))
  plot(x, xlim=range(x[,1]), ylim=range(x[,2]), ...)
  if (!identical(fit, NA)) { # guard works for both the NA default and a fitted lm
    abline(fit)
  }
  ## 3) determine barplot and height parameter
  ## histogram (for barplot-ting the density)
  xhist <- hist(x[,1], plot=FALSE, breaks=seq(from=min(x[,1]), to=max(x[,1]), length.out=lhist))
  yhist <- hist(x[,2], plot=FALSE, breaks=seq(from=min(x[,2]), to=max(x[,2]), length.out=lhist)) # note: this uses probability=TRUE
  ## determine the plot range and all the things needed for the barplots and lines
  # xx <- seq(min(x[,1]), max(x[,1]), length.out=num.dnorm) # evaluation points for the overlaid density
  # xy <- dnorm(xx, mean=mean(x[,1]), sd=sd(x[,1])) # density points
  # yx <- seq(min(x[,2]), max(x[,2]), length.out=num.dnorm)
  # yy <- dnorm(yx, mean=mean(x[,2]), sd=sd(x[,2]))
  ## barplot and line for x (top)
  #par(mar=c(0, pext, 0, 0))
  par(mar = c(0,4.1,0,2.1))
  barplot(xhist$density, axes=FALSE, ylim=c(0, max(xhist$density)), space=0) # barplot
  #lines(seq(from=0, to=lhist-1, length.out=num.dnorm), xy, col=dcol) # line
  ## barplot and line for y (right)
  #par(mar=c(pext, 0, 0, 0))
  par(mar = c(5.1,0,0,2.1))
  barplot(yhist$density, axes=FALSE, xlim=c(0, max(yhist$density)), space=0, horiz=TRUE) # barplot
  #lines(yy, seq(from=0, to=lhist-1, length.out=num.dnorm), col=dcol) # line
  ## restore parameters
  #add title:
  #mtext(bigmain, side=3, outer=TRUE, line=-3)
  par(mar = c(0,0,0,0))
  plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
  text(x = 0.5, y = 0.5, bigmain, cex = 2, col = "black")
  par(mar = c(5.1,4.1,4.1,2.1))
  #par(par.)
}

getQuantileMini <- function(x,p1=0,p2=1){
  q1 = quantile(x,p1)
  q2 = quantile(x,p2)
  out = x[x>=q1&x<=q2]
  return(out)
}

delta = function(par,dataVector){
  Nf = length(dataVector)
  Nlb = as.integer(0.10*Nf)
  Nub = as.integer(0.80*Nf)
  quants = (Nlb:Nub)/Nf - 1/(2*Nf)
  myDist = par[1]*qgamma(quants,par[2],par[3])
  SS = sum((myDist-dataVector[Nlb:Nub])^2) # the log helps with convergence -- see "tol"
  cat(log(SS),"\t",par[1],"\t",par[2],"\t",par[3],"\n") # this lets you watch "progress", each iteration there are several evaluations
  log(SS)
}

qqplot_fst_lfmm_xtx_bf <- function(inpath_fst,inpath_xtx,inpath_bf,inpath_sweed,inpref_lfmm,insuf_lfmm,outpref,npops,wins,zs,bfs,fsts,
                                   inpath_xtx_sim="None", inpath_bf_sim="None", inpath_fst_sim="None") {
  #here, inpath is the path to a snptable with bf values, and outpref is the prefix for all plots
  #npops is the number of populations (used for computing degrees of freedom)
  #wins is a vector of the sliding window sizes to be used for plotting
  tifoutpath = paste(outpref,"_walvee_fst_gamma.tif",sep="")
  tiff(tifoutpath, width=2*5*600, height=1*5*600, res=600, compression="lzw")
  #width=length(wins)*4*600, height=2*4*600, res=600, compression="lzw")
  #par(mfrow=c(2,length(wins)))
  #par(mfrow=c(1,2))
  #use grid viewports:
  #grid.newpage()
  nrows = 2
  ncols = 1
  setLayout(nrows,ncols)
  #pushViewport(viewport(layout=grid.layout(nrows,ncols)))
  #par(mar=c(3,2,4,1))
  all_labels = LETTERS
  labels_index = 1
  tlin=2
  label_cex = 1.5
  print("done1")

  #######
  #fst:
  #data <- readRDS(inpath_fst)
  mydata <- readRDS("/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/walvee_and_ltervt011/python_fst/only-PASS-Q30-SNPs-cov_v2_sorted_walvee_fst_cens_multiwin_fst/only-PASS-Q30-SNPs-cov_v2_sorted_walvee_fst_cens_multiwin_snpnames_chromnums.RDS")
  myPar <- scan("/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/bayenv_5repavg/v4_full_data_fixedinput_splitout/hill_climber_outs/fst/hill70_myPar_walvee_gamma.txt")
  myxtx <- mydata$fst_0_1
  myxtx70 <- getQuantileMini(myxtx,0.1,0.8)
  smyxtx <- sort(myxtx)
  Nf = length(myxtx)
  N = length(myxtx)
  theory = qgamma((1:N)/N - 1/(2*Nf),myPar[1],myPar[2])
  exp = smyxtx
  N10 = round(N * .1)
  N80 = round(N * .8)
  theory70 = theory[N10:N80]
  exp70 = exp[N10:N80]
  fit <- lm(exp70~theory70)
  scatterBarLay(cbind(theory,exp),
                bigmain=expression(atop('F'['ST']*' Q-Q plot','for WAL vs. EE')),
                xlab=expression('Gamma distribution fit to F'['ST']),
                ylab=expression('F'['ST']), pch=20, fit=fit)
  #plot(theory,exp,main=expression('F'['ST']*' Q-Q plot'),xlab=expression('Gamma distribution fit to F'['ST']),ylab=expression('F'['ST']))
  #abline(fit)
  #grid()
  #rug(theory,side=1)
  #rug(exp,side=2)
  labels_index = labels_index + 1
  #qq <- qqplot(theorybfs,mydat,main=paste("FST: ",mywin,"-snp window",sep=""),xlab="uniform p-value FST",ylab="experimental FST",
  #             pch=".")
  #grid()
  #testq <- getQuantileMini(mydat,0.1,0.8)
  #theoryq <- getQuantileMini(theorybfs,0.1,0.8)
  #testq <- na.omit(testq)
  #theoryq <- na.omit(theoryq)
  #if (length(testq) < length(theoryq)) {theoryq <- theoryq[1:length(testq)]}
  #if (length(theoryq) < length(testq)) {testq <- testq[1:length(theoryq)]}
  #fit <- lm(testq~theoryq,data=qq)
  #abline(fit)
  ##lines(rbind(c(-10000,-10000),c(10000,10000)))
  print("done4")

  #######
  #xtx:
  mydata <- readRDS("/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/walvee_and_ltervt011/python_fst/only-PASS-Q30-SNPs-cov_v2_sorted_walvee_fst_cens_multiwin_fst/only-PASS-Q30-SNPs-cov_v2_sorted_walvee_fst_cens_multiwin_snpnames_chromnums.RDS")
  myPar <- scan("/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/bayenv_5repavg/v4_full_data_fixedinput_splitout/hill_climber_outs/fst/hill70_myPar_walvee_25win_gamma.txt")
  myxtx <- mydata$fst_0_1_win25
  myxtx70 <- getQuantileMini(myxtx,0.1,0.8)
  smyxtx <- sort(myxtx)
  Nf = length(myxtx)
  N = length(myxtx)
  theory = qgamma((1:N)/N - 1/(2*Nf),myPar[1],myPar[2])
  exp = smyxtx
  N10 = round(N * .1)
  N80 = round(N * .8)
  theory70 = theory[N10:N80]
  exp70 = exp[N10:N80]
  fit <- lm(exp70~theory70)
  scatterBarLay(cbind(theory,exp),
                bigmain=expression(atop('F'['ST']*' Q-Q plot','for WAL vs. EE (25-SNP window)')),
                xlab=expression('Gamma distribution fit to F'['ST']),
                ylab=expression('F'['ST']), pch=20, fit=fit)
  #plot(theory,exp,main=expression('X'^'T'*'X Q-Q plot'),xlab=expression('Gamma distribution fit to X'^'T'*'X'),ylab=expression('X'^'T'*'X'))
  #abline(fit)
  #grid()
  #rug(theory,side=1)
  #rug(exp,side=2)
  #mtext(all_labels[labels_index],3, line=tlin, adj=0, cex=label_cex)
  #labels_index = labels_index + 1
  #print("done5")
  dev.off()
}

myinpref_lfmm <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/lfmm/lfmm_replicated_9anc/dsbig_snp_freqmat_fused_cens.txt.K9.s"
myinsuf_lfmm <- ".9.zscoreavg.withchroms.withhead.sorted.multiwin.RDS"
#myin_bf <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/full_fst_dsbig_fused/bayenv/v2_complete/bf_multiwin/dsbig_fused_partial_xtx_and_bf_withchroms_2sorted_cens_withhead_uniq_multiwin_plusxtx.RDS"
myin_fst <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/bwa_alignments/snp_tables/deduped/new_downsampled/walvee_and_ltervt011/python_fst/only-PASS-Q30-SNPs-cov_v2_sorted_walvee_fst_cens_multiwin_fst/only-PASS-Q30-SNPs-cov_v2_sorted_walvee_fst_cens_multiwin_snpnames_chromnums.RDS"
myin_xtx <- "dsbig_fused_xtx_and_bf_5combo_withchroms_2sorted_cens_withhead_uniq_multiwin_plusxtx.RDS"
myin_bf <- myin_xtx
myin_sweed <- "/home/jbaldwin/new_home/wild_shrimp_data/for_shrimp_paper_1/sweed_out/v2_complete/out_combo/sweed_full_allchroms_sorted.RDS"
myin_xtx_sim <- "simulated_mean_XtX_out.normalized_transposed_tank_info_11pop.RDS"
myin_bf_sim <- "simulated_mean_bf_environ.normalized_transposed_tank_info_11pop.RDS"
myoutpref <- "qq_plot_allstats_withlabels_5avg"
mynpops <- 11
mywins <- c(1)
myzs <- c(8)
mybfs <- c(23)
myfsts <- c("mean")

#function(inpath_fst,inpath_xtx,inpath_bf,inpref_lfmm,insuf_lfmm,outpref,npops,wins,zs,bfs,fsts)
qqplot_fst_lfmm_xtx_bf(myin_fst,myin_xtx,myin_bf,myin_sweed,myinpref_lfmm,myinsuf_lfmm,
                       myoutpref,mynpops,mywins, myzs,mybfs,myfsts,
                       inpath_xtx_sim=myin_xtx_sim,inpath_bf_sim=myin_bf_sim)

# a<- rnorm(1000,0,1)
# b<-rnorm(1000,0,2)
# c<-rnorm(1000,0,3)
# d<-rnorm(1000,0,4)
# #quartz(w=6,h=8)
# par(mfrow=c(2,2))
# #par(mai=c(1,0,1,1))
# par(mar=c(3,2,4,1))
# #par(plt=c(1.1,1.1,1.1,1.1))
# tlin=2
# hist(a)
# mtext("A",3, line=tlin, adj=0, cex=2)
# hist(b)
# mtext("B",3, line=tlin, adj=0, cex=2)
# hist(c)
# mtext("C",3, line=tlin, adj=0, cex=2)
# hist(d)
# mtext("D",3, line=tlin, adj=0, cex=2)
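The myPar values read above come from separate hill-climber runs that are not part of this file. As a self-contained illustration of what delta() is doing (least-squares matching of the central 10-80% quantiles to a scaled gamma), here is a minimal sketch that minimises delta() with optim() on simulated data; the simulated input, the starting values and the use of optim() are assumptions for illustration, not the authors' actual fitting procedure, and delta() from the script above is assumed to be defined.

## Sketch: fit the three delta() parameters (scale multiplier, shape, rate) to simulated
## F_ST-like data by direct minimisation. Note delta() prints progress on every evaluation.
set.seed(1)
fake_fst <- sort(rgamma(5000, shape = 2, rate = 20))   # sorted, as delta() compares ordered quantiles

fit_gamma <- optim(par = c(1, 2, 20), fn = delta, dataVector = fake_fst, method = "Nelder-Mead")
fit_gamma$par   # fitted (multiplier, shape, rate)

## Quantile-quantile check against the fitted gamma, mirroring the plotting code above
N <- length(fake_fst)
theory_q <- fit_gamma$par[1] * qgamma((1:N)/N - 1/(2*N), fit_gamma$par[2], fit_gamma$par[3])
plot(theory_q, fake_fst, pch = 20,
     xlab = "fitted gamma quantiles", ylab = "simulated values")
abline(0, 1)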
/Baldwin-Brown_2017_Scripts/main_scripts/downstream_analysis_dir/qq_plot_walvee_gamma.R
no_license
jgbaldwinbrown/jgbutils
R
false
false
10,431
r
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/weatherawhere.R
\docType{package}
\name{httr}
\alias{httr}
\alias{httr-package}
\title{\pkg{weatherawhere} is a package for gathering accurate & agriculture-specific weather data.}
\description{
\code{weatherawhere} is organised around the most common API functions to allow access
to the aWhere Weather Terrain™ data R API: \code{\link{get_token}},
\code{\link{create_query}}, \code{\link{send_query}}.
}
\details{
Requests can be modified by various config options like \code{\link{get_attribute}},
\code{\link{set_date}}, \code{\link{set_gdd}}, and \code{\link{set_grid}}.
}
/man/httr.Rd
no_license
yizhexu/weatherawhere
R
false
false
690
rd
# Simulation Design to compare VEGAS and SKAT
/sim.design.R
no_license
TanushreeHaldar/GeneBasedAnalysis
R
false
false
46
r
library(regtools)

# Import data as usual...
train_values <- read.csv(
  "../data/Richters_Predictor_Modeling_Earthquake_Damage_-_Train_Values.csv"
)
train_labels <- read.csv(
  "../data/Richters_Predictor_Modeling_Earthquake_Damage_-_Train_Labels.csv"
)

# In order for the data to work with the regtools library, we must convert all columns
# to numeric or int variables. So, we factor the target variable and then parse it as numeric.
full_data = train_values
full_data$damage_grade = train_labels$damage_grade
full_data$damage_grade = factor(full_data$damage_grade, levels = c(1, 2, 3),
                                labels = c("low damage", "medium damage", "almost destructed"))
full_data$damage_grade = as.numeric(full_data$damage_grade) - 1
head(full_data)

# We do the same with all non-numeric variables
full_data[sapply(full_data, is.character)] = lapply(full_data[sapply(full_data, is.character)], as.factor)
full_data[sapply(full_data, is.factor)] = lapply(full_data[sapply(full_data, is.factor)], function(x) {as.numeric(x)})
head(full_data)

# Let's start with the classifiers.

#### One Vs All ####

# For it to work, we must set the target column as the last one in the dataframe,
# so we can just make a subset of the dataset in the order we need. Since the target
# was already the last one, its index is 40, but when making that selection it is
# necessary.
ovatrn = ovalogtrn(3, full_data[ ,c(2, 5:39, 40)])

# Finally, to predict, we get rid of the target column and use the
# predict helper function.
ovaypred <- ovalogpred(ovatrn, full_data[,c(2,5:39)])

# The mean function over a boolean vector gives us the proportion of true/all
# values in the vector. Essentially: the accuracy.
mean(ovaypred == full_data$damage_grade)
# [1] 0.581832 Not a great result, let's keep trying.

# Same but no has_secondary_*
ovatrn = ovalogtrn(3, full_data[ ,c(2, 5:28, 40)])
ovaypred <- ovalogpred(ovatrn, full_data[,c(2, 5:28)])
mean(ovaypred == full_data$damage_grade)

# All variables
ovatrn = ovalogtrn(3, full_data)
ovaypred <- ovalogpred(ovatrn, full_data[,c(1:39)])
mean(ovaypred == full_data$damage_grade)

# Let us try with quadratic data, which may exaggerate some of the features in
# the variables and, possibly, make it a bit easier for the algorithm.
# Take all the columns we are interested in and square them. Save that subset.
quadratic_data = full_data[,c(2,5:39)]^2

# Now the subset lacks the target variable, so we append it before it is passed
# to the function via cbind (column bind).
qovadata = ovalogtrn(3, cbind(quadratic_data, full_data$damage_grade))

# We predict as usual and get our boolean vector.
qovaypred <- ovalogpred(qovadata, quadratic_data)
mean(qovaypred == full_data$damage_grade)
# [1] 0.5794606 Was even worse, but not by much.

#### All vs All ####

# For it to work, we must pass in a matrix, not a dataframe. Let us convert
# the data into matrix form, again, with the subset we are interested in, and
# the target value last.
data_matrix = data.matrix(full_data[ ,c(2,5:39, 40)])

# Call ava train function and classify.
avatrn = avalogtrn(3, data_matrix)

# Predict as usual, but keeping the target variable out. No need for matrix form
# in this function.
avaypred <- avalogpred(3, avatrn, full_data[,c(2,5:39)])
mean(avaypred == full_data$damage_grade)
# [1] 0.5822656 We did not improve much, if at all.

# Let us try again with quadratic data.
# The quadratic version was defined before, so we can just use it.
data_matrix = data.matrix(cbind(quadratic_data, full_data$damage_grade))

# Train...
avatrn = avalogtrn(3, data_matrix)

# Predict...
avaypred <- avalogpred(3, avatrn, full_data[,c(2,5:39)])
mean(avaypred == full_data$damage_grade)
# [1] 0.5746448 And it seems like the quadratic strategy did not help much.
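The one-vs-all scheme the script applies via regtools::ovalogtrn() can be spelled out with plain glm(). The following is a generic sketch on the built-in iris data, not a reimplementation of the regtools functions or of the earthquake model; the helper names ova_fit()/ova_predict() are ours.

## Sketch of one-vs-all classification with base R: fit one binary logistic model per class,
## score every observation with each model, and predict the class with the highest probability.
ova_fit <- function(X, y) {
  lapply(sort(unique(y)), function(cls) {
    dat <- data.frame(X, is_cls = as.integer(y == cls))
    glm(is_cls ~ ., data = dat, family = binomial())   # may warn if a class is perfectly separable
  })
}
ova_predict <- function(fits, X) {
  probs <- sapply(fits, function(f) predict(f, newdata = X, type = "response"))
  max.col(probs)   # column index of the winning class
}

X <- iris[, 1:4]
y <- as.integer(iris$Species)     # classes coded 1, 2, 3
fits <- ova_fit(X, y)
pred <- ova_predict(fits, X)
mean(pred == y)                   # training accuracy, computed the same way as in the script above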
/multinom_logic/ova-multinom.R
no_license
jesi-rgb/earthquake-analysis
R
false
false
3,767
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot_fits_gg}
\alias{plot_fits_gg}
\title{ggplot the fits data}
\usage{
plot_fits_gg(df, fit_data, limits = c(275, 400))
}
\arguments{
\item{df}{dataframe of spectroscopy data imported using caryscan package}

\item{fit_data}{dataframe of fits for each peak}

\item{limits}{vector of wavelength range for plotting}
}
\value{
ggplot2 plot object
}
\description{
This function prepares a ggplot2 plot of the fits
}
/man/plot_fits_gg.Rd
permissive
jonbramble/caryscan
R
false
true
506
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inline_text.R
\name{inline_text.tbl_survfit}
\alias{inline_text.tbl_survfit}
\title{Report statistics from survfit tables inline}
\usage{
\method{inline_text}{tbl_survfit}(
  x,
  variable = NULL,
  level = NULL,
  pattern = NULL,
  time = NULL,
  prob = NULL,
  column = NULL,
  estimate_fun = x$inputs$estimate_fun,
  pvalue_fun = NULL,
  ...
)
}
\arguments{
\item{x}{Object created from \link{tbl_survfit}}

\item{variable}{Variable name of statistic to present.}

\item{level}{Level of the variable to display for categorical variables.
Can also specify the 'Unknown' row. Default is \code{NULL}}

\item{pattern}{String indicating the statistics to return.}

\item{time}{time for which to return survival probabilities.}

\item{prob}{probability with values in (0,1)}

\item{column}{column to print from \code{x$table_body}.
Columns may be selected with \verb{time=} or \verb{prob=} as well.}

\item{estimate_fun}{Function to round and format estimate and confidence limits.
Default is the same function used in \code{tbl_survfit()}}

\item{pvalue_fun}{Function to round and format p-values.
Default is \link{style_pvalue}.
The function must have a numeric vector input (the numeric, exact p-value),
and return a string that is the rounded/formatted p-value (e.g.
\code{pvalue_fun = function(x) style_pvalue(x, digits = 2)} or equivalently,
\code{purrr::partial(style_pvalue, digits = 2)}).}

\item{...}{Not used}
}
\value{
A string reporting results from a gtsummary table
}
\description{
\lifecycle{maturing}
Extracts and returns statistics from a \code{tbl_survfit} object for inline
reporting in an R markdown document. Detailed examples in the
\href{http://www.danieldsjoberg.com/gtsummary/articles/inline_text.html}{inline_text vignette}
}
\examples{
library(survival)

# fit survfit
fit1 <- survfit(Surv(ttdeath, death) ~ trt, trial)
fit2 <- survfit(Surv(ttdeath, death) ~ 1, trial)

# summarize survfit objects
tbl1 <- tbl_survfit(
  fit1,
  times = c(12, 24),
  label = "Treatment",
  label_header = "**{time} Month**"
) \%>\%
  add_p()

tbl2 <- tbl_survfit(
  fit2,
  probs = 0.5,
  label_header = "**Median Survival**"
)

# report results inline
inline_text(tbl1, time = 24, level = "Drug B")
inline_text(tbl1, column = p.value)
inline_text(tbl2, prob = 0.5)
}
\seealso{
Other tbl_summary tools:
\code{\link{add_ci}()},
\code{\link{add_n.tbl_summary}()},
\code{\link{add_overall}()},
\code{\link{add_p.tbl_summary}()},
\code{\link{add_q}()},
\code{\link{add_stat_label}()},
\code{\link{bold_italicize_labels_levels}},
\code{\link{inline_text.tbl_summary}()},
\code{\link{modify}},
\code{\link{tbl_custom_summary}()},
\code{\link{tbl_merge}()},
\code{\link{tbl_stack}()},
\code{\link{tbl_summary}()}
}
\author{
Daniel D. Sjoberg
}
\concept{tbl_summary tools}
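The strings returned by the calls in the \examples block are meant to be dropped into R Markdown prose via inline chunks. A small sketch of that usage, assuming the tbl1 and tbl2 objects from the examples above have already been built in an earlier chunk; only the calls shown in the examples are used.

## In an .Rmd document the calls would appear inline, e.g.
## `r inline_text(tbl1, time = 24, level = "Drug B")`.
## Equivalently, the pieces can be assembled in a chunk:
surv24 <- inline_text(tbl1, time = 24, level = "Drug B")
pval   <- inline_text(tbl1, column = p.value)
med    <- inline_text(tbl2, prob = 0.5)
paste0("24-month survival for Drug B was ", surv24,
       " (p = ", pval, "); overall median survival was ", med, ".")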
/man/inline_text.tbl_survfit.Rd
permissive
clara1989/gtsummary
R
false
true
2,874
rd
#benchmark bounded sampler #testthat::skip_on_cran() library(deSolve) library(deBInfer) # define logistic model logistic_model <- function(time, y, parms) { with(as.list(c(y, parms)), { dN <- r * N * (1 - N / K) list(dN) }) } # set initial value for simulation y <- c(N = 0.1) # set parameter values parms <- c(r = 0.1, K = 10) # set simulation time points times <- seq(0, 120, 1) # solve ODE out <- ode(y, times, logistic_model, parms, method='lsoda') # sample from simulated data set.seed(143) N_obs <- as.data.frame(out[c(1,runif(35, 0, nrow(out))),]) #force include the first time-point (t=0) # add lognormal noise parms['logsd.N'] <- 0.01 N_obs$N_noisy <- rlnorm(nrow(N_obs), log(N_obs$N),(parms['logsd.N'])) # observations must be ordered for solver to work N_obs <- N_obs[order(N_obs$time),] # define an observation model # NB: lognormal errors are not really great for the obs model - should be changed to something that actually allows N to be zero instead of using the epsilon correction (a sketch of such an alternative follows this record) logistic_obs_model<-function(data, sim.data, samp){ llik.N<-sum(dlnorm(data$N_noisy, meanlog=log(sim.data[,"N"]+1e-6), sdlog=samp[['logsd.N']], log=TRUE)) llik<-llik.N return(llik) } r <- debinfer_par(name = "r", var.type = "de", fixed = FALSE, value = 0.5, prior="norm", hypers=list(mean = 0, sd = 1), prop.var=1e-5, samp.type="rw") K <- debinfer_par(name = "K", var.type = "de", fixed = FALSE, value = 5, prior="lnorm", hypers=list(meanlog = 1, sdlog = 1), prop.var=0.1, samp.type="rw") logsd.N <- debinfer_par(name = "logsd.N", var.type = "obs", fixed = FALSE, value = 1, prior="lnorm", hypers=list(meanlog = 0, sdlog = 1), prop.var=c(1,2), samp.type="rw-unif") logsd.N.ref <- debinfer_par(name = "logsd.N", var.type = "obs", fixed = FALSE, value = 1, prior="lnorm", hypers=list(meanlog = 0, sdlog = 1), prop.var=0.001, samp.type="rw-ref") #we also need to provide an initial condition for the DE N <- debinfer_par(name = "N", var.type = "init", fixed = TRUE, value = 0.1) mcmc.pars <- setup_debinfer(r, K, logsd.N, N) mcmc.pars.ref <- setup_debinfer(r, K, logsd.N.ref, N) # do inference with deBInfer # MCMC iterations iter = 15000 # define burnin burnin = 2000 # inference call # bm <- microbenchmark::microbenchmark( # rw_unif = de_mcmc(N = iter, data=N_obs, de.model=logistic_model, obs.model=logistic_obs_model, all.params=mcmc.pars, # Tmax = max(N_obs$time), data.times=N_obs$time, cnt=iter+1, # plot=FALSE, sizestep=0.1, solver=1), # rw_ref = de_mcmc(N = iter, data=N_obs, de.model=logistic_model, obs.model=logistic_obs_model, all.params=mcmc.pars.ref, # Tmax = max(N_obs$time), data.times=N_obs$time, cnt=iter+1, # plot=FALSE, sizestep=0.1, solver=1), # times=10) rw_unif <- de_mcmc(N = iter, data=N_obs, de.model=logistic_model, obs.model=logistic_obs_model, all.params=mcmc.pars, Tmax = max(N_obs$time), data.times=N_obs$time, cnt=iter+1, plot=FALSE, sizestep=0.1, solver=1) rw_ref <- de_mcmc(N = iter, data=N_obs, de.model=logistic_model, obs.model=logistic_obs_model, all.params=mcmc.pars.ref, Tmax = max(N_obs$time), data.times=N_obs$time, cnt=iter+1, plot=FALSE, sizestep=0.1, solver=1) plot(window(rw_unif$samples, 3300, iter)) coda::effectiveSize(window(rw_unif$samples, 3300, iter)) coda::rejectionRate(window(rw_unif$samples, 3300, iter)) plot(window(rw_ref$samples, 3300, iter)) coda::effectiveSize(window(rw_ref$samples, 3300, iter)) coda::rejectionRate(window(rw_ref$samples, 3300, iter))
/sandbox/sampler_benchmarking.R
no_license
pboesu/debinfer
R
false
false
3,753
r
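The NB comment in the script above flags the lognormal observation model as a poor choice because simulated N cannot be zero without the 1e-6 correction. Below is a minimal sketch of an additive-Gaussian alternative with the same (data, sim.data, samp) interface; sd.N is a hypothetical observation-level parameter that would need its own debinfer_par() declaration and inclusion in setup_debinfer().

# Sketch of an alternative observation model with additive Gaussian noise,
# which tolerates sim.data[, "N"] being exactly zero (no epsilon correction).
# "sd.N" is a hypothetical obs-level parameter, not defined in the script above.
logistic_obs_model_norm <- function(data, sim.data, samp) {
  llik.N <- sum(dnorm(data$N_noisy,
                      mean = sim.data[, "N"],
                      sd = samp[["sd.N"]],
                      log = TRUE))
  return(llik.N)
}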
library(gbm) library(dplyr) library(ROCR) #Gradient boosting is a machine learning technique for regression and classification problems, which produces a prediction model in the form of an ensemble of weak prediction models, typically decision trees. It builds the model in a stage-wise fashion like other boosting methods do, and it generalizes them by allowing optimization of an arbitrary differentiable loss function. [wiki] ##gbm(formula = formula(data), #distribution = "bernoulli", #data = list(), #weights, #var.monotone = NULL, #n.trees = 100, #interaction.depth = 1, #n.minobsinnode = 10, #shrinkage = 0.001, #bag.fraction = 0.5, #train.fraction = 1.0, #cv.folds=0, #keep.data = TRUE, #verbose = "CV", #class.stratify.cv=NULL, #n.cores = NULL) ##Currently available options are #"gaussian" (squared error), #"laplace" (absolute loss), #"tdist" (t-distribution loss), #"bernoulli" (logistic regression for 0-1 outcomes), #"huberized" (huberized hinge loss for 0-1 outcomes), #"multinomial" (classification when there are more than 2 classes), #"adaboost" (the AdaBoost exponential loss for 0-1 outcomes), #"poisson" (count outcomes), #"coxph" (right censored observations), "quantile", or "pairwise" (ranking measure using the LambdaMart algorithm). ##boosting boost_data <- airbnb_train_final #PCA #PCAAnalysis_test=prcomp(airbnb_test, scale.=TRUE) #summary(PCAAnalysis_test) #airbnb_test=PCAAnalysis_test$x[,1:29] #Convert character into factor boost_data=boost_data %>% mutate_if(is.character, as.factor) test_final=test_final %>% mutate_if(is.character, as.factor) #Split data set.seed(12345) test_insts = sample(nrow(boost_data), .3*nrow(boost_data)) airbnb_test = boost_data[test_insts,] airbnb_train = boost_data[-test_insts,] airbnb_test1 = airbnb_test[,-42] test_size <- nrow(airbnb_test) #interaction.depth refers to the maximum depth of each tree allowed boost.mod <- gbm(high_booking_rate~., data=airbnb_train, distribution="bernoulli", n.trees=20000, shrinkage=0.01, interaction.depth=3,cv.folds=5,keep.data = TRUE) ##to see the influence of the features (in plot) summary(boost.mod) gbm.perf(boost.mod)#see the best number of trees boost_preds <- predict(boost.mod,newdata=airbnb_test1,type='response',n.trees=gbm.perf(boost.mod)) #see the performance using rocr pred <- prediction(boost_preds,airbnb_test$high_booking_rate) tpr.perf = performance(pred, measure = "tpr") tnr.perf = performance(pred, measure = "tnr") acc = performance(pred, measure = "acc") plot(tpr.perf,ylim=c(0,1)) plot(tnr.perf, add=T) #find the best cutoff best = which.max(slot(acc,"y.values")[[1]]) max.acc = slot(acc,"y.values")[[1]][best] max.cutoff = slot(acc,"x.values")[[1]][best] print(c(accuracy= max.acc, cutoff = max.cutoff)) #classify with a cutoff and compute accuracy boost_class <- ifelse(boost_preds>.5,1,0) boost_acc <- sum(ifelse(boost_class==airbnb_test$high_booking_rate,1,0))/nrow(airbnb_test) boost_acc ##For test test_final$is_location_exact[is.na(test_final$is_location_exact)]=1 boost_preds1 <- predict(boost.mod,newdata=test_final,type='response',n.trees=gbm.perf(boost.mod)) boost_class1 <- ifelse(boost_preds1>0.5241141 ,1,0)#manually set cutoff file=data.frame(boost_class1) table(file) colnames(file)='high_booking_rate' write.csv(file,'Submission_Group6.csv')
/Code_Modeling/BOOST.R
no_license
kelsey-s/predictivemodeling_airbnb_R
R
false
false
3,461
r
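The ROCR block in the script above plots TPR, TNR and accuracy but never reports AUC, which is the usual single-number summary for this kind of cutoff analysis. A minimal sketch, assuming the `pred` object created above by prediction(boost_preds, airbnb_test$high_booking_rate):

# Sketch: AUC and ROC curve from the existing ROCR prediction object `pred`.
auc.perf <- performance(pred, measure = "auc")
print(c(AUC = auc.perf@y.values[[1]]))
roc.perf <- performance(pred, measure = "tpr", x.measure = "fpr")
plot(roc.perf)
abline(a = 0, b = 1, lty = 2)  # reference line for a random classifier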
''' m_temp<-m[!(grepl("\\.",m$`Complaint Date`)),] m_temp<-m_temp[!(grepl("\\.",m_temp$`Complaint Resolution Date`)),] m_temp<-m_temp[!is.na(m_temp$`Complaint Date`),] m_temp<-m_temp[!is.na(m_temp$`Complaint Resolution Date`),] time_compl_3<-strsplit(m_temp$`Complaint Date`, "-") print(time_compl_3) len<-length(time_compl_3) print(len) merged_dataset_4<-0 for(i in 1:len) { merged_dataset_4[i]<- time_compl_3[[i]][[3]] print (i) } print(merged_dataset_4) merged_dataset_4<-strsplit(merged_dataset_4, " ") print(merged_dataset_4) year<-0 len2<-length(merged_dataset_4) for(j in 1:len2) { year[j]<- merged_dataset_4[[j]][[1]] } print(year) time_compl_32<-strsplit(m_temp$`Complaint Resolution Date`, "-") print(time_compl_32) len2<-length(time_compl_32) print(len2) merged_dataset_42<-0 for(i in 1:len2) { merged_dataset_42[i]<- time_compl_32[[i]][[3]] print (i) } print(merged_dataset_42) merged_dataset_42<-strsplit(merged_dataset_42, " ") print(merged_dataset_42) year2<-0 len2<-length(merged_dataset_42) for(j in 1:len2) { year2[j]<- merged_dataset_42[[j]][[1]] } print(year2) df_date <- NULL for(i in 1:len) { df <- data.frame( Month = time_compl_3[[i]][[1]] , Day = time_compl_3[[i]][[2]],Year=year[i] ) cols <- c( 'Month' , 'Day' , 'Year' ) x <- apply( df[ , cols ] , 1 , paste , collapse = "-" ) print (as.Date(x,"%m-%d-%Y")) df2 <- data.frame( Month = time_compl_32[[i]][[1]] , Day = time_compl_32[[i]][[2]],Year=year2[i] ) cols <- c( 'Month' , 'Day' , 'Year' ) x2 <- apply( df2[ , cols ] , 1 , paste , collapse = "-" ) print (as.Date(x2,"%m-%d-%Y")) diff<-(as.Date(x,"%m-%d-%Y"))-(as.Date(x2,"%m-%d-%Y")) df_date<-rbind(df_date,(abs(diff))) } print (df_date) ''' library(ggplot2) newdf<-data.frame(x=m_temp$Ward.x,y=m_temp$`Time Taken for comlaint Redressal`) print (newdf) zone<- (unique(m_temp$Ward.x)) z<-matrix(0,length(zone)) c<-matrix(0,length(zone)) for(i in 1:len){ for(j in 1:length(zone)) if(newdf$x[i]==zone[j]) { z[j]<-z[j]+newdf$y[i] c[j]<-c[j]+1 } } print (length(c)) avg<-z/c print (avg) ds<-data.frame(average=avg,count=c) ggplot(ds, aes(average, count)) + geom_point() Cluster_df<-data.frame(Avg_Resolution_Time=avg,Num_Complaints=c) print (Cluster_df) set.seed(50) Extracted_Cluster <- kmeans(Cluster_df, 5, nstart = 30) Extracted_Cluster Extracted_Cluster$cluster <- as.factor(Extracted_Cluster$cluster) ggplot(Cluster_df, aes(Avg_Resolution_Time, Num_Complaints, color = Extracted_Cluster$cluster)) + geom_point()
/hackapr17/t23-hackapr17/egov scripts/ClusterAnalysis_Ward.R
no_license
egovernments/Hackathon
R
false
false
2,606
r
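The script above hard-codes k = 5 in the kmeans() call. A minimal sketch of an elbow check over candidate k, assuming Cluster_df has been built as above and has more distinct rows than the largest k tried:

# Sketch: elbow plot to sanity-check the hard-coded choice of k = 5.
set.seed(50)
wss <- sapply(1:10, function(k) {
  kmeans(Cluster_df, centers = k, nstart = 30)$tot.withinss
})
plot(1:10, wss, type = "b",
     xlab = "Number of clusters k",
     ylab = "Total within-cluster sum of squares")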
context("Testing IGMM \n") set.seed(40) nobs <- 1e3 yy <- rnorm(n = nobs, mean = 3, sd = 0.2) test_that("IGMM estimates c(mu, sigma) are approx correct for a Normal distribution", { for (tt in c("s", "h", "hh")) { cat("Testing IGMM type ", tt, "\n") mod <- IGMM(yy, type = tt) # mean is approx equal expect_gt(mod$tau["mu_x"], 3 - 0.2 * 2 / sqrt(nobs)) expect_lt(mod$tau["mu_x"], 3 + 0.2 * 2 / sqrt(nobs)) # TODO: replace with actual CI for sigma expect_gt(mod$tau["sigma_x"], 0.2 - 2 / sqrt(nobs)) expect_lt(mod$tau["sigma_x"], 0.2 + 2 / sqrt(nobs)) other.params <- mod$tau[!grepl("mu_x|sigma_x", names(mod$tau))] expect_equal(lp_norm(other.params, 1), 0, tol = 1e-1) } }) yy.neg <- rLambertW(n = 1000, theta = list(beta = c(3, 0.2), gamma = -0.3), distname = "normal") test_that("IGMM estimate of gamma is negative for negatively skewed", { mod <- IGMM(yy.neg, type = "s") # mean is approx equal expect_gt(mod$tau["mu_x"], 3 - 0.2 * 2 / sqrt(nobs)) expect_lt(mod$tau["mu_x"], 3 + 0.2 * 2 / sqrt(nobs)) # TODO: replace with actual CI for sigma expect_gt(mod$tau["sigma_x"], 0.2 - 2 / sqrt(nobs)) expect_lt(mod$tau["sigma_x"], 0.2 + 2 / sqrt(nobs)) expect_lt(mod$tau["gamma"], -0.2) }) test_that("IGMM estimate of delta_l > delta_r for negatively skewed", { mod <- IGMM(yy.neg, type = "hh") # mean is approx equal expect_gt(mod$tau["mu_x"], 3 - 0.2 * 3 / sqrt(nobs) - 0.025) expect_lt(mod$tau["mu_x"], 3 + 0.2 * 3 / sqrt(nobs) + 0.025) # TODO: replace with actual CI for sigma expect_gt(mod$tau["sigma_x"], 0.2 - 3 / sqrt(nobs)) expect_lt(mod$tau["sigma_x"], 0.2 + 3 / sqrt(nobs)) expect_gt(mod$tau["delta_l"], mod$tau["delta_r"]) }) yy.cauchy <- rcauchy(n = nobs) test_that("IGMM estimate of delta > 1 for Cauchy", { mod.cauchy <- IGMM(yy.cauchy, type = "h") # mean is approx equal 0 expect_gt(mod.cauchy$tau["mu_x"], 0 - 2 / sqrt(nobs)) expect_lt(mod.cauchy$tau["mu_x"], 0 + 2 / sqrt(nobs)) expect_gt(mod.cauchy$tau["delta"], 0.5) })
/fuzzedpackages/LambertW/tests/testthat/test_IGMM.R
no_license
akhikolla/testpackages
R
false
false
2,080
r
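The TODO comments in the test file above note that the +/- 2/sqrt(n) bounds are stand-ins for a proper confidence interval on sigma. A minimal sketch of the exact normal-theory (chi-square) interval that such a check could use, assuming yy is the normal sample generated at the top of the file:

# Sketch: exact chi-square CI for the standard deviation of a normal sample,
# a candidate replacement for the ad-hoc +/- 2/sqrt(n) bounds in the TODOs.
ci_sigma <- function(x, level = 0.95) {
  n <- length(x)
  s2 <- var(x)
  alpha <- 1 - level
  c(lower = sqrt((n - 1) * s2 / qchisq(1 - alpha / 2, df = n - 1)),
    upper = sqrt((n - 1) * s2 / qchisq(alpha / 2, df = n - 1)))
}
# e.g. ci <- ci_sigma(yy); expect_gt(mod$tau["sigma_x"], ci["lower"])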
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/XgbModel.R \name{XgbModel} \alias{XgbModel} \title{Creative Modelling with Xgboost} \usage{ XgbModel( data, label, newdata, seed, folds, parameter, type = "regression", earlystopping = 10, print_every_n = 5, maximize = F ) } \arguments{ \item{data}{a dataframe or matrix denoting the training data} \item{label}{target variable.} \item{newdata}{a dataframe or matrix denoting the test data.} \item{seed}{numeric.} \item{folds}{number of folds. Default is 5.} \item{type}{type of model. Default = "regression".} \item{earlystopping}{numeric. Default = 10.} \item{print_every_n}{numeric. Default = 5.} \item{maximize}{boolean. Default is F. Used to maximize the metric of choice.} \item{parameter}{list containing the model parameters.} } \description{ Provides a high-level function to train xgboost models } \examples{ \dontrun{ # don't run this script } } \seealso{ \code{\link{cor} \link{detectCores}} }
/man/XgbModel.Rd
no_license
horlar1/RUserLagos
R
false
true
1,007
rd
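The \examples block above is only a \dontrun placeholder. The commented call below is a hypothetical usage sketch that follows the \usage{} signature shown; train_x, train_y, test_x and the contents of the parameter list are illustrative assumptions, not values documented by the package.

# Hypothetical call following the \usage{} signature; all object names and the
# xgboost settings in `params` are illustrative, not package defaults.
# params <- list(objective = "reg:squarederror", eta = 0.1, max_depth = 6)
# fit <- XgbModel(data = train_x, label = train_y, newdata = test_x,
#                 seed = 123, folds = 5, parameter = params,
#                 type = "regression", earlystopping = 10,
#                 print_every_n = 5, maximize = F)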
%% File Name: BIFIE.ecdf.Rd %% File Version: 0.22 \name{BIFIE.ecdf} \alias{BIFIE.ecdf} \alias{summary.BIFIE.ecdf} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Empirical Distribution Function and Quantiles } \description{ Computes an empirical distribution function (and quantiles). If only some quantiles should be calculated, then an appropriate vector of \code{breaks} (which are quantiles) must be specified. Statistical inference is not conducted for this method. } \usage{ BIFIE.ecdf( BIFIEobj, vars, breaks=NULL, quanttype=1, group=NULL, group_values=NULL ) \method{summary}{BIFIE.ecdf}(object,digits=4,...) } % BIFIE.ecdf <- function( BIFIEobj, vars, breaks=NULL, quanttype=1, % group=NULL, group_values=NULL ){ %- maybe also 'usage' for other objects documented here. \arguments{ \item{BIFIEobj}{ Object of class \code{BIFIEdata} } \item{vars}{ Vector of variables for which statistics should be computed. } \item{breaks}{ Optional vector of breaks. Otherwise, it will be automatically defined. } \item{quanttype}{ Type of calculation for quantiles. In case of \code{quanttype=1}, a linear interpolation is used (which is \code{type='i/n'} in \code{\link[Hmisc:wtd.stats]{Hmisc::wtd.quantile}}), while for \code{quanttype=2} no interpolation is used. } \item{group}{ Optional grouping variable } \item{group_values}{ Optional vector of grouping values. This can be omitted and grouping values will be determined automatically. } \item{object}{Object of class \code{BIFIE.ecdf}} \item{digits}{Number of digits for rounding output} \item{\dots}{Further arguments to be passed} } %\details{ %% ~~ If necessary, more details than the description above ~~ %} \value{ A list with following entries \item{ecdf}{Data frame with probabilities and the empirical distribution function (See Examples). } \item{stat}{Data frame with empirical distribution function stacked with respect to variables, groups and group values} \item{output}{More extensive output} \item{\dots}{More values} } %\references{ %% ~put references to the literature/web site here ~ %} %\author{ %Alexander Robitzsch <a.robitzsch@bifie.at> %} %\note{ %% ~~further notes~~ %} %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link[Hmisc:wtd.stats]{Hmisc::wtd.ecdf}}, \code{\link[Hmisc:wtd.stats]{Hmisc::wtd.quantile}} } \examples{ ############################################################################# # EXAMPLE 1: Imputed TIMSS dataset ############################################################################# data(data.timss1) data(data.timssrep) # create BIFIE.dat object bifieobj <- BIFIEsurvey::BIFIE.data( data.list=data.timss1, wgt=data.timss1[[1]]$TOTWGT, wgtrep=data.timssrep[, -1 ] ) # ecdf vars <- c( "ASMMAT", "books") group <- "female" ; group_values <- 0:1 # quantile type 1 res1 <- BIFIEsurvey::BIFIE.ecdf( bifieobj, vars=vars, group=group ) summary(res1) res2 <- BIFIEsurvey::BIFIE.ecdf( bifieobj, vars=vars, group=group, quanttype=2) # plot distribution function ecdf1 <- res1$ecdf plot( ecdf1$ASMMAT_female0, ecdf1$yval, type="l") plot( res2$ecdf$ASMMAT_female0, ecdf1$yval, type="l", lty=2) plot( ecdf1$books_female0, ecdf1$yval, type="l", col="blue") } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. %% \keyword{Empirical distribution function} %% \keyword{Quantiles} %% \keyword{summary} %\keyword{plot}
/BIFIEsurvey/man/BIFIE.ecdf.Rd
no_license
akhikolla/ClusterTests
R
false
false
3,602
rd
\docType{data} \name{rat} \alias{rat} \title{Weight gains of rats fed different diets} \format{A data frame with 60 observations on the following 3 variables, no NAs. \describe{ \item{Weight.Gain}{Weight gain (grams) of rats fed the diets.} \item{Diet.Amount}{Amount of protein in diet: 1 = High, 2 = Low.} \item{Diet.Type}{Type of protein in diet: 1 = Beef, 2 = Pork, 3 = Cereal.} }} \source{ Fundamentals of Exploratory Analysis of Variance, Hoaglin D., Mosteller F. and Tukey J. eds., Wiley, 1991, p. 100; originally from Statistical Methods, 7th ed, Snedecor G. and Cochran W. (1980), Iowa State Press. } \description{ 60 rats were fed varying diets to see which produced the greatest weight gain. The two diet factors were protein type (beef, pork, cereal) and protein level (high, low). } \keyword{datasets}
/man/rat.Rd
no_license
MichaelMBishop/granovaGG
R
false
false
839
rd
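A minimal analysis sketch for the dataset documented above: the two-way layout it describes maps naturally onto a factorial ANOVA. The factor labels below are taken from the \format block; loading via data(rat) assumes the package is attached.

# Sketch: two-way ANOVA for the rat weight-gain data described above.
# data(rat)                                   # assumes the package is attached
# rat$Diet.Amount <- factor(rat$Diet.Amount, labels = c("High", "Low"))
# rat$Diet.Type   <- factor(rat$Diet.Type,   labels = c("Beef", "Pork", "Cereal"))
# summary(aov(Weight.Gain ~ Diet.Amount * Diet.Type, data = rat))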
rm(list=ls()) ## loading libraries library(caret) library(dummies) library(plyr) ## loading data (edit the paths) setwd("C:/Users/Abhilash/Desktop/Main project/Restaurant prediction kaggle") #loading the data train<-read.csv("train.csv",header = T,sep=',',stringsAsFactors=F) test<-read.csv("test.csv",header=T,sep=',',stringsAsFactors=F) which.max(train$revenue) which.min(train$revenue) #Treating outliers train[17,43] <- 0.5 * (train[17,43]) train[22,43] <- 1.5 * (train[17,43]) ## cleaning data ## binding the train and test data Data <- rbind(train[,-ncol(train)], test) # creating feature variables ## extracting years, month, days from open date Data$year <- substr(as.character(Data$Open.Date),7,10) Data$month <- substr(as.character(Data$Open.Date),1,2) Data$day <- substr(as.character(Data$Open.Date),4,5) ##converting opendate in to date type Data$Date <- as.Date(strptime(Data$Open.Date, "%m/%d/%Y")) ## calculating no of days since the opening of restaurant Data$days <- as.numeric(as.Date("2014-02-02")-Data$Date) ##factorizing city type attribute Data$City.Group <- as.factor(Data$City.Group) ## Data$Type[Data$Type == "DT"] <- "IL" Data$Type[Data$Type == "MB"] <- "FC" ## factorizing type attribute Data$Type <- as.factor(Data$Type) ## removing OPen date , date and city Data <- subset(Data, select = -c(Open.Date, Date, City)) ## subsetting train data set while dropping id, month ,day and days and test dataset train_svm <- Data[1:nrow(train),-c(1,41,42,43)] test_svm <- Data[(nrow(train)+1):nrow(Data),] # converting variables to factors catg =c("P1","P5","P6","P7","P8","P9","P10","P11", "P12","P14", "P15", "P16", "P17", "P18", "P19", "P20", "P21", "P22", "P23", "P24", "P25", "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37") train_svm[,catg] <- data.frame(apply(train_svm[,catg], 2, factor)) test_svm[,catg] <- data.frame(apply(test_svm[,catg], 2, factor)) # converting some categorical variables into dummies Data <- dummy.data.frame(Data, names=c("P1","P5","P6","P7","P8","P9","P10","P11", "P12","P14", "P15", "P16", "P17", "P18", "P19", "P20", "P21", "P22", "P23", "P24", "P25", "P30", "P31", "P32", "P33", "P34", "P35", "P36", "P37"), all=T) #creating dummies to all categorical variables ldf <- lapply(1:ncol(Data), function(k) { return(data.frame("column" = colnames(Data)[k], "unique" = length(unique(Data[1:nrow(train),k])))) }) ldf <- ldply(ldf, data.frame) # removing variables with unique values Data <- Data[,!names(Data) %in% ldf$column[ldf$unique == 1]] # removing highly correlated variables for (i in (3:ncol(Data))) { Data[,i] <- as.numeric(Data[,i]) } cor <- cor(Data[1:nrow(train), 3:ncol(Data)]) high_cor <- findCorrelation(cor, cutoff = 0.99) high_cor <- high_cor[high_cor != 186] Data <- Data[,-c(high_cor+1)] # splitting into train and test X_train <- Data[1:nrow(train),-1] X_test <- Data[(nrow(train)+1):nrow(Data),] # building model on log of revenue result <- log(train$revenue) train_svm <- train[,-43] library(clusterSim) ##normalizing the Train and Test dataset train_svm <- data.Normalization (train_svm,type="n5",normalization="column") test_svm <- data.Normalization (test_svm,type="n5",normalization="column") summary(train_svm) str(train_svm) #-------------- model Building ------------------- ####### Random Forest##################### ## loading libraries suppressWarnings(library(pROC)) library(randomForest) RandomForestRegression_CV <- function(X_train,y,X_test=data.frame(),cv=5,ntree=50,nodesize=5,seed=123,metric="mae") { score <- function(a,b,metric) { switch(metric, mae = 
sum(abs(a-b))/length(a), rmse = sqrt(sum((a-b)^2)/length(a))) } cat("Preparing Data\n") X_train$order <- seq(1, nrow(X_train)) X_train$result <- as.numeric(y) set.seed(seed) X_train$randomCV <- floor(runif(nrow(X_train), 1, (cv+1))) cat(cv, "-fold Cross Validation\n", sep = "") for (i in 1:cv) { X_build <- subset(X_train, randomCV != i, select = -c(order, randomCV)) X_val <- subset(X_train, randomCV == i) model_rf <- randomForest(result ~., data = X_build, ntree = ntree, nodesize = nodesize) pred_rf <- predict(model_rf, X_val) X_val <- cbind(X_val, pred_rf) if (nrow(X_test) > 0) { pred_rf <- predict(model_rf, X_test) } cat("CV Fold-", i, " ", metric, ": ", score(X_val$result, X_val$pred_rf, metric), "\n", sep = "") if (i == 1) { output <- X_val if (nrow(X_test) > 0) { X_test <- cbind(X_test, pred_rf) } } if (i > 1) { output <- rbind(output, X_val) if (nrow(X_test) > 0) { X_test$pred_rf <- (X_test$pred_rf * (i-1) + pred_rf)/i } } gc() } output <- output[order(output$order),] cat("\nRandomForest ", cv, "-Fold CV ", metric, ": ", score(output$result, output$pred_rf, metric), "\n", sep = "") output <- subset(output, select = c("order", "pred_rf")) return(list(output, X_test)) } ########### Svm ############ ## loading libraries suppressWarnings(library(pROC)) library(e1071) svmRegression_CV <- function(X_train,y,X_test=data.frame(),scale = FALSE,cv=5,seed=123,metric="rmse") { score <- function(a,b,metric) { switch(metric, mae = sum(abs(a-b))/length(a), rmse = sqrt(sum((a-b)^2)/length(a))) } cat("Preparing Data\n") X_train$order <- seq(1, nrow(X_train)) X_train$result <- as.numeric(y) set.seed(seed) X_train$randomCV <- floor(runif(nrow(X_train), 1, (cv+1))) cat(cv, "-fold Cross Validation\n", sep = "") for (i in 1:cv) { X_build <- subset(X_train, randomCV != i, select = -c(order, randomCV)) X_val <- subset(X_train, randomCV == i) model_rf <- svm(result ~., data = X_build, scale = scale) pred_rf <- predict(model_rf, X_val) X_val <- cbind(X_val, pred_rf) if (nrow(X_test) > 0) { pred_rf <- predict(model_rf, X_test) } cat("CV Fold-", i, " ", metric, ": ", score(X_val$result, X_val$pred_rf, metric), "\n", sep = "") if (i == 1) { output <- X_val if (nrow(X_test) > 0) { X_test <- cbind(X_test, pred_rf) } } if (i > 1) { output <- rbind(output, X_val) if (nrow(X_test) > 0) { X_test$pred_rf <- (X_test$pred_rf * (i-1) + pred_rf)/i } } gc() } output <- output[order(output$order),] cat("\nSVM ", cv, "-Fold CV ", metric, ": ", score(output$result, output$pred_rf, metric), "\n", sep = "") output <- subset(output, select = c("order", "pred_rf")) return(list(output, X_test)) } # 5-fold cross validation and scoring for svm model model_svm_1 <- svmRegression_CV(X_train,result,X_test,scale = F,cv=5,seed=235,metric="rmse") model_svm_2 <- svmRegression_CV(X_train,result,X_test,cv=5,scale = F,seed=357,metric="rmse") model_svm_3 <- svmRegression_CV(X_train,result,X_test,cv=5,scale = F,seed=13,metric="rmse") model_svm_4 <- svmRegression_CV(X_train,result,X_test,cv=5,scale = F,seed=753,metric="rmse") model_svm_5 <- svmRegression_CV(X_train,result,X_test,cv=5,scale = F,seed=532,metric="rmse") # 5-fold cross validation and scoring model_rf_1 <- RandomForestRegression_CV(X_train,result,X_test,cv=5,ntree=25,nodesize=5,seed=235,metric="rmse") model_rf_2 <- RandomForestRegression_CV(X_train,result,X_test,cv=5,ntree=25,nodesize=5,seed=357,metric="rmse") model_rf_3 <- RandomForestRegression_CV(X_train,result,X_test,cv=5,ntree=25,nodesize=5,seed=13,metric="rmse") model_rf_4 <-
RandomForestRegression_CV(X_train,result,X_test,cv=5,ntree=25,nodesize=5,seed=753,metric="rmse") model_rf_5 <- RandomForestRegression_CV(X_train,result,X_test,cv=5,ntree=25,nodesize=5,seed=532,metric="rmse") #submission test_svm_1 <- model_svm_1[[2]] test_svm_2 <- model_svm_2[[2]] test_svm_3 <- model_svm_3[[2]] test_svm_4 <- model_svm_4[[2]] test_svm_5 <- model_svm_5[[2]] ## submission test_rf_1 <- model_rf_1[[2]] test_rf_2 <- model_rf_2[[2]] test_rf_3 <- model_rf_3[[2]] test_rf_4 <- model_rf_4[[2]] test_rf_5 <- model_rf_5[[2]] submit <- data.frame("Id" = test_rf_1$Id, "Prediction" = 0.2*exp(test_rf_1$pred_rf) + 0.2*exp(test_rf_2$pred_rf) + 0.2*exp(test_rf_3$pred_rf) + 0.2*exp(test_rf_4$pred_rf) + 0.2*exp(test_rf_5$pred_rf)) submit_svm <- data.frame("Id" = test_svm_1$Id, "Prediction" = 0.2*exp(test_svm_2$pred_rf) + 0.2*exp(test_svm_3$pred_rf) + 0.2*exp(test_svm_4$pred_rf) + 0.2*exp(test_svm_5$pred_rf) + 0.2*exp(test_svm_1$pred_rf)) submit_ensemble <- data.frame("Id" = test_rf_1$Id, "Prediction" = 0.2*exp(test_svm_2$pred_rf) + 0.8*exp(test_rf_4$pred_rf) ) str(submit) write.csv(submit, "submitRF.csv", row.names=F) write.csv(submit_ensemble, "submit_ensemble.csv", row.names=F) write.csv(submit_svm,"submit_svm.csv", row.names=F) #---------------Plots------------------------------- library(plotly) plot_ly(x =train$revenue , type = "histogram") plot_ly(x = train$revenue, opacity = 0.6, type = "histogram") %>% add_trace(x = result , opacity = 0.8, type = "histogram") %>% layout(barmode="overlay") # revenue vs open date plot_ly(Data ,x= revenue, y = days, mode = "markers", color = revenue, opacity = revenue)
/Abhilash_rangu.R
no_license
Abhilashrangu8/My-first-repository
R
false
false
9,917
r
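The submission section above blends RF and SVM predictions with hand-picked 0.2/0.8 weights. Below is a minimal sketch of choosing that weight from the cross-validated (out-of-fold) predictions the *_CV functions already return in their first list element; the scan is done on the log(revenue) scale used for `result`, and which two model objects to blend (model_rf_4 and model_svm_2, as in submit_ensemble) is an assumption.

# Sketch: tune the RF/SVM blend weight on out-of-fold predictions instead of
# hand-picking 0.2/0.8. [[1]] holds CV predictions of log(revenue) in row order.
oof_rf  <- model_rf_4[[1]]$pred_rf
oof_svm <- model_svm_2[[1]]$pred_rf
rmse <- function(a, b) sqrt(mean((a - b)^2))
w_grid  <- seq(0, 1, by = 0.05)
cv_rmse <- sapply(w_grid, function(w) rmse(w * oof_rf + (1 - w) * oof_svm, result))
best_w  <- w_grid[which.min(cv_rmse)]
print(c(best_rf_weight = best_w, cv_rmse = min(cv_rmse)))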
#' estimateG #' #' Function to estimate propensity score #' #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param W A \code{data.frame} of named covariates #' @param stratify A \code{boolean} indicating whether to estimate the missing #' outcome regression separately for observations with \code{A} equal to 0/1 #' (if \code{TRUE}) or to pool across \code{A} (if \code{FALSE}). #' @param SL_g A vector of characters describing the super learner library to be #' used for each of the regression (\code{DeltaA}, \code{A}, and #' \code{DeltaY}). To use the same regression for each of the regressions (or #' if there is no missing data in \code{A} nor \code{Y}), a single library may #' be input. #' @param tolg A numeric indicating the minimum value for estimates of the #' propensity score. #' @param verbose A boolean indicating whether to print status updates. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param glm_g A character describing a formula to be used in the call to #' \code{glm} for the propensity score. #' @param a_0 A vector of fixed treatment values at which to return marginal #' mean estimates. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param Qn A \code{list} of estimates of the outcome regression for each value #' in \code{a_0}. Only needed if \code{adapt_g = TRUE}. #' @param adapt_g A boolean indicating whether propensity score is adaptive #' to outcome regression. #' @param se_cv Should cross-validated nuisance parameter estimates be used #' for computing standard errors? #' Options are \code{"none"} = no cross-validation is performed; \code{"partial"} = #' only applicable if Super Learner is used for nuisance parameter estimates; #' \code{"full"} = full cross-validation is performed. See vignette for further #' details. Ignored if \code{cvFolds > 1}, since then #' cross-validated nuisance parameter estimates are used by default and it is #' assumed that you want full cross-validated standard errors. #' @param se_cvFolds If cross-validated nuisance parameter estimates are used #' to compute standard errors, how many folds should be used in this computation. #' If \code{se_cv = "partial"}, then this option sets the number of folds used #' by the \code{SuperLearner} fitting procedure. #' @importFrom SuperLearner SuperLearner trimLogit All #' @importFrom stats predict glm as.formula # estimateG <- function(A, W, DeltaY, DeltaA, SL_g, glm_g, a_0, tolg, stratify = FALSE, validRows = NULL, verbose = FALSE, returnModels = FALSE, Qn = NULL, adapt_g = FALSE, se_cv = "none", se_cvFolds = 10) { if (is.null(SL_g) & is.null(glm_g)) { stop("Specify Super Learner library or GLM formula for g") } if (!is.null(SL_g) & !is.null(glm_g)) { warning(paste0( "Super Learner library and GLM formula specified.", "Proceeding with Super Learner only." 
)) glm_g <- NULL } # subset data into training and validation sets if (length(validRows) != length(A)) { trainDeltaA <- DeltaA[-validRows] trainDeltaY <- DeltaY[-validRows] trainA <- A[-validRows] if(!adapt_g){ trainW <- W[-validRows, , drop = FALSE] validW <- W[validRows, , drop = FALSE] }else{ allW <- data.frame(Reduce(cbind, Qn)) trainW <- allW[-validRows, , drop = FALSE] validW <- allW[validRows, , drop = FALSE] colnames(trainW) <- paste0("Q", a_0, "W") colnames(validW) <- paste0("Q", a_0, "W") } validA <- A[validRows] validDeltaA <- DeltaA[validRows] validDeltaY <- DeltaY[validRows] } else { trainA <- validA <- A if(!adapt_g){ trainW <- validW <- W }else{ trainW <- validW <- data.frame(Reduce(cbind, Qn)) colnames(trainW) <- paste0("Q", a_0, "W") colnames(validW) <- paste0("Q", a_0, "W") } trainDeltaA <- validDeltaA <- DeltaA trainDeltaY <- validDeltaY <- DeltaY } partial_cv <- se_cv == "partial" if (!is.null(SL_g)) { # check for names in SL_g namedSL_g <- c("DeltaA", "A", "DeltaY") %in% names(SL_g) # if none of the above names appear, then it is assumed that you want # to use SL_g for each of DeltaA, A, and Y if (!any(namedSL_g)) { SL_g <- list(DeltaA = SL_g, A = SL_g, DeltaY = SL_g) } } else if (!is.null(glm_g)) { namedglm_g <- c("DeltaA", "A", "DeltaY") %in% names(glm_g) # if none of the above names appear, then it is assumed that you want # to use glm_g for each of DeltaA, A, and Y if (!any(namedglm_g)) { glm_g <- list(DeltaA = glm_g, A = glm_g, DeltaY = glm_g) } } # ------------------------------- # regression of DeltaA ~ W # ------------------------------- # only fit this regression if there are some missing treatment assignments if (!all(DeltaA == 1)) { # if super learner library is specified, fit a super learner if (!is.null(SL_g)) { # if the SL_g$DeltaA is of length > 1, then call SuperLearner if (length(SL_g$DeltaA) > 1 | is.list(SL_g$DeltaA)) { fm_DeltaA <- SuperLearner::SuperLearner( Y = trainDeltaA, X = trainW, newX = validW, family = stats::binomial(), SL.library = SL_g$DeltaA, verbose = verbose, method = tmp_method.CC_nloglik(), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)) ) # get predicted probability of missing treatment gn_DeltaA <- fm_DeltaA$SL.predict # get partially cross-validated estimates if(partial_cv){ gn_DeltaA_se <- partial_cv_preds(a_0 = NULL, fit_sl = fm_DeltaA, easy = TRUE) } } else if (!is.list(SL_g$DeltaA) & length(SL_g$DeltaA) == 1) { fm_DeltaA <- do.call(SL_g$DeltaA, args = list( Y = trainDeltaA, X = trainW, newX = validW, obsWeights = rep(1, length(trainA)), family = stats::binomial() )) gn_DeltaA <- fm_DeltaA$pred } } # end if SuperLearner loop if (!is.null(glm_g)) { thisDat <- data.frame(DeltaA = trainDeltaA, trainW) fm_DeltaA <- stats::glm(stats::as.formula(paste0( "DeltaA~", glm_g$DeltaA )), data = thisDat, family = stats::binomial()) gn_DeltaA <- stats::predict( fm_DeltaA, type = "response", newdata = data.frame(DeltaA = validDeltaA, validW) ) } # name for returned models name_DeltaA <- "DeltaA ~ W" } else { # if all DeltaA==1 then put NULL model and 1 predictions fm_DeltaA <- NULL name_DeltaA <- "" gn_DeltaA <- gn_DeltaA_se <- rep(1, length(validDeltaA)) } # ----------------------------------- # fitting A ~ W | DeltaA = 1 # ----------------------------------- # if a super learner library is specified, fit the super learner if (!is.null(SL_g)) { # if the library is of length > 1, then call SuperLearner if (length(SL_g$A) > 1 | is.list(SL_g$A)) { # if there are only two unique values of A, then only need one fit if (length(a_0) == 
length(unique(A)) & length(unique(A[!is.na(A)])) == 2) { fm_A <- list(SuperLearner::SuperLearner( Y = as.numeric(trainA[trainDeltaA == 1] == a_0[1]), X = trainW[trainDeltaA == 1, , drop = FALSE], newX = validW, family = stats::binomial(), SL.library = SL_g$A, verbose = verbose, method = tmp_method.CC_nloglik(), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)), control = list(saveCVFitLibrary = partial_cv & !all(trainDeltaA == 1)) )) gn_A <- vector(mode = "list", length = 2) gn_A[[1]] <- fm_A[[1]]$SL.predict gn_A[[2]] <- 1 - gn_A[[1]] if(partial_cv){ gn_A_se <- vector(mode = "list", length = 2) gn_A_se[[1]] <- partial_cv_preds(fit_sl = fm_A[[1]], a_0 = NULL, W = validW, include = trainDeltaA == 1, easy = all(trainDeltaA == 1)) gn_A_se[[2]] <- 1 - gn_A_se[[1]] } # name for this model name_A <- paste0("I(A = ", a_0[1], ") ~ W | DeltaA == 1") # if there are more than two unique values of A, then we need # more than one call to super learner } else { a_ct <- 0 gn_A <- vector(mode = "list", length = length(a_0)) gn_A_se <- vector(mode = "list", length = length(a_0)) fm_A <- vector(mode = "list", length = length(a_0) - 1) name_A <- rep(NA, length(a_0) - 1) for (a in a_0[1:(length(a_0) - 1)]) { # determine who to include in the regression for this outcome if (a_ct == 0) { include <- rep(TRUE, length(trainA)) } else { include <- !(trainA %in% a_0[1:a_ct]) } # now exclude people with DeltaA = 0 include[trainDeltaA == 0] <- FALSE # fit super learner tmp_fm <- SuperLearner::SuperLearner( Y = as.numeric(trainA[include] == a), X = trainW[include, , drop = FALSE], newX = validW, family = stats::binomial(), SL.library = SL_g$A, verbose = verbose, method = tmp_method.CC_nloglik(), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)), control = list(saveCVFitLibrary = partial_cv & !all(include)) ) # get predictions tmp_pred <- tmp_fm$SL.pred if(partial_cv){ tmp_pred_se <- partial_cv_preds(fit_sl = tmp_fm, a_0 = NULL, W = validW, include = include, easy = all(include)) } if (a_ct != 0) { # if not the first level of treatment gn_A[[a_ct + 1]] <- tmp_pred * Reduce( "*", lapply(gn_A[1:a_ct], function(x) { 1 - x }) ) if(partial_cv){ gn_A_se[[a_ct + 1]] <- tmp_pred_se * Reduce( "*", lapply(gn_A_se[1:a_ct], function(x) { 1 - x }) ) } } else { # if the first level of treatment gn_A[[a_ct + 1]] <- tmp_pred if(partial_cv){ gn_A_se[[a_ct + 1]] <- tmp_pred_se } } fm_A[[a_ct + 1]] <- tmp_fm name_A[a_ct + 1] <- paste0("I(A = ", a, ") ~ W | DeltaA == 1") a_ct <- a_ct + 1 } # add in final predictions gn_A[[a_ct + 1]] <- 1 - Reduce("+", gn_A[1:a_ct]) if(partial_cv){ gn_A_se[[a_ct + 1]] <- 1 - Reduce("+", gn_A_se[1:a_ct]) } } } else if (!is.list(SL_g$A) & length(SL_g$A) == 1) { if (length(a_0) == length(unique(A[!is.na(A)])) & length(unique(A[!is.na(A)])) == 2) { gn_A <- vector(mode = "list", length = 2) fm_A <- list(do.call(SL_g$A, args = list( Y = as.numeric( trainA[trainDeltaA == 1] == a_0[1] ), X = trainW[trainDeltaA == 1, , drop = FALSE], newX = validW, obsWeights = rep(1, length(trainA[trainDeltaA == 1])), family = stats::binomial() ))) gn_A[[1]] <- fm_A[[1]]$pred gn_A[[2]] <- 1 - fm_A[[1]]$pred name_A <- paste0("I(A = ", a_0[1], ") ~ W | DeltaA == 1") } else { a_ct <- 0 gn_A <- vector(mode = "list", length = length(a_0)) fm_A <- vector(mode = "list", length = length(a_0) - 1) name_A <- rep(NA, length(a_0) - 1) for (a in a_0[1:(length(a_0) - 1)]) { # determine who to include in the regression for this outcome if (a_ct == 0) { include <- rep(TRUE, length(trainA)) } else { include <- !(trainA %in% a_0[1:a_ct]) } # 
set missing treatment people to FALSE include[trainDeltaA == 0] <- FALSE # fit super learner tmp_fm <- do.call(SL_g$A, args = list( Y = as.numeric( trainA[include] == a ), X = trainW[include, , drop = FALSE], newX = validW, obsWeights = rep(1, length(trainA[include])), family = stats::binomial() )) # get predictions tmp_pred <- tmp_fm$pred if (a_ct != 0) { gn_A[[a_ct + 1]] <- tmp_pred * Reduce( "*", lapply(gn_A[1:a_ct], function(x) { 1 - x }) ) } else { gn_A[[a_ct + 1]] <- tmp_pred } fm_A[[a_ct + 1]] <- tmp_fm name_A[a_ct + 1] <- paste0("I(A = ", a, ") ~ W | DeltaA == 1") a_ct <- a_ct + 1 } # add in final predictions gn_A[[a_ct + 1]] <- 1 - Reduce("+", gn_A[1:a_ct]) } } } # ---------------------------------------------------------------------- # GLM # ---------------------------------------------------------------------- if (!is.null(glm_g)) { if (length(a_0) == length(unique(A)) & length(unique(A[!is.na(A)])) == 2) { thisDat <- data.frame(A = as.numeric(trainA[trainDeltaA == 1] == a_0[1]), trainW[trainDeltaA == 1, , drop = FALSE]) fm_A <- list(stats::glm( stats::as.formula(paste0("A~", glm_g$A)), data = thisDat, family = stats::binomial() )) gn_A <- vector(mode = "list", length = 2) name_A <- paste0("I(A = ", a_0[1], ") ~ W | DeltaA == 1") gn_A[[1]] <- stats::predict(fm_A[[1]], newdata = data.frame( A = validA, validW ), type = "response") gn_A[[2]] <- 1 - gn_A[[1]] } else { a_ct <- 0 gn_A <- vector(mode = "list", length = length(a_0)) fm_A <- vector(mode = "list", length = length(a_0) - 1) name_A <- rep(NA, length(a_0) - 1) for (a in a_0[1:(length(a_0) - 1)]) { # determine who to include in the regression for this outcome if (a_ct == 0) { include <- rep(TRUE, length(A)) } else { include <- !(A %in% a_0[1:a_ct]) } # don't include folks with missing treatment include[trainDeltaA == 0] <- FALSE # fit super learner thisDat <- data.frame( as.numeric(trainA[include] == a), trainW[include, , drop = FALSE] ) colnames(thisDat) <- c("A", colnames(W)) tmp_fm <- stats::glm( stats::as.formula(paste0("A~", glm_g)), data = thisDat, family = stats::binomial() ) tmp_pred <- stats::predict(tmp_fm, newdata = data.frame( A = validA, validW ), type = "response") # get predictions if (a_ct != 0) { gn_A[[a_ct + 1]] <- tmp_pred * Reduce( "*", lapply(gn_A[1:a_ct], function(x) { 1 - x }) ) } else { gn_A[[a_ct + 1]] <- tmp_pred } fm_A[[a_ct + 1]] <- tmp_fm name_A[a_ct + 1] <- paste0("I(A = ", a, ") ~ W | DeltaA == 1") a_ct <- a_ct + 1 } # end for loop over treatment levels # add in final predictions gn_A[[a_ct + 1]] <- 1 - Reduce("+", gn_A[1:a_ct]) } # end multi-level treatment if } # end glm_g if # ------------------------------------- # fit DeltaY ~ W + A | DeltaA = 1 # ------------------------------------- # only fit this regression if there are some missing outcomes if (!all(DeltaY == 1)) { # only include people with DeltaA == 1 include <- (trainDeltaA == 1) # if super learner library is specified, fit a super learner if (!is.null(SL_g)) { # if the SL_g$DeltaY is of length > 1, then call SuperLearner if (length(SL_g$DeltaY) > 1 | is.list(SL_g$DeltaY)) { # if no stratify, then fit DeltaY ~ W | DeltaA = 1 in each # level of A if (stratify) { fm_DeltaY <- vector(mode = "list", length = length(a_0)) gn_DeltaY <- vector(mode = "list", length = length(a_0)) gn_DeltaY_se <- vector(mode = "list", length = length(a_0)) name_DeltaY <- rep(NA, length(a_0)) a_ct <- 0 for (a in a_0) { a_ct <- a_ct + 1 # only include people with A == a and DeltaA == 1 include2 <- (trainA == a) include2[is.na(include2)] <- FALSE # fit super 
learner fm_DeltaY[[a_ct]] <- SuperLearner::SuperLearner( Y = trainDeltaY[include & include2], X = trainW[include & include2, , drop = FALSE], newX = validW, family = stats::binomial(), SL.library = SL_g$DeltaY, verbose = verbose, method = tmp_method.CC_nloglik(), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)), control = list(saveCVFitLibrary = partial_cv & !all(include & include2)) ) # name the fit name_DeltaY[a_ct] <- paste0( "DeltaY ~ W | DeltaA == 1", " & A == ", a ) # get predictions back on everybody gn_DeltaY[[a_ct]] <- fm_DeltaY[[a_ct]]$SL.predict if(partial_cv){ gn_DeltaY_se[[a_ct]] <- partial_cv_preds(fit_sl = fm_DeltaY[[a_ct]], a_0 = NULL, include = include & include2, W = validW, easy = all(include & include2)) } } # end loop over treatment levels # if not stratified, fit a single regression pooling over # levels of A } else { # fit super learner fm_DeltaY <- list(SuperLearner::SuperLearner( Y = trainDeltaY[include], X = data.frame( A = trainA[include], trainW[include, , drop = FALSE] ), family = stats::binomial(), SL.library = SL_g$DeltaY, verbose = verbose, method = tmp_method.CC_nloglik(), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)), control = list(saveCVFitLibrary = partial_cv & !all(include)) )) # get predictions back setting A = a for every a in a_0 gn_DeltaY <- vector(mode = "list", length = length(a_0)) gn_DeltaY_se <- vector(mode = "list", length = length(a_0)) name_DeltaY <- paste0("DeltaY ~ W + A | DeltaA == 1") a_ct <- 0 for (a in a_0) { a_ct <- a_ct + 1 gn_DeltaY[[a_ct]] <- stats::predict( fm_DeltaY[[1]], onlySL = TRUE, newdata = data.frame(A = a, validW) )$pred if(partial_cv){ gn_DeltaY_se[[a_ct]] <- partial_cv_preds(fit_sl = fm_DeltaY[[1]], a_0 = a, W = validW, include = include, easy = all(include)) } } # end loop over a_0 levels } # end if !stratify # if SL_g$DeltaY only a single algorithm, then call directly } else if (!is.list(SL_g$DeltaY) & length(SL_g$DeltaY) == 1) { # if no stratify, then fit DeltaY ~ W | DeltaA = 1 in # each level of A if (stratify) { fm_DeltaY <- vector(mode = "list", length = length(a_0)) gn_DeltaY <- vector(mode = "list", length = length(a_0)) name_DeltaY <- rep(NA, length(a_0)) a_ct <- 0 for (a in a_0) { a_ct <- a_ct + 1 # only include people with A == a include2 <- (trainA == a) include2[is.na(include2)] <- FALSE # make call to algorithm fm_DeltaY[[a_ct]] <- do.call(SL_g$DeltaY, args = list( Y = trainDeltaY[include & include2], X = trainW[include & include2, , drop = FALSE], newX = validW, obsWeights = rep( 1, length(trainA[include & include2]) ), family = stats::binomial() )) name_DeltaY[a_ct] <- paste0( "DeltaY ~ W | DeltaA == 1", " & A == ", a ) # get predictions gn_DeltaY[[a_ct]] <- fm_DeltaY[[a_ct]]$pred } # end loop over a_0 } else { # end if stratify call algorithm to fit pooled estimate fm_DeltaY <- list(do.call(SL_g$DeltaY, args = list( Y = trainDeltaY[include], X = data.frame( A = trainA[include], trainW[include, , drop = FALSE] ), newX = data.frame( A = validA, validW ), obsWeights = rep( 1, length(trainA[include]) ), family = stats::binomial() ))) name_DeltaY <- paste0("DeltaY ~ W + A | DeltaA == 1") # loop to get predictions setting A = a gn_DeltaY <- vector(mode = "list", length = length(a_0)) a_ct <- 0 for (a in a_0) { a_ct <- a_ct + 1 gn_DeltaY[[a_ct]] <- stats::predict( fm_DeltaY[[1]]$fit, newdata = data.frame(A = a, validW) ) } } # end !stratify } # end if one algorithm loop } # end SL_g not null if if (!is.null(glm_g)) { if (stratify) { fm_DeltaY <- vector(mode = "list", length = length(a_0)) 
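# (stratified glm for DeltaY: one fit, one prediction vector, and one
#  label are allocated per treatment level in a_0)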
gn_DeltaY <- vector(mode = "list", length = length(a_0)) name_DeltaY <- rep(NA, length = length(a_0)) a_ct <- 0 for (a in a_0) { a_ct <- a_ct + 1 # only include people with A == a and DeltaA == 1 include2 <- (trainA == a) include2[is.na(include2)] <- FALSE fm_DeltaY[[a_ct]] <- stats::glm(stats::as.formula( paste0( "trainDeltaY[include & include2]~", glm_g$DeltaY ) ), data = data.frame(trainW[include & include2, , drop = FALSE]), family = stats::binomial()) name_DeltaY[a_ct] <- paste0( "DeltaY ~ W | DeltaA == 1 ", "& A == ", a ) # get predictions back for everyone gn_DeltaY[[a_ct]] <- stats::predict( fm_DeltaY[[a_ct]], newdata = validW, type = "response" ) } # end loop over treatments } else { # end stratified glm fit glm in everyone with DeltaA == 1 fm_DeltaY <- list(stats::glm( stats::as.formula(paste0( "trainDeltaY[include]~", glm_g$DeltaY )), data = data.frame(A = trainA[include], trainW[ include, , drop = FALSE ]), family = stats::binomial() )) name_DeltaY <- paste0("DeltaY ~ W + A | DeltaA == 1") # get predictions back setting A = a gn_DeltaY <- vector(mode = "list", length = length(a_0)) a_ct <- 0 for (a in a_0) { a_ct <- a_ct + 1 gn_DeltaY[[a_ct]] <- stats::predict( fm_DeltaY[[1]], newdata = data.frame(A = a, validW), type = "response" ) } # end loop over treatments } # end !stratified glm } # end glm if } else { # if all DeltaY==1 then NULL model and 1 pred. fm_DeltaY <- NULL name_DeltaY <- "" gn_DeltaY <- vector(mode = "list", length = length(a_0)) for (i in 1:length(a_0)) { gn_DeltaY[[i]] <- rep(1, length(validDeltaY)) } gn_DeltaY_se <- gn_DeltaY } # ------------------------------------------------------ # combine estimates into a single propensity score # ------------------------------------------------------ gn <- mapply(gn_A = gn_A, gn_DeltaY = gn_DeltaY, FUN = function(gn_A, gn_DeltaY) { gn_A * gn_DeltaY * gn_DeltaA }, SIMPLIFY = FALSE) # truncate too-small predictions gn <- lapply(gn, function(g) { g[g < tolg] <- tolg g }) if(partial_cv){ gn_se <- mapply(gn_A = gn_A_se, gn_DeltaY = gn_DeltaY_se, FUN = function(gn_A, gn_DeltaY) { gn_A * gn_DeltaY * gn_DeltaA_se }, SIMPLIFY = FALSE) # truncate too-small predictions gn_se <- lapply(gn_se, function(g) { g[g < tolg] <- tolg g }) }else{ gn_se <- NULL } out <- list(est = gn, fm = NULL, est_se = gn_se) if (returnModels) { names(fm_A) <- name_A if (!is.null(fm_DeltaA)) { names(fm_DeltaA) <- name_DeltaA } if (!is.null(fm_DeltaY)) { names(fm_DeltaY) <- name_DeltaY } out$fm <- list(DeltaA = fm_DeltaA, A = fm_A, DeltaY = fm_DeltaY) } return(out) } #' estimateG_loop #' #' Helper function to clean up internals of \code{drtmle} function #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param W A \code{data.frame} of named covariates #' @param stratify A \code{boolean} indicating whether to estimate the missing #' outcome regression separately for observations with \code{A} equal to 0/1 #' (if \code{TRUE}) or to pool across \code{A} (if \code{FALSE}). #' @param SL_g A vector of characters describing the super learner library to be #' used for each of the regression (\code{DeltaA}, \code{A}, and #' \code{DeltaY}). To use the same regression for each of the regressions (or #' if there is no missing data in \code{A} nor \code{Y}), a single library may #' be input. 
#' @param tolg A numeric indicating the minimum value for estimates of the #' propensity score. #' @param verbose A boolean indicating whether to print status updates. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param glm_g A character describing a formula to be used in the call to #' \code{glm} for the propensity score. #' @param a_0 A vector of fixed treatment values at which to return marginal #' mean estimates. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param Qn A \code{list} of estimates of the outcome regression for each value #' in \code{a_0}. Only needed if \code{adapt_g = TRUE}. #' @param adapt_g A boolean indicating whether propensity score is adaptive #' to outcome regression. #' @param use_future Should \code{future} be used for parallelization? #' @param se_cv Should cross-validated nuisance parameter estimates be used #' for computing standard errors? #' Options are \code{"none"} = no cross-validation is performed; \code{"partial"} = #' only applicable if Super Learner is used for nuisance parameter estimates; #' \code{"full"} = full cross-validation is performed. See vignette for further #' details. Ignored if \code{cvFolds > 1}, since then #' cross-validated nuisance parameter estimates are used by default and it is #' assumed that you want full cross-validated standard errors. #' @param se_cvFolds If cross-validated nuisance parameter estimates are used #' to compute standard errors, how many folds should be used in this computation. #' If \code{se_cv = "partial"}, then this option sets the number of folds used #' by the \code{SuperLearner} fitting procedure. estimateG_loop <- function( validRows, A, W, DeltaA, DeltaY, tolg, verbose, stratify, returnModels, SL_g, glm_g, a_0, Qn, adapt_g, use_future, se_cv = "none", se_cvFolds = 10 ){ if (use_future) { gnOut <- future.apply::future_lapply( X = validRows, FUN = estimateG, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, verbose = verbose, stratify = stratify, returnModels = returnModels, SL_g = SL_g, glm_g = glm_g, a_0 = a_0, Qn = Qn, adapt_g = adapt_g, se_cv = se_cv, se_cvFolds = se_cvFolds ) } else { gnOut <- lapply( X = validRows, FUN = estimateG, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, verbose = verbose, stratify = stratify, returnModels = returnModels, SL_g = SL_g, glm_g = glm_g, a_0 = a_0, Qn = Qn, adapt_g = adapt_g, se_cv = se_cv, se_cvFolds = se_cvFolds ) } return(gnOut) } #' estimateQ #' #' Function to estimate initial outcome regression #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1). #' @param W A \code{data.frame} of named covariates. #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed). #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed). #' @param SL_Q A vector of characters or a list describing the Super Learner #' library to be used for the outcome regression. #' @param verbose A boolean indicating whether to print status updates. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. 
#' @param glm_Q A character describing a formula to be used in the call to #' \code{glm} for the outcome regression. #' @param a_0 A list of fixed treatment values #' @param family A character passed to \code{SuperLearner} #' @param stratify A \code{boolean} indicating whether to estimate the outcome #' regression separately for observations with \code{A} equal to 0/1 (if #' \code{TRUE}) or to pool across \code{A} (if \code{FALSE}). #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param se_cv Should cross-validated nuisance parameter estimates be used #' for computing standard errors? #' Options are \code{"none"} = no cross-validation is performed; \code{"partial"} = #' only applicable if Super Learner is used for nuisance parameter estimates; #' \code{"full"} = full cross-validation is performed. See vignette for further #' details. Ignored if \code{cvFolds > 1}, since then #' cross-validated nuisance parameter estimates are used by default and it is #' assumed that you want full cross-validated standard errors. #' @param se_cvFolds If cross-validated nuisance parameter estimates are used #' to compute standard errors, how many folds should be used in this computation. #' If \code{se_cv = "partial"}, then this option sets the number of folds used #' by the \code{SuperLearner} fitting procedure. #' @param ... Additional arguments (not currently used) #' #' @importFrom SuperLearner SuperLearner trimLogit #' @importFrom stats predict glm as.formula # estimateQ <- function(Y, A, W, DeltaA, DeltaY, SL_Q, glm_Q, a_0, stratify, family, verbose = FALSE, returnModels = FALSE, se_cv = "none", se_cvFolds = 10, validRows = NULL, ...) { if (is.null(SL_Q) & is.null(glm_Q)) { stop("Specify Super Learner library or GLM formula for Q") } if (!is.null(SL_Q) & !is.null(glm_Q)) { warning(paste0( "Super Learner library and GLM formula specified.", " Proceeding with Super Learner only." 
)) glm_Q <- NULL } # subset data into training and validation sets if (length(validRows) != length(Y)) { trainY <- Y[-validRows] trainA <- A[-validRows] trainW <- W[-validRows, , drop = FALSE] trainDeltaA <- DeltaA[-validRows] trainDeltaY <- DeltaY[-validRows] validW <- W[validRows, , drop = FALSE] validA <- A[validRows] validY <- Y[validRows] validDeltaY <- DeltaY[validRows] validDeltaA <- DeltaA[validRows] } else { trainA <- validA <- A trainW <- validW <- W trainY <- validY <- Y trainDeltaA <- validDeltaA <- DeltaA trainDeltaY <- validDeltaY <- DeltaY } # include only DeltaA = 1 and DeltaY = 1 folks include <- (trainDeltaA == 1) & (trainDeltaY == 1) # check for partially cross-validated standard error request partial_cv <- se_cv == "partial" Qn_se <- NULL # Super Learner if (!is.null(SL_Q)) { if (!stratify) { if (length(SL_Q) > 1 | is.list(SL_Q)) { fm <- SuperLearner::SuperLearner( Y = trainY[include], X = data.frame(A = trainA, trainW)[include, , drop = FALSE], verbose = verbose, family = family, SL.library = SL_Q, method = if (family$family == "binomial") { tmp_method.CC_nloglik() } else { tmp_method.CC_LS() }, control = list(saveCVFitLibrary = partial_cv), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)) ) Qn <- sapply(a_0, function(x) { stats::predict( fm, newdata = data.frame(A = x, validW), onlySL = TRUE )[[1]] }, simplify = FALSE) if(partial_cv){ Qn_se <- sapply(X = a_0, FUN = partial_cv_preds, W = validW, fit_sl = fm, include = include, simplify = FALSE) } } else if (length(SL_Q) == 1) { fm <- do.call(SL_Q, args = list( Y = trainY[include], X = data.frame(A = trainA, trainW)[include, , drop = FALSE], verbose = verbose, newX = data.frame(A = validA, validW), obsWeights = rep(1, length(trainA[include])), family = family )) Qn <- sapply(a_0, function(x) { stats::predict(object = fm$fit, newdata = data.frame( A = x, validW )) }, simplify = FALSE) } } else { # if stratify is TRUE if (length(SL_Q) > 1 | is.list(SL_Q)) { tmp <- sapply(a_0, function(x) { include2 <- trainA == x # handle NAs properly include2[is.na(include2)] <- FALSE fm <- SuperLearner::SuperLearner( Y = trainY[include2 & include], X = trainW[include2 & include, , drop = FALSE], newX = validW, verbose = verbose, family = family, SL.library = SL_Q, method = if (family$family == "binomial") { tmp_method.CC_nloglik() } else { tmp_method.CC_LS() }, control = list(saveCVFitLibrary = partial_cv), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)) ) ##### THE LOGIC HERE IS FLAWED ##### # only a subset are used in fitting SL with A = a # so then, for people without A = a, which value do we fill in? # I guess just the regular super learner prediction? Since a # person without A = a was not used in fitting this model, it's # sort of cross-validated anyway. 
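# when partially cross-validated standard errors are requested, compute
# stratum-specific partially cross-validated predictions; otherwise no
# SE-specific predictions are returned for this stratum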
if(partial_cv){ Qn_se_a_0 <- partial_cv_preds(fit_sl = fm, a_0 = x, W = validW, include = include & include2) }else{ Qn_se_a_0 <- NULL } return(list(est = fm$SL.predict, fm = fm, est_se = Qn_se_a_0)) }, simplify = FALSE) Qn <- lapply(tmp, "[[", "est") fm <- lapply(tmp, "[[", "fm") Qn_se <- lapply(tmp, "[[", "est_se") } else if (length(SL_Q) == 1) { tmp <- sapply(a_0, function(x) { include2 <- trainA == x # handle NAs properly include2[is.na(include2)] <- FALSE # call function fm <- do.call(SL_Q, args = list( Y = trainY[include2 & include], X = trainW[include2 & include, , drop = FALSE], newX = validW, verbose = verbose, obsWeights = rep(1, sum(include2 & include)), family = family )) list(est = fm$pred, fm = fm) }, simplify = FALSE) Qn <- lapply(tmp, "[[", 1) fm <- lapply(tmp, "[[", 2) } } } # GLM if (!is.null(glm_Q)) { if (!stratify) { fm <- stats::glm( stats::as.formula(paste0("Y~", glm_Q)), data = data.frame(Y = trainY, A = trainA, trainW)[ include, , drop = FALSE ], family = family ) Qn <- sapply(a_0, function(a, fm) { stats::predict( fm, newdata = data.frame(A = a, validW), type = "response" ) }, fm = fm, simplify = FALSE) } else { tmp <- sapply(a_0, function(a) { include2 <- trainA == a # handle NAs properly include2[is.na(include2)] <- FALSE fm <- stats::glm( stats::as.formula(paste0( "trainY[include2 & include] ~ ", glm_Q )), data = trainW[include2 & include, , drop = FALSE], family = family ) return(list(est = stats::predict( fm, newdata = validW, type = "response" ), fm = fm)) }, simplify = FALSE) Qn <- lapply(tmp, "[[", 1) fm <- lapply(tmp, "[[", 2) } } out <- list(est = Qn, fm = NULL, est_se = Qn_se) if (returnModels) { out$fm <- fm } return(out) } #' estimateQ_loop #' #' A helper loop function to clean up the internals of \code{drtmle} #' function. #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param W A \code{data.frame} of named covariates #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param Qn A list of outcome regression estimates evaluated on observed data. #' If NULL then 0 is used for all Qn (as is needed to estimate reduced #' dimension regression for adaptive_iptw) #' @param gn A list of propensity regression estimates evaluated on observed #' data #' @param SL_Qr A vector of characters or a list describing the Super Learner #' library to be used for the first reduced-dimension regression. #' @param glm_Qr A character describing a formula to be used in the call to #' \code{glm} for the first reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param a_0 A list of fixed treatment values. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param family Should be gaussian() unless called from adaptive_iptw with #' binary \code{Y}. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param se_cv Should cross-validated nuisance parameter estimates be used #' for computing standard errors? #' Options are \code{"none"} = no cross-validation is performed; \code{"partial"} = #' only applicable if Super Learner is used for nuisance parameter estimates; #' \code{"full"} = full cross-validation is performed. 
See vignette for further #' details. Ignored if \code{cvFolds > 1}, since then #' cross-validated nuisance parameter estimates are used by default and it is #' assumed that you want full cross-validated standard errors. #' @param se_cvFolds If cross-validated nuisance parameter estimates are used #' to compute standard errors, how many folds should be used in this computation. #' If \code{se_cv = "partial"}, then this option sets the number of folds used #' by the \code{SuperLearner} fitting procedure. estimateQ_loop <- function( validRows, Y, A, W, DeltaA, DeltaY, verbose, returnModels, SL_Q, a_0, stratify, glm_Q, family, use_future, se_cv, se_cvFolds ){ if (use_future) { QnOut <- future.apply::future_lapply( X = validRows, FUN = estimateQ, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, verbose = verbose, returnModels = returnModels, SL_Q = SL_Q, a_0 = a_0, stratify = stratify, glm_Q = glm_Q, family = family, se_cv = se_cv, se_cvFolds = se_cvFolds ) } else { QnOut <- lapply( X = validRows, FUN = estimateQ, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, verbose = verbose, returnModels = returnModels, SL_Q = SL_Q, a_0 = a_0, stratify = stratify, glm_Q = glm_Q, family = family, se_cv = se_cv, se_cvFolds = se_cvFolds ) } return(QnOut) } #' estimateQrn #' #' Estimates the reduced dimension regressions necessary for the #' fluctuations of g #' #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param W A \code{data.frame} of named covariates #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param Qn A list of outcome regression estimates evaluated on observed data. #' If NULL then 0 is used for all Qn (as is needed to estimate reduced #' dimension regression for adaptive_iptw) #' @param gn A list of propensity regression estimates evaluated on observed #' data #' @param SL_Qr A vector of characters or a list describing the Super Learner #' library to be used for the first reduced-dimension regression. #' @param glm_Qr A character describing a formula to be used in the call to #' \code{glm} for the first reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param a_0 A list of fixed treatment values. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param family Should be gaussian() unless called from adaptive_iptw with #' binary \code{Y}. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @importFrom SuperLearner SuperLearner trimLogit #' @importFrom stats predict glm as.formula gaussian binomial # estimateQrn <- function(Y, A, W, DeltaA, DeltaY, Qn, gn, glm_Qr, SL_Qr, family = stats::gaussian(), a_0, returnModels, validRows = NULL) { # if estimateQrn is called in adaptive_iptw, then Qn will enter as NULL. 
# Here we fill its value to 0 so that we estimate the correct nuisance # parameter for adaptive_iptw if (is.null(Qn)) { Qn <- vector(mode = "list", length = length(a_0)) for (i in seq_along(a_0)) { Qn[[i]] <- rep(0, length(Y)) } } # subset data into training and validation sets if (length(validRows) != length(Y)) { trainY <- Y[-validRows] trainA <- A[-validRows] trainW <- W[-validRows, , drop = FALSE] trainDeltaA <- DeltaA[-validRows] trainDeltaY <- DeltaY[-validRows] train_gn <- lapply(gn, "[", -validRows) train_Qn <- lapply(Qn, "[", -validRows) validW <- W[validRows, , drop = FALSE] validA <- A[validRows] validY <- Y[validRows] validDeltaA <- DeltaA[-validRows] validDeltaY <- DeltaY[-validRows] valid_gn <- lapply(gn, "[", validRows) valid_Qn <- lapply(Qn, "[", validRows) } else { trainA <- validA <- A trainW <- validW <- W trainY <- validY <- Y trainDeltaY <- validDeltaY <- DeltaY trainDeltaA <- validDeltaA <- DeltaA train_gn <- valid_gn <- gn train_Qn <- valid_Qn <- Qn } if (is.null(SL_Qr) & is.null(glm_Qr)) { stop("Specify Super Learner library or GLM formula for Qr") } if (!is.null(SL_Qr) & !is.null(glm_Qr)) { warning(paste0( "Super Learner library and GLM formula specified.", "Proceeding with Super Learner only." )) glm_Qr <- NULL } # Super Learner if (!is.null(SL_Qr)) { Qrn <- mapply( a = a_0, train_g = train_gn, train_Q = train_Qn, valid_g = valid_gn, valid_Q = valid_Qn, SIMPLIFY = FALSE, FUN = function(a, train_g, train_Q, valid_g, valid_Q) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_g)) == 1) { # warning(paste0( # "Only one unique value of gn", a, # ". Using empirical average as Qr estimate." # )) m1 <- mean((trainY - train_Q)[Aeqa & trainDeltaA == 1 & trainDeltaY == 1]) est <- rep(m1, length(validY)) fm <- list(fit = list(object = m1), pred = NULL) class(fm$fit) <- "SL.mean" } else { if (length(SL_Qr) > 1) { suppressWarnings(fm <- SuperLearner::SuperLearner( Y = (trainY - train_Q)[Aeqa & trainDeltaA == 1 & trainDeltaY == 1], X = data.frame(gn = train_g[Aeqa & trainDeltaA == 1 & trainDeltaY == 1]), newX = data.frame( gn = valid_g ), family = family, SL.library = SL_Qr, method = tmp_method.CC_LS() )) # if all weights = 0, use discrete SL if (!all(fm$coef == 0)) { est <- fm$SL.predict } else { est <- fm$library.predict[, which.min(fm$cvRisk)] } } else if (length(SL_Qr) == 1) { fm <- do.call(SL_Qr, args = list( Y = (trainY - train_Q)[Aeqa & trainDeltaA == 1 & trainDeltaY == 1], X = data.frame(gn = train_g[Aeqa & trainDeltaA == 1 & trainDeltaY == 1]), family = family, newX = data.frame(gn = valid_g), obsWeights = rep(1, length(trainY[Aeqa & trainDeltaA == 1 & trainDeltaY == 1])) )) est <- fm$pred } } out <- list(est = est, fm = NULL) if (returnModels) { out$fm <- fm } return(out) } ) } # GLM if (!is.null(glm_Qr)) { Qrn <- mapply( a = a_0, train_g = train_gn, train_Q = train_Qn, valid_g = valid_gn, valid_Q = valid_Qn, SIMPLIFY = FALSE, FUN = function(a, train_g, train_Q, valid_g, valid_Q) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_g)) == 1) { # warning(paste0( # "Only one unique value of gn", a, # ". Using empirical average as Qr estimate." 
# )) glm_Qr <- "1" } fm <- stats::glm( stats::as.formula(paste0("Qrn ~", glm_Qr)), data = data.frame( Qrn = (trainY - train_Q)[Aeqa & trainDeltaY == 1 & trainDeltaY == 1], gn = train_g[Aeqa & trainDeltaY == 1 & trainDeltaY == 1] ), family = family ) est <- stats::predict( fm, newdata = data.frame(gn = valid_g), type = "response" ) out <- list(est = est, fm = NULL) if (returnModels) { out$fm <- fm } return(out) } ) } # return estimates and models return(list(est = lapply(Qrn, function(x) { x$est }), fm = lapply(Qrn, function(x) { fm <- x$fm }))) Qrn } #' estimateQrn_loop #' #' Helper function to clean up internal code of \code{drtmle} function. #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param W A \code{data.frame} of named covariates #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param Qn A list of outcome regression estimates evaluated on observed data. #' If NULL then 0 is used for all Qn (as is needed to estimate reduced #' dimension regression for adaptive_iptw) #' @param gn A list of propensity regression estimates evaluated on observed #' data #' @param SL_Qr A vector of characters or a list describing the Super Learner #' library to be used for the first reduced-dimension regression. #' @param glm_Qr A character describing a formula to be used in the call to #' \code{glm} for the first reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param a_0 A list of fixed treatment values. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param family Should be gaussian() unless called from adaptive_iptw with #' binary \code{Y}. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param use_future Should \code{future} be used in the fitting process. estimateQrn_loop <- function( validRows, Y, A, W, DeltaA, DeltaY, Qn, gn, SL_Qr, glm_Qr, family, a_0, returnModels, use_future ){ if (use_future) { QrnOut <- future.apply::future_lapply( X = validRows, FUN = estimateQrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, Qn = Qn, gn = gn, glm_Qr = glm_Qr, family = stats::gaussian(), SL_Qr = SL_Qr, a_0 = a_0, returnModels = returnModels ) } else { QrnOut <- lapply( X = validRows, FUN = estimateQrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, Qn = Qn, gn = gn, glm_Qr = glm_Qr, family = stats::gaussian(), SL_Qr = SL_Qr, a_0 = a_0, returnModels = returnModels ) } return(QrnOut) } #' estimategrn #' #' Estimates the reduced dimension regressions necessary for the additional #' fluctuations. #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1). #' @param W A \code{data.frame} of named covariates. #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed). #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed). #' @param Qn A list of outcome regression estimates evaluated on observed data. #' @param gn A list of propensity regression estimates evaluated on observed #' data. 
#' @param SL_gr A vector of characters or a list describing the Super Learner #' library to be used for the reduced-dimension propensity score. #' @param glm_gr A character describing a formula to be used in the call to #' \code{glm} for the second reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param reduction A character equal to \code{'univariate'} for a univariate #' misspecification correction or \code{'bivariate'} for the bivariate version. #' @param tolg A numeric indicating the minimum value for estimates of the #' propensity score. #' @param a_0 A list of fixed treatment values . #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' #' @importFrom SuperLearner SuperLearner trimLogit #' @importFrom stats predict glm as.formula estimategrn <- function(Y, A, W, DeltaA, DeltaY, Qn, gn, SL_gr, tolg, glm_gr, a_0, reduction, returnModels, validRows) { if (length(validRows) != length(Y)) { trainY <- Y[-validRows] trainA <- A[-validRows] trainW <- W[-validRows, , drop = FALSE] trainDeltaA <- DeltaA[-validRows] trainDeltaY <- DeltaY[-validRows] train_gn <- lapply(gn, "[", -validRows) train_Qn <- lapply(Qn, "[", -validRows) validW <- W[validRows, , drop = FALSE] validA <- A[validRows] validY <- Y[validRows] validDeltaA <- DeltaA[-validRows] validDeltaY <- DeltaY[-validRows] valid_gn <- lapply(gn, "[", validRows) valid_Qn <- lapply(Qn, "[", validRows) } else { trainA <- validA <- A trainW <- validW <- W trainY <- validY <- Y trainDeltaY <- validDeltaY <- DeltaY trainDeltaA <- validDeltaA <- DeltaA train_gn <- valid_gn <- gn train_Qn <- valid_Qn <- Qn } if (is.null(SL_gr) & is.null(glm_gr)) { stop("Specify Super Learner library or GLM formula for gr") } if (!is.null(SL_gr) & !is.null(glm_gr)) { warning(paste0( "Super Learner library and GLM formula specified.", "Proceeding with Super Learner only." 
)) glm_gr <- NULL } # Super Learner if (!is.null(SL_gr)) { grn <- mapply( a = a_0, train_Q = train_Qn, train_g = train_gn, valid_Q = valid_Qn, valid_g = valid_gn, SIMPLIFY = FALSE, FUN = function(a, train_Q, train_g, valid_Q, valid_g) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_Q)) == 1) { # warning(paste0( # "Only one unique value of Qn.", # "Proceeding with empirical mean for grn" # )) if (reduction == "univariate") { m1 <- mean((as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1) - train_g) / train_g) grn1 <- rep(m1, length(validY)) m2 <- mean(as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1)) grn2 <- rep(m2, length(validY)) grn2[grn2 < tolg] <- tolg fm1 <- list(fit = list(object = m1), pred = NULL) class(fm1$fit) <- "SL.mean" fm2 <- list(fit = list(object = m2), pred = NULL) class(fm2$fit) <- "SL.mean" } else if (reduction == "bivariate") { m2 <- mean(as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1)) grn2 <- rep(m2, length(validY)) grn2[grn2 < tolg] <- tolg fm2 <- list(fit = list(object = m2), pred = NULL) class(fm2$fit) <- "SL.mean" fm1 <- NULL grn1 <- rep(NA, length(validY)) } } else { if (length(SL_gr) > 1) { if (reduction == "univariate") { fm1 <- SuperLearner::SuperLearner( Y = (as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1) - train_g) / train_g, X = data.frame(Qn = train_Q), newX = data.frame(Qn = valid_Q), family = stats::gaussian(), SL.library = SL_gr, method = tmp_method.CC_LS() ) fm2 <- SuperLearner::SuperLearner( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q), newX = data.frame(Qn = valid_Q), family = stats::binomial(), SL.library = SL_gr, method = tmp_method.CC_nloglik() ) if (!all(fm1$coef == 0)) { grn1 <- fm1$SL.predict } else { grn1 <- fm1$library.predict[, which.min(fm1$cvRisk)] } if (!all(fm2$coef == 0)) { grn2 <- fm2$SL.predict } else { grn2 <- fm2$library.predict[, which.min(fm2$cvRisk)] } grn2[grn2 < tolg] <- tolg } else if (reduction == "bivariate") { fm2 <- SuperLearner::SuperLearner( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q, gn = train_g), newX = data.frame(Qn = valid_Q, gn = valid_g), family = stats::binomial(), SL.library = SL_gr, method = tmp_method.CC_nloglik() ) if (!all(fm2$coef == 0)) { grn2 <- fm2$SL.predict } else { grn2 <- fm2$library.predict[, which.min(fm2$cvRisk)] } grn2[grn2 < tolg] <- tolg fm1 <- NULL grn1 <- rep(NA, length(validY)) } } else if (length(SL_gr) == 1) { if (reduction == "univariate") { fm1 <- do.call(SL_gr, args = list( Y = (as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1) - train_g) / train_g, X = data.frame(Qn = train_Q), obsWeights = rep( 1, length(trainA) ), newX = data.frame(Qn = valid_Q), family = stats::gaussian() )) grn1 <- fm1$pred fm2 <- do.call(SL_gr, args = list( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q), obsWeights = rep( 1, length(trainA) ), newX = data.frame(Qn = valid_Q), family = stats::binomial() )) grn2 <- fm2$pred grn2[grn2 < tolg] <- tolg } else if (reduction == "bivariate") { fm2 <- do.call(SL_gr, args = list( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q, gn = train_g), obsWeights = rep(1, length(trainA)), newX = data.frame(Qn = valid_Q, gn = valid_g), family = stats::binomial() )) grn2 <- fm2$pred grn2[grn2 < tolg] <- tolg fm1 <- NULL grn1 <- rep(NA, length(validY)) } } } out <- list(grn1 = grn1, grn2 = grn2, fm1 = NULL, fm2 = NULL) if (returnModels) { out$fm1 <- fm1 out$fm2 <- 
fm2 } return(out) } ) } # GLM if (!is.null(glm_gr)) { grn <- mapply( a = a_0, train_Q = train_Qn, train_g = train_gn, valid_Q = valid_Qn, valid_g = valid_gn, SIMPLIFY = FALSE, FUN = function(a, train_Q, train_g, valid_Q, valid_g) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_Q)) == 1) { glm_gr <- "1" } if (reduction == "univariate") { fm1 <- stats::glm( stats::as.formula(paste0("grn1~", glm_gr)), family = "gaussian", data = data.frame(grn1 = (as.numeric( Aeqa & trainDeltaA == 1 & trainDeltaY == 1 ) - train_g) / train_g, Qn = train_Q) ) grn1 <- stats::predict(fm1, newdata = data.frame( grn1 = rep(0, length(validA)), Qn = valid_Q ), type = "response") fm2 <- stats::glm( stats::as.formula(paste0("A~", glm_gr)), family = "binomial", data = data.frame(A = as.numeric(Aeqa & trainDeltaY == 1 & trainDeltaA == 1), Qn = train_Q) ) grn2 <- stats::predict( fm2, type = "response", newdata = data.frame(A = rep(0, length(validA)), Qn = valid_Q) ) } else if (reduction == "bivariate") { fm1 <- NULL grn1 <- rep(NA, length(validY)) fm2 <- stats::glm( stats::as.formula(paste0("A~", glm_gr)), family = "binomial", data = data.frame( A = as.numeric(Aeqa & trainDeltaY == 1 & trainDeltaA == 1), Qn = train_Q, gn = train_g ) ) grn2 <- stats::predict( fm2, type = "response", newdata = data.frame( A = rep(0, length(validA)), Qn = valid_Q, gn = valid_g ) ) } grn2[grn2 < tolg] <- tolg out <- list(grn1 = grn1, grn2 = grn2, fm1 = NULL, fm2 = NULL) if (returnModels) { out$fm1 <- fm1 out$fm2 <- fm2 } return(out) } ) } tmp1 <- lapply(grn, function(x) { data.frame(grn1 = x$grn1, grn2 = x$grn2) }) tmp2 <- lapply(grn, function(x) { list(fm1 = x$fm1, fm2 = x$fm2) }) return(list(est = tmp1, fm = tmp2)) } #' estimategrn_loop #' #' Helper function to clean up the internal code of \code{drtmle} #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1). #' @param W A \code{data.frame} of named covariates. #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed). #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed). #' @param Qn A list of outcome regression estimates evaluated on observed data. #' @param gn A list of propensity regression estimates evaluated on observed #' data. #' @param SL_gr A vector of characters or a list describing the Super Learner #' library to be used for the reduced-dimension propensity score. #' @param glm_gr A character describing a formula to be used in the call to #' \code{glm} for the second reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param reduction A character equal to \code{'univariate'} for a univariate #' misspecification correction or \code{'bivariate'} for the bivariate version. #' @param tolg A numeric indicating the minimum value for estimates of the #' propensity score. #' @param a_0 A list of fixed treatment values . #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param use_future Should \code{future} be used to parallelize? 
estimategrn_loop <- function( validRows, Y, A, W, DeltaA, DeltaY, tolg, Qn, gn, glm_gr, SL_gr, a_0, reduction, returnModels, use_future ){ if (use_future) { grnOut <- future.apply::future_lapply( X = validRows, FUN = estimategrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, Qn = Qn, gn = gn, glm_gr = glm_gr, SL_gr = SL_gr, a_0 = a_0, reduction = reduction, returnModels = returnModels ) } else { grnOut <- lapply( X = validRows, FUN = estimategrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, Qn = Qn, gn = gn, glm_gr = glm_gr, SL_gr = SL_gr, a_0 = a_0, reduction = reduction, returnModels = returnModels ) } return(grnOut) }
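# ---------------------------------------------------------------------------
# Illustrative sketch (not run): how the unexported helpers in this file can
# be called directly for a single fold. These functions are normally invoked
# internally by drtmle(); the simulated data, the Super Learner library
# choices ("SL.glm", "SL.mean"), and the single-fold validRows below are
# assumptions made purely for illustration.
# ---------------------------------------------------------------------------
if (FALSE) {
  set.seed(1234)
  n <- 200
  W <- data.frame(W1 = stats::rnorm(n), W2 = stats::rbinom(n, 1, 0.5))
  A <- stats::rbinom(n, 1, stats::plogis(0.2 * W$W1))
  Y <- stats::rnorm(n, mean = A + W$W1)
  DeltaA <- DeltaY <- rep(1, n) # no missing treatments or outcomes
  validRows <- list(seq_len(n)) # one "fold" = no cross-fitting

  # propensity score estimates P(A = a | W) for each a in a_0, truncated at tolg
  gnOut <- estimateG_loop(
    validRows = validRows, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY,
    tolg = 0.01, verbose = FALSE, stratify = FALSE, returnModels = FALSE,
    SL_g = c("SL.glm", "SL.mean"), glm_g = NULL, a_0 = c(1, 0),
    Qn = NULL, adapt_g = FALSE, use_future = FALSE
  )
  gn <- gnOut[[1]]$est # list over a_0 of propensity score vectors

  # outcome regression estimates E[Y | A = a, W] for each a in a_0
  QnOut <- estimateQ_loop(
    validRows = validRows, Y = Y, A = A, W = W, DeltaA = DeltaA,
    DeltaY = DeltaY, verbose = FALSE, returnModels = FALSE,
    SL_Q = c("SL.glm", "SL.mean"), a_0 = c(1, 0), stratify = FALSE,
    glm_Q = NULL, family = stats::gaussian(), use_future = FALSE,
    se_cv = "none", se_cvFolds = 10
  )
  Qn <- QnOut[[1]]$est # list over a_0 of outcome regression vectors
}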
#' @param tolg A numeric indicating the minimum value for estimates of the #' propensity score. #' @param verbose A boolean indicating whether to print status updates. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param glm_g A character describing a formula to be used in the call to #' \code{glm} for the propensity score. #' @param a_0 A vector of fixed treatment values at which to return marginal #' mean estimates. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param Qn A \code{list} of estimates of the outcome regression for each value #' in \code{a_0}. Only needed if \code{adapt_g = TRUE}. #' @param adapt_g A boolean indicating whether propensity score is adaptive #' to outcome regression. #' @param use_future Should \code{future} be used for parallelization? #' @param se_cv Should cross-validated nuisance parameter estimates be used #' for computing standard errors? #' Options are \code{"none"} = no cross-validation is performed; \code{"partial"} = #' only applicable if Super Learner is used for nuisance parameter estimates; #' \code{"full"} = full cross-validation is performed. See vignette for further #' details. Ignored if \code{cvFolds > 1}, since then #' cross-validated nuisance parameter estimates are used by default and it is #' assumed that you want full cross-validated standard errors. #' @param se_cvFolds If cross-validated nuisance parameter estimates are used #' to compute standard errors, how many folds should be used in this computation. #' If \code{se_cv = "partial"}, then this option sets the number of folds used #' by the \code{SuperLearner} fitting procedure. estimateG_loop <- function( validRows, A, W, DeltaA, DeltaY, tolg, verbose, stratify, returnModels, SL_g, glm_g, a_0, Qn, adapt_g, use_future, se_cv = "none", se_cvFolds = 10 ){ if (use_future) { gnOut <- future.apply::future_lapply( X = validRows, FUN = estimateG, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, verbose = verbose, stratify = stratify, returnModels = returnModels, SL_g = SL_g, glm_g = glm_g, a_0 = a_0, Qn = Qn, adapt_g = adapt_g, se_cv = se_cv, se_cvFolds = se_cvFolds ) } else { gnOut <- lapply( X = validRows, FUN = estimateG, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, verbose = verbose, stratify = stratify, returnModels = returnModels, SL_g = SL_g, glm_g = glm_g, a_0 = a_0, Qn = Qn, adapt_g = adapt_g, se_cv = se_cv, se_cvFolds = se_cvFolds ) } return(gnOut) } #' estimateQ #' #' Function to estimate initial outcome regression #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1). #' @param W A \code{data.frame} of named covariates. #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed). #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed). #' @param SL_Q A vector of characters or a list describing the Super Learner #' library to be used for the outcome regression. #' @param verbose A boolean indicating whether to print status updates. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. 
#' @param glm_Q A character describing a formula to be used in the call to #' \code{glm} for the outcome regression. #' @param a_0 A list of fixed treatment values #' @param family A character passed to \code{SuperLearner} #' @param stratify A \code{boolean} indicating whether to estimate the outcome #' regression separately for observations with \code{A} equal to 0/1 (if #' \code{TRUE}) or to pool across \code{A} (if \code{FALSE}). #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param se_cv Should cross-validated nuisance parameter estimates be used #' for computing standard errors? #' Options are \code{"none"} = no cross-validation is performed; \code{"partial"} = #' only applicable if Super Learner is used for nuisance parameter estimates; #' \code{"full"} = full cross-validation is performed. See vignette for further #' details. Ignored if \code{cvFolds > 1}, since then #' cross-validated nuisance parameter estimates are used by default and it is #' assumed that you want full cross-validated standard errors. #' @param se_cvFolds If cross-validated nuisance parameter estimates are used #' to compute standard errors, how many folds should be used in this computation. #' If \code{se_cv = "partial"}, then this option sets the number of folds used #' by the \code{SuperLearner} fitting procedure. #' @param ... Additional arguments (not currently used) #' #' @importFrom SuperLearner SuperLearner trimLogit #' @importFrom stats predict glm as.formula # estimateQ <- function(Y, A, W, DeltaA, DeltaY, SL_Q, glm_Q, a_0, stratify, family, verbose = FALSE, returnModels = FALSE, se_cv = "none", se_cvFolds = 10, validRows = NULL, ...) { if (is.null(SL_Q) & is.null(glm_Q)) { stop("Specify Super Learner library or GLM formula for Q") } if (!is.null(SL_Q) & !is.null(glm_Q)) { warning(paste0( "Super Learner library and GLM formula specified.", " Proceeding with Super Learner only." 
)) glm_Q <- NULL } # subset data into training and validation sets if (length(validRows) != length(Y)) { trainY <- Y[-validRows] trainA <- A[-validRows] trainW <- W[-validRows, , drop = FALSE] trainDeltaA <- DeltaA[-validRows] trainDeltaY <- DeltaY[-validRows] validW <- W[validRows, , drop = FALSE] validA <- A[validRows] validY <- Y[validRows] validDeltaY <- DeltaY[validRows] validDeltaA <- DeltaA[validRows] } else { trainA <- validA <- A trainW <- validW <- W trainY <- validY <- Y trainDeltaA <- validDeltaA <- DeltaA trainDeltaY <- validDeltaY <- DeltaY } # include only DeltaA = 1 and DeltaY = 1 folks include <- (trainDeltaA == 1) & (trainDeltaY == 1) # check for partially cross-validated standard error request partial_cv <- se_cv == "partial" Qn_se <- NULL # Super Learner if (!is.null(SL_Q)) { if (!stratify) { if (length(SL_Q) > 1 | is.list(SL_Q)) { fm <- SuperLearner::SuperLearner( Y = trainY[include], X = data.frame(A = trainA, trainW)[include, , drop = FALSE], verbose = verbose, family = family, SL.library = SL_Q, method = if (family$family == "binomial") { tmp_method.CC_nloglik() } else { tmp_method.CC_LS() }, control = list(saveCVFitLibrary = partial_cv), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)) ) Qn <- sapply(a_0, function(x) { stats::predict( fm, newdata = data.frame(A = x, validW), onlySL = TRUE )[[1]] }, simplify = FALSE) if(partial_cv){ Qn_se <- sapply(X = a_0, FUN = partial_cv_preds, W = validW, fit_sl = fm, include = include, simplify = FALSE) } } else if (length(SL_Q) == 1) { fm <- do.call(SL_Q, args = list( Y = trainY[include], X = data.frame(A = trainA, trainW)[include, , drop = FALSE], verbose = verbose, newX = data.frame(A = validA, validW), obsWeights = rep(1, length(trainA[include])), family = family )) Qn <- sapply(a_0, function(x) { stats::predict(object = fm$fit, newdata = data.frame( A = x, validW )) }, simplify = FALSE) } } else { # if stratify is TRUE if (length(SL_Q) > 1 | is.list(SL_Q)) { tmp <- sapply(a_0, function(x) { include2 <- trainA == x # handle NAs properly include2[is.na(include2)] <- FALSE fm <- SuperLearner::SuperLearner( Y = trainY[include2 & include], X = trainW[include2 & include, , drop = FALSE], newX = validW, verbose = verbose, family = family, SL.library = SL_Q, method = if (family$family == "binomial") { tmp_method.CC_nloglik() } else { tmp_method.CC_LS() }, control = list(saveCVFitLibrary = partial_cv), cvControl = list(ifelse(partial_cv, se_cvFolds, 10)) ) ##### THE LOGIC HERE IS FLAWED ##### # only a subset are used in fitting SL with A = a # so then, for people without A = a, which value do we fill in? # I guess just the regular super learner prediction? Since a # person without A = a was not used in fitting this model, it's # sort of cross-validated anyway. 
if(partial_cv){ Qn_se_a_0 <- partial_cv_preds(fit_sl = fm, a_0 = x, W = validW, include = include & include2) }else{ Qn_se_a_0 <- NULL } return(list(est = fm$SL.predict, fm = fm, est_se = Qn_se_a_0)) }, simplify = FALSE) Qn <- lapply(tmp, "[[", "est") fm <- lapply(tmp, "[[", "fm") Qn_se <- lapply(tmp, "[[", "est_se") } else if (length(SL_Q) == 1) { tmp <- sapply(a_0, function(x) { include2 <- trainA == x # handle NAs properly include2[is.na(include2)] <- FALSE # call function fm <- do.call(SL_Q, args = list( Y = trainY[include2 & include], X = trainW[include2 & include, , drop = FALSE], newX = validW, verbose = verbose, obsWeights = rep(1, sum(include2 & include)), family = family )) list(est = fm$pred, fm = fm) }, simplify = FALSE) Qn <- lapply(tmp, "[[", 1) fm <- lapply(tmp, "[[", 2) } } } # GLM if (!is.null(glm_Q)) { if (!stratify) { fm <- stats::glm( stats::as.formula(paste0("Y~", glm_Q)), data = data.frame(Y = trainY, A = trainA, trainW)[ include, , drop = FALSE ], family = family ) Qn <- sapply(a_0, function(a, fm) { stats::predict( fm, newdata = data.frame(A = a, validW), type = "response" ) }, fm = fm, simplify = FALSE) } else { tmp <- sapply(a_0, function(a) { include2 <- trainA == a # handle NAs properly include2[is.na(include2)] <- FALSE fm <- stats::glm( stats::as.formula(paste0( "trainY[include2 & include] ~ ", glm_Q )), data = trainW[include2 & include, , drop = FALSE], family = family ) return(list(est = stats::predict( fm, newdata = validW, type = "response" ), fm = fm)) }, simplify = FALSE) Qn <- lapply(tmp, "[[", 1) fm <- lapply(tmp, "[[", 2) } } out <- list(est = Qn, fm = NULL, est_se = Qn_se) if (returnModels) { out$fm <- fm } return(out) } #' estimateQ_loop #' #' A helper loop function to clean up the internals of \code{drtmle} #' function. #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param W A \code{data.frame} of named covariates #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param Qn A list of outcome regression estimates evaluated on observed data. #' If NULL then 0 is used for all Qn (as is needed to estimate reduced #' dimension regression for adaptive_iptw) #' @param gn A list of propensity regression estimates evaluated on observed #' data #' @param SL_Qr A vector of characters or a list describing the Super Learner #' library to be used for the first reduced-dimension regression. #' @param glm_Qr A character describing a formula to be used in the call to #' \code{glm} for the first reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param a_0 A list of fixed treatment values. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param family Should be gaussian() unless called from adaptive_iptw with #' binary \code{Y}. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param se_cv Should cross-validated nuisance parameter estimates be used #' for computing standard errors? #' Options are \code{"none"} = no cross-validation is performed; \code{"partial"} = #' only applicable if Super Learner is used for nuisance parameter estimates; #' \code{"full"} = full cross-validation is performed. 
See vignette for further #' details. Ignored if \code{cvFolds > 1}, since then #' cross-validated nuisance parameter estimates are used by default and it is #' assumed that you want full cross-validated standard errors. #' @param se_cvFolds If cross-validated nuisance parameter estimates are used #' to compute standard errors, how many folds should be used in this computation. #' If \code{se_cv = "partial"}, then this option sets the number of folds used #' by the \code{SuperLearner} fitting procedure. estimateQ_loop <- function( validRows, Y, A, W, DeltaA, DeltaY, verbose, returnModels, SL_Q, a_0, stratify, glm_Q, family, use_future, se_cv, se_cvFolds ){ if (use_future) { QnOut <- future.apply::future_lapply( X = validRows, FUN = estimateQ, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, verbose = verbose, returnModels = returnModels, SL_Q = SL_Q, a_0 = a_0, stratify = stratify, glm_Q = glm_Q, family = family, se_cv = se_cv, se_cvFolds = se_cvFolds ) } else { QnOut <- lapply( X = validRows, FUN = estimateQ, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, verbose = verbose, returnModels = returnModels, SL_Q = SL_Q, a_0 = a_0, stratify = stratify, glm_Q = glm_Q, family = family, se_cv = se_cv, se_cvFolds = se_cvFolds ) } return(QnOut) } #' estimateQrn #' #' Estimates the reduced dimension regressions necessary for the #' fluctuations of g #' #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param W A \code{data.frame} of named covariates #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param Qn A list of outcome regression estimates evaluated on observed data. #' If NULL then 0 is used for all Qn (as is needed to estimate reduced #' dimension regression for adaptive_iptw) #' @param gn A list of propensity regression estimates evaluated on observed #' data #' @param SL_Qr A vector of characters or a list describing the Super Learner #' library to be used for the first reduced-dimension regression. #' @param glm_Qr A character describing a formula to be used in the call to #' \code{glm} for the first reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param a_0 A list of fixed treatment values. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param family Should be gaussian() unless called from adaptive_iptw with #' binary \code{Y}. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @importFrom SuperLearner SuperLearner trimLogit #' @importFrom stats predict glm as.formula gaussian binomial # estimateQrn <- function(Y, A, W, DeltaA, DeltaY, Qn, gn, glm_Qr, SL_Qr, family = stats::gaussian(), a_0, returnModels, validRows = NULL) { # if estimateQrn is called in adaptive_iptw, then Qn will enter as NULL. 
# Here we fill its value to 0 so that we estimate the correct nuisance # parameter for adaptive_iptw if (is.null(Qn)) { Qn <- vector(mode = "list", length = length(a_0)) for (i in seq_along(a_0)) { Qn[[i]] <- rep(0, length(Y)) } } # subset data into training and validation sets if (length(validRows) != length(Y)) { trainY <- Y[-validRows] trainA <- A[-validRows] trainW <- W[-validRows, , drop = FALSE] trainDeltaA <- DeltaA[-validRows] trainDeltaY <- DeltaY[-validRows] train_gn <- lapply(gn, "[", -validRows) train_Qn <- lapply(Qn, "[", -validRows) validW <- W[validRows, , drop = FALSE] validA <- A[validRows] validY <- Y[validRows] validDeltaA <- DeltaA[-validRows] validDeltaY <- DeltaY[-validRows] valid_gn <- lapply(gn, "[", validRows) valid_Qn <- lapply(Qn, "[", validRows) } else { trainA <- validA <- A trainW <- validW <- W trainY <- validY <- Y trainDeltaY <- validDeltaY <- DeltaY trainDeltaA <- validDeltaA <- DeltaA train_gn <- valid_gn <- gn train_Qn <- valid_Qn <- Qn } if (is.null(SL_Qr) & is.null(glm_Qr)) { stop("Specify Super Learner library or GLM formula for Qr") } if (!is.null(SL_Qr) & !is.null(glm_Qr)) { warning(paste0( "Super Learner library and GLM formula specified.", "Proceeding with Super Learner only." )) glm_Qr <- NULL } # Super Learner if (!is.null(SL_Qr)) { Qrn <- mapply( a = a_0, train_g = train_gn, train_Q = train_Qn, valid_g = valid_gn, valid_Q = valid_Qn, SIMPLIFY = FALSE, FUN = function(a, train_g, train_Q, valid_g, valid_Q) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_g)) == 1) { # warning(paste0( # "Only one unique value of gn", a, # ". Using empirical average as Qr estimate." # )) m1 <- mean((trainY - train_Q)[Aeqa & trainDeltaA == 1 & trainDeltaY == 1]) est <- rep(m1, length(validY)) fm <- list(fit = list(object = m1), pred = NULL) class(fm$fit) <- "SL.mean" } else { if (length(SL_Qr) > 1) { suppressWarnings(fm <- SuperLearner::SuperLearner( Y = (trainY - train_Q)[Aeqa & trainDeltaA == 1 & trainDeltaY == 1], X = data.frame(gn = train_g[Aeqa & trainDeltaA == 1 & trainDeltaY == 1]), newX = data.frame( gn = valid_g ), family = family, SL.library = SL_Qr, method = tmp_method.CC_LS() )) # if all weights = 0, use discrete SL if (!all(fm$coef == 0)) { est <- fm$SL.predict } else { est <- fm$library.predict[, which.min(fm$cvRisk)] } } else if (length(SL_Qr) == 1) { fm <- do.call(SL_Qr, args = list( Y = (trainY - train_Q)[Aeqa & trainDeltaA == 1 & trainDeltaY == 1], X = data.frame(gn = train_g[Aeqa & trainDeltaA == 1 & trainDeltaY == 1]), family = family, newX = data.frame(gn = valid_g), obsWeights = rep(1, length(trainY[Aeqa & trainDeltaA == 1 & trainDeltaY == 1])) )) est <- fm$pred } } out <- list(est = est, fm = NULL) if (returnModels) { out$fm <- fm } return(out) } ) } # GLM if (!is.null(glm_Qr)) { Qrn <- mapply( a = a_0, train_g = train_gn, train_Q = train_Qn, valid_g = valid_gn, valid_Q = valid_Qn, SIMPLIFY = FALSE, FUN = function(a, train_g, train_Q, valid_g, valid_Q) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_g)) == 1) { # warning(paste0( # "Only one unique value of gn", a, # ". Using empirical average as Qr estimate." 
# )) glm_Qr <- "1" } fm <- stats::glm( stats::as.formula(paste0("Qrn ~", glm_Qr)), data = data.frame( Qrn = (trainY - train_Q)[Aeqa & trainDeltaY == 1 & trainDeltaY == 1], gn = train_g[Aeqa & trainDeltaY == 1 & trainDeltaY == 1] ), family = family ) est <- stats::predict( fm, newdata = data.frame(gn = valid_g), type = "response" ) out <- list(est = est, fm = NULL) if (returnModels) { out$fm <- fm } return(out) } ) } # return estimates and models return(list(est = lapply(Qrn, function(x) { x$est }), fm = lapply(Qrn, function(x) { fm <- x$fm }))) Qrn } #' estimateQrn_loop #' #' Helper function to clean up internal code of \code{drtmle} function. #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1) #' @param W A \code{data.frame} of named covariates #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed) #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed) #' @param Qn A list of outcome regression estimates evaluated on observed data. #' If NULL then 0 is used for all Qn (as is needed to estimate reduced #' dimension regression for adaptive_iptw) #' @param gn A list of propensity regression estimates evaluated on observed #' data #' @param SL_Qr A vector of characters or a list describing the Super Learner #' library to be used for the first reduced-dimension regression. #' @param glm_Qr A character describing a formula to be used in the call to #' \code{glm} for the first reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param a_0 A list of fixed treatment values. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param family Should be gaussian() unless called from adaptive_iptw with #' binary \code{Y}. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param use_future Should \code{future} be used in the fitting process. estimateQrn_loop <- function( validRows, Y, A, W, DeltaA, DeltaY, Qn, gn, SL_Qr, glm_Qr, family, a_0, returnModels, use_future ){ if (use_future) { QrnOut <- future.apply::future_lapply( X = validRows, FUN = estimateQrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, Qn = Qn, gn = gn, glm_Qr = glm_Qr, family = stats::gaussian(), SL_Qr = SL_Qr, a_0 = a_0, returnModels = returnModels ) } else { QrnOut <- lapply( X = validRows, FUN = estimateQrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, Qn = Qn, gn = gn, glm_Qr = glm_Qr, family = stats::gaussian(), SL_Qr = SL_Qr, a_0 = a_0, returnModels = returnModels ) } return(QrnOut) } #' estimategrn #' #' Estimates the reduced dimension regressions necessary for the additional #' fluctuations. #' #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1). #' @param W A \code{data.frame} of named covariates. #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed). #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed). #' @param Qn A list of outcome regression estimates evaluated on observed data. #' @param gn A list of propensity regression estimates evaluated on observed #' data. 
#' @param SL_gr A vector of characters or a list describing the Super Learner #' library to be used for the reduced-dimension propensity score. #' @param glm_gr A character describing a formula to be used in the call to #' \code{glm} for the second reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param reduction A character equal to \code{'univariate'} for a univariate #' misspecification correction or \code{'bivariate'} for the bivariate version. #' @param tolg A numeric indicating the minimum value for estimates of the #' propensity score. #' @param a_0 A list of fixed treatment values. #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' #' @importFrom SuperLearner SuperLearner trimLogit #' @importFrom stats predict glm as.formula estimategrn <- function(Y, A, W, DeltaA, DeltaY, Qn, gn, SL_gr, tolg, glm_gr, a_0, reduction, returnModels, validRows) { if (length(validRows) != length(Y)) { trainY <- Y[-validRows] trainA <- A[-validRows] trainW <- W[-validRows, , drop = FALSE] trainDeltaA <- DeltaA[-validRows] trainDeltaY <- DeltaY[-validRows] train_gn <- lapply(gn, "[", -validRows) train_Qn <- lapply(Qn, "[", -validRows) validW <- W[validRows, , drop = FALSE] validA <- A[validRows] validY <- Y[validRows] validDeltaA <- DeltaA[validRows] validDeltaY <- DeltaY[validRows] valid_gn <- lapply(gn, "[", validRows) valid_Qn <- lapply(Qn, "[", validRows) } else { trainA <- validA <- A trainW <- validW <- W trainY <- validY <- Y trainDeltaY <- validDeltaY <- DeltaY trainDeltaA <- validDeltaA <- DeltaA train_gn <- valid_gn <- gn train_Qn <- valid_Qn <- Qn } if (is.null(SL_gr) & is.null(glm_gr)) { stop("Specify Super Learner library or GLM formula for gr") } if (!is.null(SL_gr) & !is.null(glm_gr)) { warning(paste0( "Super Learner library and GLM formula specified.", " Proceeding with Super Learner only."
)) glm_gr <- NULL } # Super Learner if (!is.null(SL_gr)) { grn <- mapply( a = a_0, train_Q = train_Qn, train_g = train_gn, valid_Q = valid_Qn, valid_g = valid_gn, SIMPLIFY = FALSE, FUN = function(a, train_Q, train_g, valid_Q, valid_g) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_Q)) == 1) { # warning(paste0( # "Only one unique value of Qn.", # "Proceeding with empirical mean for grn" # )) if (reduction == "univariate") { m1 <- mean((as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1) - train_g) / train_g) grn1 <- rep(m1, length(validY)) m2 <- mean(as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1)) grn2 <- rep(m2, length(validY)) grn2[grn2 < tolg] <- tolg fm1 <- list(fit = list(object = m1), pred = NULL) class(fm1$fit) <- "SL.mean" fm2 <- list(fit = list(object = m2), pred = NULL) class(fm2$fit) <- "SL.mean" } else if (reduction == "bivariate") { m2 <- mean(as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1)) grn2 <- rep(m2, length(validY)) grn2[grn2 < tolg] <- tolg fm2 <- list(fit = list(object = m2), pred = NULL) class(fm2$fit) <- "SL.mean" fm1 <- NULL grn1 <- rep(NA, length(validY)) } } else { if (length(SL_gr) > 1) { if (reduction == "univariate") { fm1 <- SuperLearner::SuperLearner( Y = (as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1) - train_g) / train_g, X = data.frame(Qn = train_Q), newX = data.frame(Qn = valid_Q), family = stats::gaussian(), SL.library = SL_gr, method = tmp_method.CC_LS() ) fm2 <- SuperLearner::SuperLearner( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q), newX = data.frame(Qn = valid_Q), family = stats::binomial(), SL.library = SL_gr, method = tmp_method.CC_nloglik() ) if (!all(fm1$coef == 0)) { grn1 <- fm1$SL.predict } else { grn1 <- fm1$library.predict[, which.min(fm1$cvRisk)] } if (!all(fm2$coef == 0)) { grn2 <- fm2$SL.predict } else { grn2 <- fm2$library.predict[, which.min(fm2$cvRisk)] } grn2[grn2 < tolg] <- tolg } else if (reduction == "bivariate") { fm2 <- SuperLearner::SuperLearner( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q, gn = train_g), newX = data.frame(Qn = valid_Q, gn = valid_g), family = stats::binomial(), SL.library = SL_gr, method = tmp_method.CC_nloglik() ) if (!all(fm2$coef == 0)) { grn2 <- fm2$SL.predict } else { grn2 <- fm2$library.predict[, which.min(fm2$cvRisk)] } grn2[grn2 < tolg] <- tolg fm1 <- NULL grn1 <- rep(NA, length(validY)) } } else if (length(SL_gr) == 1) { if (reduction == "univariate") { fm1 <- do.call(SL_gr, args = list( Y = (as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1) - train_g) / train_g, X = data.frame(Qn = train_Q), obsWeights = rep( 1, length(trainA) ), newX = data.frame(Qn = valid_Q), family = stats::gaussian() )) grn1 <- fm1$pred fm2 <- do.call(SL_gr, args = list( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q), obsWeights = rep( 1, length(trainA) ), newX = data.frame(Qn = valid_Q), family = stats::binomial() )) grn2 <- fm2$pred grn2[grn2 < tolg] <- tolg } else if (reduction == "bivariate") { fm2 <- do.call(SL_gr, args = list( Y = as.numeric(Aeqa & trainDeltaA == 1 & trainDeltaY == 1), X = data.frame(Qn = train_Q, gn = train_g), obsWeights = rep(1, length(trainA)), newX = data.frame(Qn = valid_Q, gn = valid_g), family = stats::binomial() )) grn2 <- fm2$pred grn2[grn2 < tolg] <- tolg fm1 <- NULL grn1 <- rep(NA, length(validY)) } } } out <- list(grn1 = grn1, grn2 = grn2, fm1 = NULL, fm2 = NULL) if (returnModels) { out$fm1 <- fm1 out$fm2 <- 
fm2 } return(out) } ) } # GLM if (!is.null(glm_gr)) { grn <- mapply( a = a_0, train_Q = train_Qn, train_g = train_gn, valid_Q = valid_Qn, valid_g = valid_gn, SIMPLIFY = FALSE, FUN = function(a, train_Q, train_g, valid_Q, valid_g) { Aeqa <- trainA == a Aeqa[is.na(Aeqa)] <- FALSE if (length(unique(train_Q)) == 1) { glm_gr <- "1" } if (reduction == "univariate") { fm1 <- stats::glm( stats::as.formula(paste0("grn1~", glm_gr)), family = "gaussian", data = data.frame(grn1 = (as.numeric( Aeqa & trainDeltaA == 1 & trainDeltaY == 1 ) - train_g) / train_g, Qn = train_Q) ) grn1 <- stats::predict(fm1, newdata = data.frame( grn1 = rep(0, length(validA)), Qn = valid_Q ), type = "response") fm2 <- stats::glm( stats::as.formula(paste0("A~", glm_gr)), family = "binomial", data = data.frame(A = as.numeric(Aeqa & trainDeltaY == 1 & trainDeltaA == 1), Qn = train_Q) ) grn2 <- stats::predict( fm2, type = "response", newdata = data.frame(A = rep(0, length(validA)), Qn = valid_Q) ) } else if (reduction == "bivariate") { fm1 <- NULL grn1 <- rep(NA, length(validY)) fm2 <- stats::glm( stats::as.formula(paste0("A~", glm_gr)), family = "binomial", data = data.frame( A = as.numeric(Aeqa & trainDeltaY == 1 & trainDeltaA == 1), Qn = train_Q, gn = train_g ) ) grn2 <- stats::predict( fm2, type = "response", newdata = data.frame( A = rep(0, length(validA)), Qn = valid_Q, gn = valid_g ) ) } grn2[grn2 < tolg] <- tolg out <- list(grn1 = grn1, grn2 = grn2, fm1 = NULL, fm2 = NULL) if (returnModels) { out$fm1 <- fm1 out$fm2 <- fm2 } return(out) } ) } tmp1 <- lapply(grn, function(x) { data.frame(grn1 = x$grn1, grn2 = x$grn2) }) tmp2 <- lapply(grn, function(x) { list(fm1 = x$fm1, fm2 = x$fm2) }) return(list(est = tmp1, fm = tmp2)) } #' estimategrn_loop #' #' Helper function to clean up the internal code of \code{drtmle} #' @param Y A vector of continuous or binary outcomes. #' @param A A vector of binary treatment assignment (assumed to be equal to 0 or #' 1). #' @param W A \code{data.frame} of named covariates. #' @param DeltaY Indicator of missing outcome (assumed to be equal to 0 if #' missing 1 if observed). #' @param DeltaA Indicator of missing treatment (assumed to be equal to 0 if #' missing 1 if observed). #' @param Qn A list of outcome regression estimates evaluated on observed data. #' @param gn A list of propensity regression estimates evaluated on observed #' data. #' @param SL_gr A vector of characters or a list describing the Super Learner #' library to be used for the reduced-dimension propensity score. #' @param glm_gr A character describing a formula to be used in the call to #' \code{glm} for the second reduced-dimension regression. Ignored if #' \code{SL_gr!=NULL}. #' @param reduction A character equal to \code{'univariate'} for a univariate #' misspecification correction or \code{'bivariate'} for the bivariate version. #' @param tolg A numeric indicating the minimum value for estimates of the #' propensity score. #' @param a_0 A list of fixed treatment values . #' @param returnModels A boolean indicating whether to return model fits for the #' outcome regression, propensity score, and reduced-dimension regressions. #' @param validRows A \code{list} of length \code{cvFolds} containing the row #' indexes of observations to include in validation fold. #' @param use_future Should \code{future} be used to parallelize? 
estimategrn_loop <- function( validRows, Y, A, W, DeltaA, DeltaY, tolg, Qn, gn, glm_gr, SL_gr, a_0, reduction, returnModels, use_future ){ if (use_future) { grnOut <- future.apply::future_lapply( X = validRows, FUN = estimategrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, Qn = Qn, gn = gn, glm_gr = glm_gr, SL_gr = SL_gr, a_0 = a_0, reduction = reduction, returnModels = returnModels ) } else { grnOut <- lapply( X = validRows, FUN = estimategrn, Y = Y, A = A, W = W, DeltaA = DeltaA, DeltaY = DeltaY, tolg = tolg, Qn = Qn, gn = gn, glm_gr = glm_gr, SL_gr = SL_gr, a_0 = a_0, reduction = reduction, returnModels = returnModels ) } return(grnOut) }
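# ----------------------------------------------------------------------
# Illustrative sketch (not drtmle's own code): every *_loop helper above
# follows the same pattern -- take a list of validation-row indexes
# ('validRows') and apply a per-fold estimator with either lapply() or
# future.apply::future_lapply(). The fold construction below is an
# assumption made purely for illustration; it is not the package's own
# sampler. Wrapped in if (FALSE) so sourcing this file is unaffected.
# ----------------------------------------------------------------------
if (FALSE) {
  n <- 100
  cvFolds <- 5
  # randomly assign each observation to one of cvFolds validation folds
  fold_id <- sample(rep(seq_len(cvFolds), length.out = n))
  validRows <- split(seq_len(n), fold_id)
  # dispatch mirrors estimateQ_loop / estimateG_loop / estimategrn_loop:
  # use future.apply if it is available, otherwise fall back to lapply
  use_future <- requireNamespace("future.apply", quietly = TRUE)
  per_fold_n <- if (use_future) {
    future.apply::future_lapply(X = validRows, FUN = length)
  } else {
    lapply(X = validRows, FUN = length)
  }
  str(per_fold_n)
}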
# User options use_precompile <- FALSE use_gpu <- FALSE use_mingw <- FALSE if (.Machine$sizeof.pointer != 8L) { stop("LightGBM only supports 64-bit R, please check the version of R and Rtools.") } R_int_UUID <- .Internal(internalsID()) R_ver <- as.double(R.Version()$major) + as.double(R.Version()$minor) / 10.0 if (!(R_int_UUID == "0310d4b8-ccb1-4bb8-ba94-d36a55f60262" || R_int_UUID == "2fdf6c18-697a-4ba7-b8ef-11c0d92f1327")) { warning("Warning: unmatched R_INTERNALS_UUID, may not run normally.") } # system() will not raise an R exception if the process called # fails. Wrapping it here to get that behavior. # # system() introduces a lot of overhead, at least on Windows, # so trying processx if it is available .run_shell_command <- function(cmd, args, strict = TRUE) { on_windows <- .Platform$OS.type == "windows" has_processx <- suppressMessages({ suppressWarnings({ require("processx") # nolint }) }) if (has_processx && on_windows) { result <- processx::run( command = cmd , args = args , windows_verbatim_args = TRUE , error_on_status = FALSE , echo = TRUE ) exit_code <- result$status } else { if (on_windows) { message(paste0( "Using system() to run shell commands. Installing " , "'processx' with install.packages('processx') might " , "make this faster." )) } cmd <- paste0(cmd, " ", paste0(args, collapse = " ")) exit_code <- system(cmd) } if (exit_code != 0L && isTRUE(strict)) { stop(paste0("Command failed with exit code: ", exit_code)) } return(invisible(exit_code)) } # try to generate Visual Studio build files .generate_vs_makefiles <- function(cmake_args) { vs_versions <- c( "Visual Studio 16 2019" , "Visual Studio 15 2017" , "Visual Studio 14 2015" ) working_vs_version <- NULL for (vs_version in vs_versions) { message(sprintf("Trying '%s'", vs_version)) # if the build directory is not empty, clean it if (file.exists("CMakeCache.txt")) { file.remove("CMakeCache.txt") } vs_cmake_args <- c( cmake_args , "-G" , shQuote(vs_version) , "-A" , "x64" ) exit_code <- .run_shell_command("cmake", c(vs_cmake_args, ".."), strict = FALSE) if (exit_code == 0L) { message(sprintf("Successfully created build files for '%s'", vs_version)) return(invisible(TRUE)) } } return(invisible(FALSE)) } # Move in CMakeLists.txt write_succeeded <- file.copy( "../inst/bin/CMakeLists.txt" , "CMakeLists.txt" , overwrite = TRUE ) if (!write_succeeded) { stop("Copying CMakeLists.txt failed") } # Get some paths source_dir <- file.path(R_PACKAGE_SOURCE, "src", fsep = "/") build_dir <- file.path(source_dir, "build", fsep = "/") # Check for precompilation if (!use_precompile) { # Prepare building package dir.create( build_dir , recursive = TRUE , showWarnings = FALSE ) setwd(build_dir) # Prepare installation steps cmake_args <- NULL build_cmd <- "make" build_args <- "_lightgbm" lib_folder <- file.path(source_dir, fsep = "/") if (use_gpu) { cmake_args <- c(cmake_args, "-DUSE_GPU=ON") } if (R_ver >= 3.5) { cmake_args <- c(cmake_args, "-DUSE_R35=ON") } cmake_args <- c(cmake_args, "-DBUILD_FOR_R=ON") # Pass in R version, used to help find R executable for linking R_version_string <- paste( R.Version()[["major"]] , R.Version()[["minor"]] , sep = "." ) r_version_arg <- sprintf("-DCMAKE_R_VERSION='%s'", R_version_string) cmake_args <- c(cmake_args, r_version_arg) # the checks below might already run `cmake -G`. 
If they do, set this flag # to TRUE to avoid re-running it later makefiles_already_generated <- FALSE # Check if Windows installation (for gcc vs Visual Studio) if (WINDOWS) { if (use_mingw) { message("Trying to build with MinGW") # Must build twice for Windows due sh.exe in Rtools cmake_args <- c(cmake_args, "-G", shQuote("MinGW Makefiles")) .run_shell_command("cmake", c(cmake_args, ".."), strict = FALSE) build_cmd <- "mingw32-make.exe" build_args <- "_lightgbm" } else { visual_studio_succeeded <- .generate_vs_makefiles(cmake_args) if (!isTRUE(visual_studio_succeeded)) { warning("Building with Visual Studio failed. Attempting with MinGW") # Must build twice for Windows due sh.exe in Rtools cmake_args <- c(cmake_args, "-G", shQuote("MinGW Makefiles")) .run_shell_command("cmake", c(cmake_args, ".."), strict = FALSE) build_cmd <- "mingw32-make.exe" build_args <- "_lightgbm" } else { build_cmd <- "cmake" build_args <- c("--build", ".", "--target", "_lightgbm", "--config", "Release") lib_folder <- file.path(source_dir, "Release", fsep = "/") makefiles_already_generated <- TRUE } } } else { .run_shell_command("cmake", c(cmake_args, "..")) makefiles_already_generated <- TRUE } # generate build files if (!makefiles_already_generated) { .run_shell_command("cmake", c(cmake_args, "..")) } # R CMD check complains about the .NOTPARALLEL directive created in the cmake # Makefile. We don't need it here anyway since targets are built serially, so trying # to remove it with this hack generated_makefile <- file.path( build_dir , "Makefile" ) if (file.exists(generated_makefile)) { makefile_txt <- readLines( con = generated_makefile ) makefile_txt <- gsub( pattern = ".*NOTPARALLEL.*" , replacement = "" , x = makefile_txt ) writeLines( text = makefile_txt , con = generated_makefile , sep = "\n" ) } # build the library .run_shell_command(build_cmd, build_args) src <- file.path(lib_folder, paste0("lib_lightgbm", SHLIB_EXT), fsep = "/") } else { # Has precompiled package lib_folder <- file.path(R_PACKAGE_SOURCE, "../", fsep = "/") shared_object_file <- file.path( lib_folder , paste0("lib_lightgbm", SHLIB_EXT) , fsep = "/" ) release_file <- file.path( lib_folder , paste0("Release/lib_lightgbm", SHLIB_EXT) , fsep = "/" ) windows_shared_object_file <- file.path( lib_folder , paste0("/windows/x64/DLL/lib_lightgbm", SHLIB_EXT) , fsep = "/" ) if (file.exists(shared_object_file)) { src <- shared_object_file } else if (file.exists(release_file)) { src <- release_file } else { # Expected result: installation will fail if it is not here or any other src <- windows_shared_object_file } } # Packages with install.libs.R need to copy some artifacts into the # expected places in the package structure. # see https://cran.r-project.org/doc/manuals/r-devel/R-exts.html#Package-subdirectories, # especially the paragraph on install.libs.R dest <- file.path(R_PACKAGE_DIR, paste0("libs", R_ARCH), fsep = "/") dir.create(dest, recursive = TRUE, showWarnings = FALSE) if (file.exists(src)) { message(paste0("Found library file: ", src, " to move to ", dest)) file.copy(src, dest, overwrite = TRUE) symbols_file <- file.path(source_dir, "symbols.rds") if (file.exists(symbols_file)) { file.copy(symbols_file, dest, overwrite = TRUE) } } else { stop(paste0("Cannot find lib_lightgbm", SHLIB_EXT)) } # clean up the "build" directory if (dir.exists(build_dir)) { message("Removing 'build/' directory") unlink( x = build_dir , recursive = TRUE , force = TRUE ) }
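# Illustrative sketch only (not part of the official install flow): the
# .run_shell_command() wrapper defined above can be exercised directly,
# for example to probe whether a working cmake is on the PATH without
# aborting on failure. This assumes cmake is installed; guarded with
# if (FALSE) so it never executes during package installation.
if (FALSE) {
  exit_code <- .run_shell_command("cmake", "--version", strict = FALSE)
  message("cmake available: ", exit_code == 0L)
}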
testlist <- list(barrier = 0, ben = numeric(0), fee = 0, penalty = numeric(0), spot = c(8.3138050000614e-275, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 5.82508648364645e-316, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(valuer::calc_account,testlist) str(result)
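# A defensive variant of the fuzz harness above (a sketch; it assumes the
# 'valuer' package is installed, as the call above already does). Wrapping
# the call in tryCatch() lets a malformed input surface as a message rather
# than aborting the whole run.
result_safe <- tryCatch(
  do.call(valuer::calc_account, testlist),
  error = function(e) {
    message("calc_account failed: ", conditionMessage(e))
    NULL
  }
)
str(result_safe)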
# vim: noai:ts=2:sw=2 ## recursive function that transforms the kraken dataframe into a cascading list build_kraken_tree <- function(report) { if (nrow(report) == 0 || nrow(report) == 1) { # this should only happen if the original input to the function has a size <= 1 return(list(report)) } ## select the current depth as the one of the topmost data.frame row sel_depth <- report[,'depth'] == report[1,'depth'] ## partition the data.frame into parts with that depth depth_partitions <- cumsum(sel_depth) ## for each depth partition res <- lapply(unique(depth_partitions), function(my_depth_partition) { sel <- depth_partitions == my_depth_partition ## return the data.frame row if it is only one row (leaf node, ends recursion) if (sum(sel) == 1) return(report[sel,,drop=F]) ## otherwise: take first row as partition descriptor .. first_row <- which(sel)[1] ## and recurse deeper into the depths with the remaining rows dres <- build_kraken_tree(report[which(sel)[-1],,drop=F]) attr(dres,"row") <- report[first_row,,drop=F] dres }) names(res) <- report$name[sel_depth] res } ## Collapse taxonomic taxRanks to only those mentioned in keep_taxRanks collapse.taxRanks <- function(krakenlist,keep_taxRanks=LETTERS,filter_taxon=NULL) { ## input: a list, in which each element is either a ## a list or a data.frame (for the leafs) ## the input has an attribute row that gives details on the current taxRank ## columns whose values are added to the next taxRank when ## a taxRank is deleted cols <- c("taxonReads","n_unique_kmers","n_kmers") if (length(krakenlist) == 0 || is.data.frame(krakenlist)) { return(krakenlist) } parent_row <- attr(krakenlist,"row") all.child_rows <- c() if (is.null(parent_row)) { return(do.call(rbind,lapply(krakenlist,collapse.taxRanks,keep_taxRanks=keep_taxRanks,filter_taxon=filter_taxon))) } ## rm.cladeReads captures the number of cladeReads that are deleted. ## this has to be propagated to the higher taxRank rm.cladeReads <- 0 for (kl in krakenlist) { if (is.data.frame(kl)) { ## is a leaf node? 
child_rows <- kl } else { ## recurse deeper into tree child_rows <- collapse.taxRanks(kl,keep_taxRanks,filter_taxon=filter_taxon) if ('rm.cladeReads' %in% names(attributes(child_rows))) { rm.cladeReads <- rm.cladeReads + attr(child_rows,'rm.cladeReads') } } ## check if this taxRank and the taxRanks below should be removed delete.taxon <- child_rows[1,'name'] %in% filter_taxon if (delete.taxon) { rm.cladeReads <- rm.cladeReads + child_rows[1,'cladeReads'] dmessage(sprintf("removed %7s cladeReads, including %s childs, for %s",child_rows[1,'"cladeReads"'],nrow(child_rows)-1,child_rows[1,'name'])) ## remove all children child_rows <- NULL } else { ## check if the last (top-most) row should be kept keep_last.child <- child_rows[1,'taxRank'] %in% keep_taxRanks if (!keep_last.child) { cols <- cols[cols %in% colnames(parent_row)] ## save the specified colum information to the parent parent_row[,cols] <- parent_row[,cols] + child_rows[1,cols] ## remove row child_rows <- child_rows[-1,,drop=FALSE] ## decrease depths of rows below child row if (nrow(child_rows) > 0) child_rows[,'depth'] <- child_rows[,'depth'] - 1 } } all.child_rows <- rbind(all.child_rows,child_rows) } ## subtract deleted read count from parent row parent_row[,'cladeReads'] <- parent_row[,'cladeReads'] - rm.cladeReads res <- rbind(parent_row,all.child_rows) if (parent_row[,'cladeReads'] < 0) stop("mistake made in removing cladeReads") #if (parent_row[,'"cladeReads"'] == 0) # res <- c() if (rm.cladeReads > 0) attr(res,'rm.cladeReads') <- rm.cladeReads return(res) } delete_taxRanks_below <- function(report,taxRank="S") { del_taxRank <- 0 do_del <- FALSE del_row <- 0 cols <- c("taxonReads","n_unique_kmers","n_kmers") sub.sums <- c(0,0,0) rows_to_delete <- c() for (i in seq_len(nrow(report))) { if (report[i,'taxRank'] %in% taxRank) { del_depth <- report[i,'depth'] do_del <- TRUE del_row <- i sub.sums <- c(0,0,0) } else { if (do_del) { if (report[i,'depth'] > del_taxRank) { rows_to_delete <- c(rows_to_delete,i) sub.sums <- sub.sums + report[i,cols] } else { report[del_row,cols] <- report[del_row,cols]+sub.sums sub.sums <- c(0,0,0) do_del <- FALSE } } } } report[-rows_to_delete,] } #' Read kraken or centrifuge-style report #' #' @param myfile kraken report file #' @param collapse should the results be collapsed to only those taxRanks specified in keep_taxRanks? #' @param keep_taxRanks taxRanks to keep when collapse is TRUE #' @param min.depth minimum depth #' @param filter_taxon filter certain taxon names #' @param has_header if the kraken report has a header or not #' @param add_taxRank_columns if TRUE, for each taxRank columns are added #' #' @return report data.frame #' @export #' read_report2 <- function(myfile,collapse=TRUE,keep_taxRanks=c("D","K","P","C","O","F","G","S"),min.depth=0,filter_taxon=NULL, has_header=NULL,add_taxRank_columns=FALSE) { first.line <- readLines(myfile,n=1) isASCII <- function(txt) all(charToRaw(txt) <= as.raw(127)) if (!isASCII(first.line)) { dmessage(myfile," is no valid report - not all characters are ASCII") return(NULL) } if (is.null(has_header)) { has_header <- grepl("^[a-zA-Z]",first.line) } if (has_header) { report <- utils::read.table(myfile,sep="\t",header = T, quote = "",stringsAsFactors=FALSE, comment.char="#") #colnames(report) <- c("percentage","cladeReads","taxonReads","taxRank","taxID","n_unique_kmers","n_kmers","perc_uniq_kmers","name") ## harmonize column names. TODO: Harmonize them in the scripts! 
colnames(report)[colnames(report)=="clade_perc"] <- "percentage" colnames(report)[colnames(report)=="perc"] <- "percentage" colnames(report)[colnames(report)=="n_reads_clade"] <- "cladeReads" colnames(report)[colnames(report)=="n.clade"] <- "cladeReads" colnames(report)[colnames(report)=="n_reads_taxo"] <- "taxonReads" colnames(report)[colnames(report)=="n.stay"] <- "taxonReads" colnames(report)[colnames(report)=="rank"] <- "taxRank" colnames(report)[colnames(report)=="tax_rank"] <- "taxRank" colnames(report)[colnames(report)=="taxonid"] <- "taxID" colnames(report)[colnames(report)=="tax"] <- "taxID" } else { report <- utils::read.table(myfile,sep="\t",header = F, col.names = c("percentage","cladeReads","taxonReads","taxRank","taxID","name"), quote = "",stringsAsFactors=FALSE, comment.char="#") } report$depth <- nchar(gsub("\\S.*","",report$name))/2 report$name <- gsub("^ *","",report$name) report$name <- paste(tolower(report$taxRank),report$name,sep="_") ## Only stop at certain taxRanks ## filter taxon and further up the tree if 'filter_taxon' is defined kraken.tree <- build_kraken_tree(report) report <- collapse.taxRanks(kraken.tree,keep_taxRanks=keep_taxRanks,filter_taxon=filter_taxon) ## Add a metaphlan-style taxon string if (add_taxRank_columns) { report[,keep_taxRanks] <- NA } report$taxLineage = report$name rows_to_consider <- rep(FALSE,nrow(report)) for (i in seq_len(nrow(report))) { ## depth > 2 correspond to taxRanks below 'D' if (i > 1 && report[i,"depth"] > min.depth) { ## find the maximal index of a row below the current depth idx <- report$depth < report[i,"depth"] & rows_to_consider if (!any(idx)) { next() } current.taxRank <- report[i,'taxRank'] my_row <- max(which(idx)) report[i,'taxLineage'] <- paste(report[my_row,'taxLineage'],report[i,'taxLineage'],sep="|") if (add_taxRank_columns) { if (report[my_row,'taxRank'] %in% keep_taxRanks) { taxRanks.cp <- keep_taxRanks[seq(from=1,to=which(keep_taxRanks == report[my_row,'taxRank']))] report[i,taxRanks.cp] <- report[my_row,taxRanks.cp] } report[i,report[i,'taxRank']] <- report[i,'name'] } } rows_to_consider[i] <- TRUE } report <- report[report$depth >= min.depth,] report$percentage <- round(report$cladeReads/sum(report$taxonReads),6) * 100 for (column in c("taxonReads", "cladeReads")) if (all(floor(report[[column]]) == report[[column]])) report[[column]] <- as.integer(report[[column]]) if ('n_unique_kmers' %in% colnames(report)) report$kmerpercentage <- round(report$n_unique_kmers/sum(report$n_unique_kmers,na.rm=T),6) * 100 #report$taxRankperc <- 100/taxRank(report$cladeReads) rownames(report) <- NULL report } #' Filter lines from a kraken report result based on the taxonomy name #' #' It updates the read_stay counts, and removes any children below the #' entry, and any parent entries that have no "cladeReads" that stay #' #' @param report Report \code{data.frame}. #' @param filter_taxon Name of entry to remove. #' @param rm_clade If \code{TRUE}, remove all cladeReads at and below clade, otherwise just set the number of cladeReads that stay at taxon to zero. #' @param do_message If \code{TRUE}, report how many rows and cladeReads were deleted. 
#' #' @return filtered report #' @export filter_taxon <- function(report, filter_taxon, rm_clade = TRUE, do_message=FALSE) { taxon_depth <- NULL taxonReads <- 0 pos.taxons <- which(sub("._","",report$name) %in% filter_taxon) #pos.taxon <- which(report$name := filter_taxon) if (length(pos.taxons) == 0) { return(report) } row_seq <- seq_len(nrow(report)) rows_to_delete <- rep(FALSE,nrow(report)) taxon_depths <- report[pos.taxons,"depth"] if (isTRUE(rm_clade)) { taxonReads <- report[pos.taxons,"cladeReads"] } else { taxonReads <- report[pos.taxons,"taxonReads"] report[pos.taxons,"taxonReads"] <- 0 } for (i in seq_along(pos.taxons)) { pos.taxon <- pos.taxons[i] if (pos.taxon == 1) { rows_to_delete[1] <- TRUE next } taxon_depth <- taxon_depths[i] taxonReads <- taxonReads[i] if (rm_clade) { tosum_below <- row_seq >= pos.taxon & report$depth <= taxon_depth taxons_below <- cumsum(tosum_below) == 1 rows_to_delete[taxons_below] <- TRUE } rows_to_update <- c(pos.taxon) taxons_above <- seq_len(nrow(report)) < pos.taxon & report$depth == taxon_depth any_stays <- FALSE prev_taxon_depth <- taxon_depth taxons_above <- c() for (i in seq(from=(pos.taxon-1),to=1)) { curr_taxon_depth <- report[i,"depth"] if (curr_taxon_depth < prev_taxon_depth) { if (!any_stays) { if (report[i,"cladeReads"] == taxonReads) { rows_to_delete[i] <- TRUE if (do_message) dmessage("Deleting ",report[i,"name"]) } else { any_stays <- TRUE } } if (!rows_to_delete[i]) { rows_to_update <- c(rows_to_update, i) if (do_message) dmessage("Updating ",report[i,"name"]) } prev_taxon_depth <- curr_taxon_depth } else { any_stays <- TRUE } } report[rows_to_update, "cladeReads"] <- report[rows_to_update, "cladeReads"] - taxonReads } #if (rm_clade) report[!rows_to_delete,] #else # report } #' Read Kraken-style and MetaPhlAn reports #' #' @param myfile Kraken-style or MetaPhlAn report file. #' @param has_header If the kraken report has a header or not. #' @param check_file If TRUE, only the first 5 lines of the file are loaded. #' #' @return report data.frame #' @export #' read_report <- function(myfile, has_header=NULL, check_file = FALSE) { # TODO: Support for gzipped files .. 
#myfile <- file(myfile) #file_class <- summary(myfile)$class #if (file_class == "gzfile") # myfile <- gzcon(myfile) first.line <- tryCatch( readLines(myfile,n=1, warn=FALSE), error = function(e) { warning("Error reading ",myfile); return() }) isASCII <- function(txt) { if (length(txt) == 0) return(FALSE) raw <- charToRaw(txt) all(raw <= as.raw(127) && (raw >= as.raw(32) | raw == as.raw(9))) } if (length(first.line) == 0) { dmessage("Could not read ", myfile, ".") return(NULL) } tryCatch({ if (nchar(first.line) == 0) { dmessage("First line of ", myfile, " is empty") return(NULL) } }, error = function(e) { dmessage(e) return(NULL) }) if (!isTRUE(isASCII(first.line))) { dmessage(myfile," is not a ASCII file") return(NULL) } if (is.null(has_header)) { has_header <- grepl("^[a-zA-Z#%\"]",first.line) } is_metaphlan_fmt <- grepl("Metaphlan2_Analysis$", first.line) is_krakenu_fmt <- grepl("^.?%\treads\ttaxReads\tkmers", first.line) is_kaiju_fmt <- grepl("^ *%\t *reads", first.line) nrows <- ifelse(isTRUE(check_file), 5, -1) if (!is_krakenu_fmt && is_kaiju_fmt) { cont <- readLines(myfile) cont <- cont[!grepl("^-", cont)] cont <- sub(".*\t *","", cont) cont <- sub("; ?$","", cont) report <- utils::read.delim(textConnection(cont), stringsAsFactors = FALSE) colnames(report) <- c("taxonReads", "taxLineage") report$cladeReads <- report$taxonReads report$taxLineage <- gsub("^","-_",report$taxLineage) report$taxLineage <- gsub("; ","|-_",report$taxLineage) report$taxLineage <- gsub("-_Viruses", "d_Viruses", report$taxLineage, fixed=T) report$taxLineage <- gsub("-_cellular organisms|-_Bacteria", "-_cellular organisms|d_Bacteria", report$taxLineage, fixed=T) report$taxLineage <- gsub("-_cellular organisms|-_Eukaryota", "-_cellular organisms|d_Eukaryota", report$taxLineage, fixed=T) report$taxLineage <- gsub("-_cellular organisms|-_Archaea", "-_cellular organisms|d_Archaea", report$taxLineage, fixed=T) report$taxLineage[1:(length(report$taxLineage)-1)] <- paste0("-_root|", report$taxLineage[1:(length(report$taxLineage)-1)]) report$taxLineage[report$taxLineage=="-_unclassified"] <- "u_unclassified" new_counts <- integer(length = 0) for (j in seq_len(nrow(report))) { count <- report$cladeReads[j] tl <- report$taxLineage[j] tl2 <- sub("\\|[^|]*$","", tl) while (tl2 != tl) { if (tl2 %in% names(new_counts)) { new_counts[tl2] <- new_counts[tl2] + count } else { new_counts[tl2] <- count } tl <- tl2 tl2 <- sub("\\|[^|]*$","", tl) } } report <- rbind(report, data.frame(taxonReads=0,taxLineage=names(new_counts),cladeReads=as.integer(new_counts))) tl_order <- order(report$taxLineage) tl_order <- c(tl_order[length(tl_order)],tl_order[-length(tl_order)]) report <- report[tl_order, c("taxLineage", "taxonReads", "cladeReads")] } else if (has_header) { report <- tryCatch({ ## TODO: Having comment_char here causes a problem w/ Metaphlan report!! utils::read.table(myfile,sep="\t",header = T, quote = "",stringsAsFactors=FALSE, comment.char = ifelse(is_metaphlan_fmt, "", "#"), nrows = nrows, check.names=FALSE) }, error = function(x) NULL, warning = function(x) NULL) if (is.null(report)) { return(NULL); } #colnames(report) <- c("percentage","cladeReads","taxonReads","taxRank","taxID","n_unique_kmers","n_kmers","perc_uniq_kmers","name") ## harmonize column names. TODO: Harmonize them in the scripts! 
colnames(report)[colnames(report) %in% c("#%","%","clade_perc","perc","percReadsClade")] <- "percentage" colnames(report)[colnames(report) %in% c("reads","numReadsClade","n_reads_clade","n.clade","n-clade")] <- "cladeReads" colnames(report)[colnames(report) %in% c("taxReads","numReadsTaxon","n_reads_taxo","n.stay","n-stay")] <- "taxonReads" colnames(report)[colnames(report) %in% c("rank","tax_taxRank","level")] <- "taxRank" colnames(report)[colnames(report) %in% c("tax","taxonid")] <- "taxID" colnames(report)[colnames(report) %in% c("indentedName","taxName")] <- "name" colnames(report)[colnames(report) %in% c("dup")] <- "kmerDuplicity" colnames(report)[colnames(report) %in% c("cov")] <- "kmerCoverage" } else { report <- tryCatch({ utils::read.table(myfile,sep="\t",header = F, col.names = c("percentage","cladeReads","taxonReads","taxRank","taxID","name"), quote = "",stringsAsFactors=FALSE, nrows = nrows) }, error=function(x) NULL, warning=function(x) NULL) if (is.null(report)) { return(NULL); } } if (ncol(report) < 2) { return(NULL) } if (colnames(report)[2] == "Metaphlan2_Analysis") { ## Metaphlan report colnames(report) <- c("taxLineage", "cladeReads") report <- report[order(report$taxLineage), ] report$taxLineage <- gsub("_"," ",report$taxLineage) report$taxLineage <- gsub(" ","_",report$taxLineage) report$taxLineage <- paste0("-_root|", report$taxLineage) report <- rbind( data.frame(taxLineage=c("u_unclassified","-_root"),"cladeReads"=c(0,100), stringsAsFactors = F), report) } if (all(c("name","taxRank") %in% colnames(report)) && !"taxLineage" %in% colnames(report)) { ## Kraken report report$depth <- nchar(gsub("\\S.*","",report$name))/2 if (!all(report$depth == floor(report$depth))) { warning("Depth doesn't work out!") return(NULL) } report$name <- gsub("^ *","",report$name) ## 'fix' taxRank table(report$taxRank) allowed_taxRanks <- c("U", "S", "G", "F", "C", "D", "O", "K", "P") report$taxRank[report$taxRank=="class"] <- "C" report$taxRank[report$taxRank=="family"] <- "F" report$taxRank[report$taxRank=="genus"] <- "G" report$taxRank[report$taxRank=="superkingdom"] <- "D" report$taxRank[report$taxRank=="kingdom"] <- "K" report$taxRank[report$taxRank=="order"] <- "O" report$taxRank[report$taxRank=="phylum"] <- "P" report$taxRank[report$taxRank=="species"] <- "S" report$taxRank[report$name=="unclassified"] <- "U" report$taxRank[!report$taxRank %in% allowed_taxRanks] <- "-" report$name <- paste(tolower(report$taxRank),report$name,sep="_") rownames(report) <- NULL ## make taxLineage path report$taxLineage <- report$name n <- nrow(report) depths <- report$depth taxLineages <- report$name taxLineages_p <- as.list(seq_along(report$name)) depth_row_tmp <- c(1:25) for (current_row in seq(from=1, to=nrow(report))) { dcr <- depths[current_row] depth_row_tmp[dcr+1] <- current_row if (dcr >= 1) { prev_pos <- depth_row_tmp[[dcr]] taxLineages_p[[current_row]] <- c(taxLineages_p[[prev_pos]], current_row) } } report$taxLineage <- sapply(taxLineages_p, function(x) paste0(taxLineages[x], collapse="|")) #report$taxLineage <- taxLineages } else if ("taxLineage" %in% colnames(report)) { taxLineages <- strsplit(report$taxLineage, "|", fixed=TRUE) if (!"name" %in% colnames(report)) report$name <- sapply(taxLineages, function(x) x[length(x)]) if (!"depth" %in% colnames(report)) { report$depth <- sapply(taxLineages, length) - 1 } if (!"taxRank" %in% colnames(report)) report$taxRank <- toupper(substr(report$name, 0, 1)) } if (!all(c("name","taxRank") %in% colnames(report)) || nrow(report) < 2 || 
!((report[1,"name"] == "u_unclassified" && report[2,"name"] == "-_root") || report[1,"name"] == "-_root")) { message(paste("Warning: File",myfile,"does not have the required format")) str(report) return(NULL) } if (!"taxonReads" %in% colnames(report)) { parent <- sub("^\\(.*\\)\\|.*$", "\\1", report$taxLineage) taxLineages <- strsplit(report$taxLineage, "|", fixed=TRUE) ## fix taxonReads report$taxonReads <- report$cladeReads - sapply(report$name, function(x) sum(report$cladeReads[parent == x])) #report$taxonReads[sapply(report$taxonReads, function(x) isTRUE(all.equal(x, 0)))] <- 0 report$taxonReads[report$taxonReads <= 0.00001] <- 0 # fix for rounding in percentages by MetaPhlAn } report$percentage <- signif(report$cladeReads/sum(report$taxonReads),6) * 100 if ('n_unique_kmers' %in% colnames(report)) report$kmerpercentage <- round(report$n_unique_kmers/sum(report$n_unique_kmers,na.rm=T),6) * 100 #report$taxRankperc <- 100/taxRank(report$cladeReads) #report$depth <- NULL if ("taxID" %in% colnames(report)) { std_colnames <- c("percentage","cladeReads","taxonReads","taxRank", "taxID","name") } else { std_colnames <- c("percentage","cladeReads","taxonReads","taxRank","name") } stopifnot(all(std_colnames %in% colnames(report))) report[, c(std_colnames, setdiff(colnames(report), std_colnames))] }
## Source file: /R/datainput-read_report.R (repo: phac-nml/pavian)
#' Project Template #' #' Generate a project template to increase efficiency. #' #' @param project A character vector of the project name. #' @param path The path to where the project should be created. Default is the #' current working directory. #' @param open logical. If \code{TRUE} the project will be opened in RStudio. #' The default is to test if \code{new_project} is being used in the global #' environment, if it is then the project directory will be opened. #' @param \ldots ignored. #' @details The project template includes these main directories and scripts: #' \itemize{ #' \item{CODEBOOK}{ - A directory to store coding conventions or demographics data: #' \itemize{ #' \item{KEY.csv}{ - A blank template for demographic information} #' } #' } #' \item{CORRESPONDENCE}{ - A directory to store correspondence and agreements with the client: #' \itemize{ #' \item{CONTACT_INFO.txt}{ - A text file to put research team members' contact information} #' } #' } #' \item{DATA}{ - A directory to store data:} #' \itemize{ #' \item{CLEANED_TRANSCRIPTS}{ - A directory to store the cleaned transcripts (If the transcripts are already cleaned you may choose to not utilize the RAW_TRANSCRIPTS directory)} #' \item{CM_DATA}{ - A directory to export/import scripts for cm_xxx family of functions} #' \item{DATA_FOR_REVIEW}{ - A directory to put data that may need to be altered or needs to be inspected more closely} #' \item{RAW_DATA}{ - A directory to store non-transcript data related to the project: #' \itemize{ #' \item{ANALYTIC_MEMOS}{ - A directory to put audio files (or shortcuts)} #' \item{AUDIO}{ - A directory to put audio files (or shortcuts)} #' \item{FIELD_NOTES}{ - A directory to put audio files (or shortcuts)} #' \item{PAPER_ARTIFACTS}{ - A directory to put paper artifacts} #' \item{PHOTOGRAPHS}{ - A directory to put photographs} #' \item{VIDEO}{ - A directory to put video files (or shortcuts)} #' } #' } #' \item{TRANSCRIPTS}{ - A directory to put transcription data: #' \itemize{ #' \item{CLEANED_TRANSCRIPTS}{ - A directory to store the cleaned transcripts (If the transcripts are already cleaned you may choose to not utilize the RAW_TRANSCRIPTS directory)} #' \item{RAW_TRANSCRIPTS}{ - A directory to store the raw transcripts} #' } #' } #' } #' \item{DOCUMENTATION}{ - A directory to store documents related to the project} #' \item{PLOTS}{ - A directory to store plots} #' \item{REPORTS}{ - A directory with report and presentation related tools.} #' \item{SCRIPTS}{ - A directory to store scripts; already contains the following: #' \itemize{ #' \item{01_clean_data.R}{ - initial cleaning of raw transcripts} #' \item{02_analysis_I.R}{ - initial analysis} #' \item{03_plots.R}{ - plotting script} #' } #' } #' \item{TABLES}{ - A directory to export tables to} #' \item{WORD_LISTS}{ - A directory to store word lists that can be sourced and supplied to functions} #' \item{extra_functions.R}{ - A script to store user made functions related to the project #' \itemize{ #' \item{email}{ - A function to view, and optionally copy to the clipboard, emails for the client/lead researcher, analyst and/or other project members (information taking from ~/CORRESPONDENCE/CONTACT_INFO.txt file)} #' \item{todo}{ - A function to view, and optionally copy to the clipboard, non-completed tasks from the \code{TO_DO.txt} file} #' } #' } #' \item{LOG}{ - A text file documenting project changes/needs etc.} #' \item{PROJECT_WORKFLOW_GUIDE.pdf}{ - A pdf explaining the structure of the project template} #' \item{xxx.Rproj}{ - A 
project file used by RRtudio; clicking this will open the project in RStudio.} #' \item{TO_DO}{ - A text file documenting project tasks} #' } #' #' The template comes with a .Rproj file. This makes operating in #' RStudio very easy. The file can be kept on #' the desktop or a git application such as github, #' bitbucket or dropbox, #' depending on what the client/research team is comfortable utilizing. #' #' @return Creates a project template. #' @export #' @importFrom tools file_ext new_project <- function(project = "new", path = getwd(), open = is.global(2), ...) { ## Replace spaces in path with underscores project <- sub("'", "", gsub("\\s+", "_", project)) ## get the working directory and save for later WD <- getwd() on.exit(setwd(WD)) ## handle if the directory already exists if(file.exists(file.path(path, project))) { message(paste0("\"", paste0(path, "/", project), "\" already exists:\nDo you want to overwrite?\n")) ans <- utils::menu(c("Yes", "No")) if (ans == "2") { stop("new_project aborted") } else { delete(paste0(path, "/", project)) } } ## Create the main directory and set wd to there x <- suppressWarnings(invisible(folder(folder.name=file.path(path, project)))) setwd(x) ## NULL to variables not defined SCRIPTS <- CODEBOOK <- DATA <- DATA_FOR_REVIEW <- RAW_DATA <- NULL RAW_TRANSCRIPTS <- PLOTS <- TABLES <- CM_DATA <- WORD_LISTS <- NULL CORRESPONDENCE <- DOCUMENTATION <- CLEANED_TRANSCRIPTS <- NULL ## Add level 2 folders y <- invisible(folder(SCRIPTS, CODEBOOK, DATA, PLOTS, TABLES, WORD_LISTS, CORRESPONDENCE, DOCUMENTATION)) ## Create a to do file todo <- paste("#when a task is complete put - in front of the item", "#Use hanging indent", "1. Task 1", sep = "\n") cat(todo, file=paste0(x, "/", "TO_DO")) ## Create a project log cat(paste0("Project \"", project, "\" created: ", Sys.time(), "\n"), file=paste0(x, "/", "LOG")) ## Subdirectories in DATA folder data_nms <- c("RAW_DATA", "DATA_FOR_REVIEW", "CM_DATA", "TRANSCRIPTS", "CLEANED_DATA") datas <- invisible(folder(folder.name=file.path(y[[3]], data_nms))) ## Add RAW_DATA subfolders dats <- c("ANALYTIC_MEMOS", "AUDIO", "VIDEO", "FIELD_NOTES", "INTERVIEWS", "PAPER_ARTIFACTS", "PHOTOGRAPHS") invisible(folder(folder.name=file.path(datas[[1]], dats))) ## Add TRANSCRIPTS subfolders trans <- c("RAW_TRANSCRIPTS", "CLEANED_TRANSCRIPTS") invisible(folder(folder.name=file.path(datas[[4]], trans))) ## Add directory for data already reviewed invisible(folder(folder.name=file.path(datas[[2]], "ALREADY_REVIEWED"))) ## Add first script (CLEANING DATA) in SCRIPTS folder cat(paste0("## Load required packages\npacks <- c(\"qdap\")\n", "invisible(lapply(packs, library, character.only=TRUE))\n\n", "trans_dir <- \"DATA/TRANSCRIPTS/CLEANED_TRANSCRIPTS\"\n", "dir_map(trans_dir)\n\n\n\n", "len <- length(dir(trans_dir))\n", "L1 <- lapply(paste0(\"dat\", 1:len), function(x) get(x))\n", "names(L1) <- paste0(\"dat\", 1:len)\n", "\n\n\n\nsave( , file = \"DATA/CLEANED_DATA/cleaned.RData\")\n"), file=file.path(y[[1]], "01_clean_data.R")) ## Add 2nd script (02_analysis_I) in SCRIPTS folder cat(paste0("## Load required packages\n", "packs <- c(\"qdap\", \"ggplot2\", \"grid\", \"scales\")\n", "invisible(lapply(packs, library, character.only=TRUE))\n\n", "source(\"extra_functions.R\")\n", "load(\"DATA/CLEANED_DATA/cleaned.RData\")\n"), file=paste0(y[[1]], "/", "02_analysis_I.R")) ## Add 3rd script (03_plots) in SCRIPTS folder cat(paste0("## Load required packages\n", "packs <- c(\"qdap\", \"ggplot2\", \"grid\", \"scales\")\n", "invisible(lapply(packs, library, 
character.only=TRUE))\n\n", "source(\"extra_functions.R\")\n", "load(\"DATA/CLEANED_DATA/cleaned.RData\")\n"), file=file.path(y[[1]], "03_plots.R")) ## Add Project Workflow guide PDF root <- system.file("extdata/docs", package = "qdap") pdfloc <- file.path(root, "PROJECT_WORKFLOW_GUIDE.pdf") invisible(file.copy(pdfloc, x)) ## Create .rproj pdfloc4 <- file.path(root, "TEMP.txt") invisible(file.copy(pdfloc4, x)) invisible(file.rename(file.path(x, "TEMP.txt"), file.path(x, paste0(project, ".Rproj")))) ## Create extra_functions.R pdfloc5 <- file.path(root, "extra_functions.R") invisible(file.copy(pdfloc5, x)) ## Create CONTACT_INFO in the CORRESPONDENCE folder info <- c("PROJECT NAME: Project", "CLIENT/LEAD RESEARCHER: lead_researcher<numero_uno@email> 555-555-5555[skype: num1]", "ANALYST: analyst_name<analyst@email> 555-555-5555[skype: analyst_guy12]", paste0("PROJECT MEMBERS:\n john doe<j.doe@email> 555-555-5555[skype: jd156]\n", " jane doe<jane@email> 555-555-5555[skype: jd157]\n", " nth_member<member_nth@email> 555-555-5555[skype: nmem123]\n"), paste("PROJECT CREATED:", Sys.time()) ) info <- paste(info, collapse = "\n\n") cat(info, file=paste0(y[[7]], "/", "CONTACT_INFO")) ## Create a KEY.csv codebook utils::write.csv(data.frame(person=""), file=paste0(y[[2]], "/", "KEY.csv"), row.names = FALSE) ## Create the reports folder with `new_report` invisible(dir.create("REPORTS")) o <- paste0("Project \"", project, "\" created:\n", x, "\n") class(o) <- "qdapProj" ## Open Project in RStudio if (open) { open_project(file.path(x, project, paste0(project, ".Rproj"))) } return(o) } #' Prints a qdapProj Object #' #' Prints a qdapProj object. #' #' @param x The qdapProj object. #' @param \ldots ignored #' @method print qdapProj #' @export print.qdapProj <- function(x, ...) { class(x) <- NULL message(x) } wheresRstudio <- function() { myPaths <- c("rstudio", "~/.cabal/bin/rstudio", "~/Library/Haskell/bin/rstudio", "C:\\PROGRA~1\\RStudio\\bin\\rstudio.exe", "C:\\RStudio\\bin\\rstudio.exe", "/Applications/RStudio.app/Contents/MacOS/RStudio") panloc <- Sys.which(myPaths) temp <- panloc[panloc != ""] if (identical(names(temp), character(0))) { ans <- readline("RStudio not installed in one of the typical locations.\n Do you know where RStudio is installed? (y/n) ") if (ans == "y") { temp <- readline("Enter the (unquoted) path to RStudio: ") } else { if (ans == "n") { stop("RStudio not installed or not found.") } } } short.path <- which.min(unlist(lapply(gregexpr("RStudio", temp), "[[", 1))) temp[short.path] } open_project <- function(Rproj.loc) { action <- paste(wheresRstudio(), Rproj.loc) message("Preparing to open project!") try(system(action, wait = FALSE, ignore.stderr = TRUE)) } #' Easy File Handling #' #' \code{delete} - Deletes files and directories. #' #' @param file The name of the file in the working directory or the path to the #' file to be deleted. If \code{NULL} provides a menu of files from the working #' directory. #' @param \ldots The name(s) of the folder to be created. If both \ldots and #' \code{folder.name} are \code{NULL} creates a file in the working directory #' with the creation date and time stamp. #' @param folder.name A character vector of the name(s) of the folder to be #' created. Default \code{NULL} (if the \ldots is \code{NULL} too) creates a #' file in the working directory with the creation date and time stamp. Use #' this argument only if the directory names contain spaces. #' @return \code{delete} permanently removes a file/directory. 
#' @seealso \code{\link[base]{unlink}}, #' \code{\link[base]{file.remove}}, #' \code{\link[base]{dir.create}} #' @rdname file_handling #' @export #' @examples #' \dontrun{ #' (x <- folder("DELETE.ME")) #' which(dir() == "DELETE.ME") #' delete("DELETE.ME") #' which(dir() == "DELETE.ME") #' #' folder("the/next/big/thing", "hello world", "now/is/the/time") #' #' folder(cat, dog) #' lapply(c("cat", "dog"), delete) #' } delete <- function(file = NULL) { x <- if (is.null(file)) { utils::menu(dir()) } else { file } unlink(x, recursive = TRUE, force = FALSE) } #' Create Folder #' #' \code{folder} - Create a folder/directory. #' #' @return \code{folder} creates a folder/directory. #' @rdname file_handling #' @export folder <- function(..., folder.name = NULL) { if (!is.null(folder.name)) { x <- strsplit(folder.name, split = ", ") } else { x <- substitute(...()) } if (!is.null(x)) { x <- unblanker(scrubber(unlist(lapply(x, function(y) { as.character(y)})))) } if (is.null(x)) { hfolder() } else { if (length(x) == 1) { hfolder(x) } else { lapply(x, function(z) { hfolder(z) }) } } } hfolder <- function(folder.name = NULL) { if (is.null(folder.name)) { FN <- mgsub(c(":", " "), c(".", "_"), substr(Sys.time(), 1, 19)) } else { FN <-folder.name } parts <- unlist(strsplit(FN, "/")) if (length(parts) == 1) { x <- paste(getwd(), "/", FN, sep = "") } else { ## If nested path (multiple directories created) if (!file.exists(dirname(FN))) { y <- FN z <- length(parts) for (i in rev(seq_along(parts))) { if(file.exists(y)) { z <- z + 1 break } y <- dirname(paste(parts[1:i], collapse ="/")) z <- z - 1 } for (i in z:(length(parts) - 1)) { suppressWarnings(dir.create(paste(parts[1:i], collapse ="/"))) } } x <- FN } dir.create(x) return(x) } unblanker <- function(x)subset(x, nchar(x)>0)
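## Illustrative usage sketch (not part of the package): the project name and path
## below are hypothetical, and `open = FALSE` avoids trying to launch RStudio.
## Guarded by `if (FALSE)` so it never runs when this file is sourced.
if (FALSE) {
  proj <- new_project(project = "pilot_study", path = tempdir(), open = FALSE)
  print(proj)

  ## `folder()` and `delete()` can also be used on their own:
  folder("DELETE.ME")
  delete("DELETE.ME")
}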
## Source file: /R/new_project.R (repo: cran/qdap)
files <- list.files("lessons", pattern = "*.Rmd$", full.names = TRUE) # purrr::walk(files, rmarkdown::render, output_format = "html_document") for (f in files) rmarkdown::render(f) files_html <- stringr::str_replace_all(files, ".Rmd", ".html") purrr::walk(c(files,files_html), fs::file_copy, new_path = "docs/lessons", overwrite = TRUE) rmarkdown::render_site()
/R/render_site.R
no_license
d-bohn/psych-301
R
false
false
381
r
#loading libraries library(plyr) library(dplyr) library(ggplot2) #--------------------------------------------------------------------------------------------------------------------- #Reading data set data <- read.csv("data/DOHMH_NYC.csv", na.strings = c("NA","Not Applicable")) #--------------------------------------------------------------------------------------------------------------------- #Filtering out restaurants with No violations, as they do not have complete information dataNOV <- data %>% filter(ACTION =="No violations") #Selecting the other restaurants dataNew <- data %>% filter(!ACTION =="No violations") #Omitting NA values which are no considerable dataNA <- na.omit(dataNew) #--------------------------------------------------------------------------------------------------------------------- #Formating date types dataNewD <- dataNA dataNewD$GRADE.DATE <- as.Date(dataNewD$GRADE.DATE, format="%y/%m/%d") dataNewD$INSPECTION.DATE <- as.Date(dataNewD$INSPECTION.DATE, format="%y/%m/%d") dataNewD$RECORD.DATE <- as.Date(dataNewD$RECORD.DATE, format="%y/%m/%d") #--------------------------------------------------------------------------------------------------------------------- # Data set with unque observations for each restaurant dataUniqueR<- dataNewD %>% group_by(BORO, DBA, BUILDING, STREET) %>% arrange(desc(GRADE.DATE)) %>% slice(1:1) #--------------------------------------------------------------------------------------------------------------------- #Number of restaurants in each BORO A <- dataNewD %>% select(BORO, DBA, BUILDING, STREET) dataResCountB <- unique(A[,]) %>% group_by(BORO) %>% tally() myPlot1 <- ggplot(data = dataResCountB, aes(x=BORO,y=n))+ ggtitle("Number of Restaurants in Each Boro")+labs(x="Boro",y="Number of Restaurants") myPlot1 + geom_bar(stat="identity", aes(fill = BORO))+ theme(plot.title = element_text(hjust = 0.5)) #--------------------------------------------------------------------------------------------------------------------- # Distribution of score variable plot10 <- dataUniqueR %>% select(SCORE) %>% filter(SCORE != "NA") %>% group_by(SCORE) %>% tally() dataBoroCount10 <- plot10 myPlot3 <- ggplot(data = dataBoroCount10, aes(x=SCORE,y=n))+ggtitle("Distribution of Scores")+ labs(x="Score",y="Frequency of restaurants") myPlot3 + geom_bar(stat="identity", fill = "blue")+ xlim(-2,160) + theme(plot.title = element_text(hjust = 0.5)) #--------------------------------------------------------------------------------------------------------------------- # Number of Restaurants vs Cuisine Type dataResCountC <- dataUniqueR %>% group_by(CUISINE.DESCRIPTION) %>% tally() %>% arrange(desc(n)) dataResCountCTop10 <- dataResCountC %>% head(10) myPlot1 <- ggplot(data = dataResCountCTop10, aes(x=CUISINE.DESCRIPTION,y=n)) + ggtitle("Number of Restaurants by Cuisine type")+ labs(x="Cuisine Type",y="Number of Restaurants") myPlot1 + geom_bar(stat="identity", aes(fill = CUISINE.DESCRIPTION)) + theme(axis.text.x = element_text(angle = 90)) + theme(plot.title = element_text(hjust = 0.5)) #--------------------------------------------------------------------------------------------------------------------- # Number of critical violations vs Cuisine Type dataResCountCRI <- dataUniqueR %>% filter(CRITICAL.FLAG=="Critical") %>% group_by(CUISINE.DESCRIPTION) %>% tally() %>% arrange(desc(n)) dataResCountCRITop10 <- dataResCountCRI %>% head(10) myPlot1 <- ggplot(data = dataResCountCRITop10, aes(x=CUISINE.DESCRIPTION,y=n)) + ggtitle("Number of Restaurants with critical 
violations")+ labs(x="Cuisine Type",y="Number of Restaurants") myPlot1 + geom_bar(stat="identity", aes(fill = CUISINE.DESCRIPTION)) + theme(axis.text.x = element_text(angle = 90)) + theme(plot.title = element_text(hjust = 0.5)) #--------------------------------------------------------------------------------------------------------------------- # Distribution of Restaurants graded A for each boro dataBoroCount2<- dataUniqueR %>% select(BORO, DBA, GRADE) %>% filter(GRADE %in% c("A","B","C")) %>% group_by(BORO,GRADE) %>% tally() %>% mutate(percentage = (n*100)/sum(n)) dataBoroCount2_A <- dataBoroCount2 %>% filter(GRADE=="A") myPlot2 <- ggplot(data = dataBoroCount2, aes(x=BORO,y=n,fill=GRADE)) +ggtitle("Distribution of Grades of Restaurants area wise")+ labs(x="Borough",y="Number of Restaurants") myPlot2 + geom_bar(stat="identity") + theme(plot.title = element_text(hjust = 0.5)) #--------------------------------------------------------------------------------------------------------------------- # Area vs No of restaurants with critical violations dataBoroCount7 <- dataUniqueR %>% select(BORO, DBA, CRITICAL.FLAG) %>% filter(CRITICAL.FLAG == "Critical") %>% group_by(BORO) %>% tally() %>% mutate(percentage = (n*100)/sum(n)) myPlot3 <- ggplot(data = dataBoroCount7, aes(x=BORO,y=percentage))+ggtitle("% of Restaurants with Critical Violations")+ labs(x="Boro",y="% of restaurants") myPlot3 + geom_bar(stat="identity", aes(fill = BORO)) + theme(plot.title = element_text(hjust = 0.5)) #--------------------------------------------------------------------------------------------------------------------- # Area vs No of restaurants with critical violations dataBoroCount7 <- dataUniqueR %>% select(BORO, DBA, CRITICAL.FLAG) %>% filter(CRITICAL.FLAG == "Critical") %>% group_by(BORO) %>% tally() %>% mutate(percentage = (n*100)/sum(n)) myPlot3 <- ggplot(data = dataBoroCount7, aes(x=BORO,y=percentage))+ggtitle("% of Restaurants with Critical Violations")+ labs(x="Boro",y="% of restaurants") myPlot3 + geom_bar(stat="identity", aes(fill = BORO)) + theme(plot.title = element_text(hjust = 0.5)) #--------------------------------------------------------------------------------------------------------------------- # Area vs percent of A, B.. 
dataBoroCount8<- dataUniqueR %>% select(BORO, GRADE) %>% filter(GRADE != "Not Yet Graded") %>% group_by(BORO, GRADE) %>% tally() myPlot6 <- ggplot(data = dataBoroCount8, aes(x=BORO,y=n, fill=GRADE))+ggtitle("Distribution of grades in each borough")+ labs(x="Borough",y="Number of grades") myPlot6 + geom_bar(stat="identity") + theme(plot.title = element_text(hjust = 0.5),axis.text.x = element_text(angle = 30)) #--------------------------------------------------------------------------------------------------------------------- #average scores of each borough for the different cuisine types available x <- dataNewD %>% group_by(CUISINE.DESCRIPTION,BORO) %>% summarise(count=n(),mean_score=mean(SCORE)) x <- x %>% filter(CUISINE.DESCRIPTION=="Caribbean" | CUISINE.DESCRIPTION=="Chinese" | CUISINE.DESCRIPTION=="Indian"| CUISINE.DESCRIPTION=="Latin") ggplot(x,aes(x=BORO,y=mean_score))+facet_wrap(~CUISINE.DESCRIPTION,ncol = 2)+theme(plot.title = element_text(hjust = 0.5),axis.text.x = element_text(angle = 90))+geom_bar(stat="identity", aes(fill = BORO)) +ggtitle("Average score vs BORO")+ labs(x="Borough",y="Average score") #--------------------------------------------------------------------------------------------------------------------- #distribution of grades for Domino’s over the period 2001 to 2012 y <- dataNewD %>% select(DBA,GRADE.DATE,GRADE) y <- na.omit(y) %>% filter(DBA=="DOMINO'S" & GRADE %in% c("A","B","C")) y <- y %>% mutate(year = format(y$GRADE.DATE, "%Y")) z <- y %>% group_by(year,GRADE) %>% tally() %>% mutate(percentage=n*100/sum(n)) ggplot(z,aes(x=GRADE,y=percentage))+facet_wrap(~year,ncol = 4)+theme(plot.title = element_text(hjust = 0.5))+geom_bar(stat="identity", aes(fill = GRADE)) +ggtitle("DOMINO'S: Percentage of each grade year wise")+ labs(x="Grade",y="Percentage of each grade received") #--------------------------------------------------------------------------------------------------------------------- #distribution of grades for Papa John’s over the period 2001 to 2012 y <- dataNewD %>% select(DBA,GRADE.DATE,GRADE) y <- na.omit(y) %>% filter(DBA=="PAPA JOHN'S" & GRADE %in% c("A","B","C")) y <- y %>% mutate(year = format(y$GRADE.DATE, "%Y")) z <- y %>% group_by(year,GRADE) %>% tally() %>% mutate(percentage=n*100/sum(n)) ggplot(z,aes(x=GRADE,y=percentage))+facet_wrap(~year,ncol = 4)+theme(plot.title = element_text(hjust = 0.5))+geom_bar(stat="identity", aes(fill = GRADE)) +ggtitle("PAPA JOHN'S: Percentage of each grade year wise")+ labs(x="Grade",y="Percentage of each grade received") #--------------------------------------------------------------------------------------------------------------------- #Dstribution of mean scores on a time scale for pizza restaurants ys <- dataNewD %>% select(DBA,GRADE.DATE,SCORE) ys <- na.omit(ys) %>% filter(DBA=="DOMINO'S"|DBA=="PAPA JOHN'S"|DBA=="PIZZA HUT" | DBA=="LITTLE CAESARS") ys <- ys %>% mutate(year = format(ys$GRADE.DATE, "%Y")) zs <- ys %>% group_by(DBA,year) %>% summarise(mean_score=mean(SCORE)) ggplot(zs,aes(x=year,y=mean_score,group=DBA))+theme(plot.title = element_text(hjust = 0.5),axis.text.x = element_text(angle = 90))+geom_line(aes(color=DBA)) +ggtitle("Time series analysis of mean score")+ labs(x="Year",y="Mean score")
/Project.R
no_license
manuj005/nyc_restaurant_inspection
R
false
false
9,158
r
\name{get.opt.k} \alias{get.opt.k} \title{Optimal temporal aggregation level} \description{Find optimal temporal aggregation level for AR(1), MA(1), ARMA(1,1).} \usage{ get.opt.k(y,m=12,type=c("ar","ma","arma")) } \arguments{ \item{y}{ Time series (ts object). } \item{m}{ Maximum temporal aggregation level to consider. } \item{type}{ Type of DGP which can be "ar" for AR(1), "ma" for MA(1) or "arma" for ARMA(1,1). } } \value{ \item{k}{Identified optimal temporal aggregation level.} } \references{ Rostami-Tabar, Bahman, et al. "Demand forecasting by temporal aggregation." Naval Research Logistics (NRL) 60.6 (2013): 479-498. Rostami-Tabar, Bahman, et al. "A note on the forecast performance of temporal aggregation." Naval Research Logistics (NRL) 61.7 (2014): 489-500. } \author{ Nikolaos Kourentzes & Bahman Rostami-Tabar } \examples{ get.opt.k(AirPassengers,12) }
/man/get.opt.k.Rd
no_license
edergsc/TStools
R
false
false
894
rd
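A small usage sketch for the type argument documented above (untested; assumes the TStools package from this repository is installed and that it behaves as documented):

library(TStools)  # assumed package name, taken from this repository
get.opt.k(AirPassengers, m = 12, type = "ma")    # optimal k assuming an MA(1) DGP
get.opt.k(AirPassengers, m = 12, type = "arma")  # optimal k assuming an ARMA(1,1) DGP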
\name{this_month} \alias{this_month} \title{ Start and end of month } \description{ Defines the first and last dates of the month } \usage{ this_month(x = Sys.Date(), part = getOption("timeperiodsR.parts")) } \arguments{ \item{x}{Date object} \item{part}{The part of the period to return, one of "all", "start", "end", "sequence" or "length". See Details.} } \details{ You can get an object of class tpr with all components, or request a single component; use \code{part} to control this: \itemize{ \item all - get all components \item start - get only the first date of the period \item end - get only the last date of the period \item sequence - get the vector of all dates in the period \item length - get the number of dates in the period } } \value{Object of tpr class} \author{ Alexey Seleznev } \seealso{ To get other periods see \code{\link[timeperiodsR:this_week]{this_week()}}, \code{\link[timeperiodsR:this_quarter]{this_quarter()}}, \code{\link[timeperiodsR:this_year]{this_year()}} } \examples{ ## To get start, end and sequence of this month thismonth <- this_month() ## To get the vector of dates in this month this_month(part = "sequence") this_month()$sequence seq(thismonth) ## To get the number of days in this month day_nums <- this_month(part = "length") this_month()$length length(thismonth) }
/man/this_month.Rd
no_license
selesnow/timeperiodsR
R
false
false
1,346
rd
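A short sketch of requesting single components with part, as listed in the Details section above (assumes timeperiodsR is loaded and that the x argument anchors the month of the supplied date):

library(timeperiodsR)
this_month(part = "start")                 # first date of the current month
this_month(part = "end")                   # last date of the current month
this_month(Sys.Date() - 31, part = "all")  # the month containing a date roughly one month ago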
# Format Flora do Brasil first ##loads packages---- library(dplyr) library(flora) library(readr) #Downloads data from FdB---- #library("downloader") #pag <- "http://ipt.jbrj.gov.br/jbrj/archive.do?r=lista_especies_flora_brasil" #download(url = pag, destfile = "iptflora") #unzip("iptflora",exdir = "./ipt") #reads formatted distribution distribution <- read_csv("./ipt/distribution_modified.csv") %>% dplyr::select(-1) head(distribution) #####------ taxon <- read_tsv("./ipt/taxon.txt", quote = "", trim_ws = T) head(taxon) relationship <- read_delim("./ipt/resourcerelationship.txt", delim = "\t", quote = "") %>% distinct() ref <- read_delim("./ipt/reference.txt",delim = "\t", quote = "") #lf_habitat <- read_delim("./ipt/speciesprofile.txt", delim = "\t", quote = "") lf_mod <- read.csv("./ipt/lf_hab_modified.csv") head(lf_mod) length(unique(lf_mod$id)) types <- read_delim("./ipt/typesandspecimen.txt", delim = "\t", quote = "") %>% group_by(id) %>% mutate_all(.funs = function(x) paste(x, collapse = "-")) vernacular <- read_delim("./ipt/vernacularname.txt", delim = "\t") %>% mutate(vernacular = paste(vernacularName, language, locality, sep = "-")) %>% dplyr::select(id, vernacular) %>% group_by(id) %>% mutate(vernacular_names = paste(vernacular, collapse = "/")) %>% dplyr::select(-vernacular) %>% distinct() names(vernacular) relacion <- unique(relationship$relationshipOfResource) relacion taxon_dist <- left_join(taxon, distribution) taxon_dist_ref <- left_join(taxon_dist, ref) taxon_dist_ref_lfh <- left_join(taxon_dist_ref, lf_mod) taxon_dist_ref_lfh_types <- left_join(taxon_dist_ref_lfh, types) all <- left_join(taxon_dist_ref_lfh_types, vernacular) all <- all %>% distinct() all <- all %>% mutate(nombre = purrr::map(scientificName, ~remove.authors(.)) %>% simplify2array()) write.csv(all, "./ipt/all_flora.csv")
/scripts/1 flora.R
no_license
AndreaSanchezTapia/CNCFlora_IUCN_LC
R
false
false
1,944
r
#' Generate a forest plot from a meta-analysis #' #' @param model a single \code{\link[metafor]{rma}} object or a \code{list} of them #' @param study_labels a character vector of study labels or list of character vectors the same length as \code{model} #' @param model_label a single model label or character vector of model labels the same length as \code{model} #' @param show_stats a \code{list} of stats to show at the bottom of the forest plot for e.g. heterogeneity #' @param additional_data a \code{data.frame} of additional data that can be referenced for the data #' shown in the panels of the forest plot #' @param point_size a numeric vector with the point sizes for the individual studies, or a single value used for #' all studies, or a list of numeric vectors if more than one model is to be plotted #' @param trans an optional transform function used on the numeric data for plotting the axes #' @param show_individual_studies whether to show the individual studies (the default) or just the summary diamond #' @param show_model a logical value, if `TRUE`, show model result, otherwise only show forest plots for studies #' @inheritParams forest_model #' #' @details This produces a forest plot using the output of \code{\link[metafor]{rma}}. #' #' @return plot #' #' @import dplyr #' #' @export #' #' @examples #' if (require("metafor")) { #' data("dat.bcg") #' dat <- escalc(measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg) #' model <- rma(yi, vi, data = dat) #' #' print(forest_rma(model, #' study_labels = paste(dat.bcg$author, dat.bcg$year), #' trans = exp #' )) #' #' print(forest_rma(model, #' panels = forest_panels( #' Study = ~study, #' N = ~n, ~vline, `Log Relative Risk` = ~ forest(line_x = 0), #' ~ spacer(space = 0.10), #' ~ sprintf("%0.3f (%0.3f, %0.3f)", estimate, conf.low, conf.high) #' ), #' study_labels = paste(dat.bcg$author, dat.bcg$year), #' trans = exp #' )) #' } forest_rma <- function(model, panels = NULL, study_labels = NULL, additional_data = NULL, point_size = NULL, model_label = NULL, show_individual_studies = TRUE, show_model = TRUE, show_stats = list( "I^2" = rlang::quo(sprintf("%0.1f%%", I2)), "p" = rlang::quo(format.pval(QEp, digits = 4, eps = 1e-4, scientific = 1 )) ), trans = I, funcs = NULL, format_options = list( colour = "black", shape = 15, text_size = 5, banded = TRUE ), theme = theme_forest(), limits = NULL, breaks = NULL, return_data = FALSE, recalculate_width = TRUE, recalculate_height = TRUE) { stopifnot(is.list(model)) panels <- panels %||% default_forest_panels(model, trans_char = deparse(substitute(trans))) if (!inherits(model, "rma")) { # List of models n_model <- length(model) stopifnot(all(vapply(model, inherits, logical(1), "rma"))) if (length(study_labels) == 1L) study_labels <- rep(study_labels, n_model) if (length(model_label) == 1L) model_label <- rep(model_label, n_model) if (length(point_size) == 1L) point_size <- rep(point_size, n_model) if (length(show_individual_studies) == 1L) { show_individual_studies <- rep(show_individual_studies, n_model) } if (is.data.frame(additional_data)) { additional_data <- rep(list(additional_data), n_model) } stopifnot( is.null(study_labels) || length(study_labels) == n_model, is.null(model_label) || length(model_label) == n_model, is.null(point_size) || length(point_size) == n_model, is.null(additional_data) || length(additional_data) == n_model, is.null(show_individual_studies) || length(show_individual_studies) == n_model ) forest_data_list <- lapply(seq(model), function(i) {
get_data_for_rma(model[[i]], study_labels = study_labels[[i]], model_label = model_label[i], point_size = point_size[[i]], additional_data = additional_data[[i]], show_individual_studies = show_individual_studies[[i]], show_stats = show_stats ) }) forest_data <- bind_rows(forest_data_list) if (!is.null(names(model))) { forest_data$.section <- rep(names(model), vapply(forest_data_list, nrow, numeric(1))) } } else { forest_data <- get_data_for_rma(model, study_labels = study_labels, model_label = model_label, point_size = point_size, additional_data = additional_data, show_individual_studies = show_individual_studies, show_stats = show_stats ) if (!show_model) { forest_data = forest_data %>% dplyr::slice(1:(nrow(forest_data)-2)) } } if (!is.null(limits)) { forest_data <- forest_data %>% mutate( arrow_tag.l = limits[1], arrow_tag.r = limits[2], arrow_tag.l = ifelse(conf.low < .data$arrow_tag.l, TRUE, FALSE), arrow_tag.r = ifelse(conf.high > .data$arrow_tag.r, TRUE, FALSE) ) %>% mutate( plot_range.low = ifelse(.data$arrow_tag.l, limits[1], conf.low), plot_range.high = ifelse(.data$arrow_tag.r, limits[2], conf.high) ) } plot_data <- list( forest_data = forest_data, mapping = aes(estimate, xmin = conf.low, xmax = conf.high, size = point_size, section = .section, band = .band, diamond = .diamond, whole_row = .whole_row ), panels = panels, trans = trans, funcs = funcs, format_options = format_options, theme = theme, limits = limits, breaks = breaks, recalculate_width = recalculate_width, recalculate_height = recalculate_height ) main_plot <- do.call("panel_forest_plot", plot_data) if (return_data) { list(plot_data = plot_data, plot = main_plot) } else { main_plot } } # Extract data from individual rma model # # @inheritParams forest_rma # # @importFrom rlang eval_tidy # @return a data.frame with the extracted data get_data_for_rma <- function(model, study_labels = NULL, model_label = NULL, point_size = NULL, additional_data = NULL, show_individual_studies = TRUE, show_stats = NULL) { if (model$level > 1) { # Has changed at some point in rma.uni alpha <- (100 - model$level) / 100 } else { alpha <- model$level } k <- length(model$vi) model_label <- model_label %||% if (model$method == "FE") "FE Model" else "RE Model" study_labels <- study_labels %||% paste("Study", 1:k) if (is.null(point_size)) { if (is.null(model$weights)) { if (any(model$vi <= 0, na.rm = TRUE)) { point_size <- rep(1, k) } else { point_size <- 1 / sqrt(model$vi) } } else { point_size <- model$weights } if (!all(point_size == point_size[1])) { point_size <- (point_size - min(point_size, na.rm = TRUE)) / diff(range(point_size), na.rm = TRUE) point_size <- (point_size * 1) + 0.5 } if (all(is.na(point_size))) point_size <- rep(1, k) } if (show_individual_studies) { forest_data <- as_data_frame(model[c("yi", "ni", "vi")]) %>% transmute( estimate = yi, se = sqrt(vi), n = ni, conf.low = estimate + stats::qnorm(alpha / 2) * se, conf.high = estimate - stats::qnorm(alpha / 2) * se, study = study_labels, point_size = point_size, stat = "", .section = NA, .diamond = FALSE, .band = TRUE ) } else { forest_data <- data_frame() } forest_data <- data_frame( estimate = model$b[1], se = model$se[1], conf.low = model$ci.lb[1], conf.high = model$ci.ub[1], study = model_label, n = sum(model$ni), point_size = NA, stat = "", .section = NA, .diamond = TRUE, .band = FALSE ) %>% bind_rows(forest_data, .) 
if (!is.null(additional_data) && is.data.frame(additional_data)) { forest_data <- bind_cols(forest_data, additional_data) } if (!is.null(show_stats)) { forest_data <- lapply(seq(show_stats), function(i) { stat_result <- rlang::eval_tidy(show_stats[[i]], model) stat_sign <- regmatches(stat_result, regexec("^[=<>]=?", stat_result))[[1]] if (length(stat_sign) == 0) { stat_result <- paste0("= ", stat_result) } sprintf('%s ~ "%s"', names(show_stats)[i], stat_result) }) %>% { data_frame( stat = paste0("paste(", paste(., collapse = ', "; ", '), ")"), .band = FALSE, .whole_row = TRUE ) } %>% { bind_rows(list(forest_data, .)) } } forest_data } utils::globalVariables( c("I2", "QEp") )
/R/forest_rma.R
no_license
zhangyupisa/forestmodel
R
false
false
9,180
r
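A quick sketch of the show_model and limits arguments this version of forest_rma() adds (untested; treating limits as values on the model's log scale is an assumption read off the code above):

library(metafor)
library(forestmodel)  # assumes this fork is installed
data("dat.bcg")
dat <- escalc(measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg)
model <- rma(yi, vi, data = dat)
# Hide the pooled-effect diamond and clip intervals to roughly RR 0.2-5;
# intervals beyond the limits should get arrow markers per the mutate() step above.
forest_rma(model,
           study_labels = paste(dat.bcg$author, dat.bcg$year),
           show_model = FALSE,
           limits = log(c(0.2, 5)),
           trans = exp)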
#Attention: change the working directory setwd("C:/...") options(scipen=999) #Read the data set df <- read.csv("df.csv") #Check the variables names(df) #Scatter plot matrix library(GGally) ggpairs(df, title="correlogram with ggpairs()") #Multiple linear regression model regressao <- lm(data=df, preco ~ quartos+area+zona+tipo) summary(regressao)
/regressao.R
no_license
nelson-ewert/calculadora-aluguel
R
false
false
466
r
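Since the script above fits a rent-price model, a natural follow-up is predicting the price of a new listing; a hedged sketch (the values and the zona/tipo levels below are made up and must match levels present in df.csv):

# Hypothetical new listing; the zona and tipo levels are assumptions.
novo <- data.frame(quartos = 2, area = 70, zona = "centro", tipo = "apartamento")
predict(regressao, newdata = novo, interval = "prediction")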
#' read_DEVTRANS #' #' @describeIn read_CEIDARS Read DEVTRANS-formatted file #' #' @importFrom readr cols col_character col_integer col_number col_date #' @export read_DEVTRANS <- function ( path, ... ) { DEVTRANS_cols <- readr::cols( TRANS_ID = col_character(), CO = col_integer(), FACID = col_integer(), AB = col_character(), DIS = col_character(), ACTION = col_character(), DEV = col_integer(), DEVNM = col_character(), PERID = col_character(), NUMDEV = col_integer(), EQSIZE = col_number(), EQSIZE_CF = col_character(), EQUNITC = col_integer(), EQTYPEC = col_integer(), DEVSUBCO = col_character(), SECT = col_integer(), TWNSHP = col_integer(), TWNSHPB = col_character(), RANGE = col_integer(), RANGEB = col_character(), DEVD1 = col_character(), DEVD2 = col_character(), DEVCAP = col_integer(), MEMO_DEV = col_character(), DEVU_D = col_date(format = "%Y%m%d"), OPERATOR = col_character(), TDATE = col_date(format = "%Y%m%d")) DEVTRANS_data <- tbltools::read_csv( path, col_types = DEVTRANS_cols, ...) return(DEVTRANS_data) }
/R/read_DEVTRANS.R
no_license
BAAQMD/CEIDARS
R
false
false
1,100
r
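A hedged usage sketch for read_DEVTRANS() (the file path is hypothetical; extra arguments are passed through to the underlying reader):

# Hypothetical path to a DEVTRANS export
devtrans <- read_DEVTRANS("data-raw/DEVTRANS_2020.csv")
str(devtrans)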
#' Mexican power network #' #' A network of 11 core members of the 1990s Mexican power elite (Knoke 2017), #' three of which were successively elected presidents of Mexico: #' José López Portillo (1976-82), Miguel de la Madrid (1982-88), and Carlos Salinas de Gortari (1988-94, #' who was also the son of another core member, Raúl Salinas Lozano). #' The undirected lines connecting pairs of men represent any formal, informal, #' or organizational relation between a dyad; #' for example, “common belonging (school, sports, business, political participation), #' or a common interest (political power)” (Mendieta et al. 1997: 37). #' #' @docType data #' @keywords datasets #' @name mpn_mexicanpower #' @usage data(mpn_mexicanpower) #' @format Matrix with 11 rows/columns #' @source Knoke, David. 1990. \emph{Political Networks}. #' #' Knoke, Diani, Hollway, and Christopoulos. 2021. \emph{Multimodal Political Networks}. Cambridge University Press: Cambridge. "mpn_mexicanpower"
/R/data_mexicanpower.R
permissive
toshitaka-izumi/migraph
R
false
false
997
r
#' calculates map curve #' #' For each window length s (in weeks), sums every run of s consecutive values and records, per row: the window length (column 1), the maximum sum expressed as a percentage of the annual total (column 2), the maximum sum itself (column 3), and the start and end positions of the first window attaining that maximum (columns 4 and 5). A row of zeros is prepended to the result. #' #' @keywords internal calcular.map <- function(i.datos) { datos <- as.vector(as.matrix(i.datos)) semanas <- length(datos) maxsumasemanas <- array(dim = c(semanas, 5)) for (s in 1:semanas) { sumasemanas <- numeric() for (i in 1:(semanas + 1 - s)) { sumasemanas <- c(sumasemanas, sum(datos[i:(i + s - 1)], na.rm = TRUE)) } maxsumasemanas[s, 1] <- s maxsumasemanas[s, 3] <- maxFixNA(sumasemanas) maxsumasemanas[s, 4] <- min((1:(semanas + 1 - s))[maxFixNA(sumasemanas) == sumasemanas]) maxsumasemanas[s, 5] <- maxsumasemanas[s, 4] + s - 1 } sumaanual <- sum(datos, na.rm = TRUE) if (sumaanual==0) maxsumasemanas[, 2] <- 0 else maxsumasemanas[, 2] <- 100 * maxsumasemanas[, 3] / sumaanual maxsumasemanas <- rbind(rep(0, 5), maxsumasemanas) return(maxsumasemanas) }
/R/calcular.map.R
no_license
lozalojo/mem
R
false
false
839
r
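A toy illustration of the matrix calcular.map() returns, following the column description added above (the triple-colon call assumes the function ships unexported in the mem package from this repository):

x <- c(1, 2, 10, 20, 15, 3, 1)            # toy weekly series
m <- mem:::calcular.map(x)
colnames(m) <- c("weeks", "pct.of.total", "max.sum", "start", "end")
m                                          # e.g. the 3-week window 10+20+15 covers 45 of the 52 total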
#' Function to assign properties to an expression matrix #' #' This is a function to assign stromal property and TNBCType generative property levels to a TNBC dataset #' @param ESet An ExpressionSet object. Rows correspond to genes, columns to samples. If there are genes with multiple probes, they will be collapsed to a single row using the function defined in var.method. #' @param geneID.column Integer or column name of the column in the featureData table that contains the HGNC ID. #' @param genelists A vector with either Stroma4, TNBCType, or both to specify which genelists to use. #' @param n An integer value specifying the number of random samples to generate. #' @param seed An integer value specifying a seed for the random ranks function. Default value is 123456. #' @param mc.cores An integer specifying how many cores to use. Defaults to use the function snowWorkers(). #' @param var.method Function for assessing variance to collapse probes. Default is IQR #' @export #' @import Biobase #' @importFrom BiocParallel snowWorkers #' @importFrom matrixStats rowIQRs #' @importFrom utils data #' @return The function returns the input ExpressionSet with one column appended to its pData for each property. Assignments are factors with levels low, intermediate and high, corresponding to low, intermediate and high property levels respectively. #' @examples #' library(breastCancerMAINZ) #' data(mainz, package='breastCancerMAINZ') #' all.properties <- assign.properties(ESet=mainz, geneID.column='Gene.symbol', #' genelists=c('Stroma4', 'TNBCType'), n=10, mc.cores=1) assign.properties <- function (ESet, geneID.column = 1, genelists = c("Stroma4", "TNBCType"), n = 1000, seed = 123456, mc.cores = snowWorkers(), var.method = function(x) rowIQRs(x, na.rm = TRUE)) { message("--Assigning properties to expression data--") if (!class(ESet) == "ExpressionSet") stop("Error in assigning property: exprs is not of class \"ExpressionSet\"") exprs <- exprs(ESet) genes <- fData(ESet)[, geneID.column] if (any(duplicated(genes))) { message("--There are duplicated genes.
Using most variable to collapse--") var.estimate <- order(var.method(exprs), decreasing = TRUE) exprs <- exprs[var.estimate, ] genes <- genes[var.estimate] to.keep <- !duplicated(genes) genes <- genes[to.keep] exprs <- exprs[to.keep, ] } temp.envir <- new.env() if ("Stroma4" %in% genelists) { data("B.stroma.property", package = "STROMA4", envir = temp.envir) data("E.stroma.property", package = "STROMA4", envir = temp.envir) data("D.stroma.property", package = "STROMA4", envir = temp.envir) data("T.stroma.property", package = "STROMA4", envir = temp.envir) } if ("TNBCType" %in% genelists) { data("BL1.property", package = "STROMA4", envir = temp.envir) data("BL2.property", package = "STROMA4", envir = temp.envir) data("IM.property", package = "STROMA4", envir = temp.envir) data("LAR.property", package = "STROMA4", envir = temp.envir) data("M.property", package = "STROMA4", envir = temp.envir) data("MSL.property", package = "STROMA4", envir = temp.envir) } if (!("TNBCType" %in% genelists) & !("Stroma4" %in% genelists)) stop("Need to specify either Stroma4 or TNBCType as genelists") ret <- vector("list", length(temp.envir)) names(ret) <- names(temp.envir) for (i in names(temp.envir)) { match.genes <- temp.envir[[i]] match.genes <- match.genes[which(match.genes[, 1] %in% genes), , drop = FALSE] if (nrow(match.genes) == 0) { ret[[i]] <- "Error: No matching genes in expression matrix" } else { message("----", nrow(match.genes), " out of ", nrow(temp.envir[[i]]), " total genes matching for ", i, "----") up.genes <- which(genes %in% match.genes[which(match.genes[, 2] == "up"), 1]) dn.genes <- which(genes %in% match.genes[which(match.genes[, 2] == "down"), 1]) match.exprs <- exprs[c(up.genes, dn.genes), ] directions <- rep(c("up", "down"), c(length(up.genes), length(dn.genes))) ranksum.object <- .sig.ranksum(exprdata = match.exprs, up = which(directions == "up"), dn = which(directions == "down"), full.return = TRUE) roi <- .random.ranks(ranksum.object, n = n, seed = seed, workers = mc.cores) ret[[i]] <- .define.roi.regions(ranksum.object, roi) } } rm(temp.envir) df <- as.data.frame(Map(factor, ret, MoreArgs=list(labels=c("low", "intermediate", "high")))) pData(ESet) <- cbind(pData(ESet), df) return(ESet) }
/R/assign.property.R
no_license
smisaleh/STROMA4
R
false
false
4,782
r
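Building on the roxygen example above, the property assignments end up as factor columns in pData(); a hedged sketch of inspecting them (the column names assume each property object keeps the name it is loaded under with data()):

library(breastCancerMAINZ)
data(mainz, package = 'breastCancerMAINZ')
all.properties <- assign.properties(ESet = mainz, geneID.column = 'Gene.symbol',
                                    genelists = c('Stroma4', 'TNBCType'), n = 10, mc.cores = 1)
head(Biobase::pData(all.properties)[, c("B.stroma.property", "T.stroma.property")])
table(Biobase::pData(all.properties)$LAR.property)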
PE <- function (U, b) { if (missing(U)) stop("The membership degree matrix U must be given") if (is.null(U)) stop("The membership degree matrix U is empty") U=as.matrix(U) if (any(is.na(U))) stop("The membership degree matrix U must not contain NA values") if (!is.numeric(U)) stop("The membership degree matrix U is not numeric") if (missing(b)) { b=exp(1) } if (!is.numeric(b)) { b=exp(1) cat("The logarithmic base b is not numeric: the default value b=exp(1) will be used ",fill=TRUE) } if (b<=1) { b=exp(1) cat("The logarithmic base b must be >1: the default value b=exp(1) will be used ",fill=TRUE) } part.ent=partEntropy(U = U,b = b,n = nrow(U)) return(part.ent) }
/R/PE_mod.R
no_license
cran/fclust
R
false
false
793
r
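# A small usage sketch for PE(), assuming the fclust package (which provides PE() along
# with the FKM fuzzy k-means routine and the example data set used here) is installed:
# the membership degree matrix U comes straight from the fitted object.
library(fclust)
data(unemployment)
fit <- FKM(X = unemployment, k = 3, stand = 1)
PE(fit$U)          # partition entropy with the default natural-log base
PE(fit$U, b = 2)   # the same index measured in bits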
#' LaTeX display of a system of linear equations (SEL)
#'
#' \code{sel2latex} returns a character vector containing the LaTeX code that writes the
#' system of linear equations (SEL) formed by the matrix \code{A} and the matrix \code{B}.
#' The way the SEL is displayed can be chosen: we can display integers, choose the number
#' of digits to the right of the decimal point, or display the result as fractions, either
#' \code{inline} or with \code{frac}, \code{dfrac}, \code{sfrac} and \code{tfrac}. The
#' \code{verbose} parameter controls whether the SEL is printed. The \code{copy2clip}
#' parameter controls whether the result is copied to the clipboard. The \code{concise}
#' parameter simplifies the SEL by removing the zeros when the SEL is displayed.
#'
#' @param A a matrix
#' @param B a matrix of constants
#' @param sel If \code{TRUE} the equations are displayed, otherwise the SEL is displayed in matrix form. \code{TRUE} by default.
#' @param variables Naming scheme for the unknowns: \code{"x"} (w, x, y, z), \code{"a"} (a, b, c, ...) or \code{"xi"} (x_1, x_2, ...).
#' @param style how the matrix entries are displayed. The possible choices are \code{inline}, \code{frac}, \code{dfrac}, \code{sfrac} and \code{tfrac}. By default, integers or decimal numbers are displayed.
#' @param bracket how the matrix is enclosed. The choices are \code{crochet} (square brackets), \code{parenthese} (parentheses) or \code{determinant}. Square brackets by default.
#' @param digits The number of digits to display to the right of the decimal point. 2 digits by default.
#' @param verbose If \code{TRUE} the LaTeX code is printed, if \code{FALSE} the character vector is returned. \code{TRUE} by default.
#' @param concise If \code{TRUE} the zero coefficients are removed when the SEL is displayed. \code{FALSE} by default.
#' @param copy2clip If \code{TRUE} the result is copied to the clipboard. \code{FALSE} by default.
#' @param tolatex If \code{TRUE} the result is wrapped in "$$ result $$".
#' @param tol Numerical tolerance used to decide which coefficients count as zero when \code{concise = TRUE}.
#' @param envir If \code{TRUE}, the result is wrapped in an array and a bracket. If \code{FALSE}, only the matrix is returned.
#' @return The character vector containing the LaTeX code of the matrix
#' @export
#' @examples
#' A <- matrix(c(2, -4, 0,
#'               -3, 0, 7,
#'               0, 0, -1), 3, 3)
#' B <- matrix(c(0, -1, 5), 3, 1)
#' sel2latex(A, B)
sel2latex <- function(A, B, sel = TRUE, variables = c("x", "a", "xi"),
                      style = c("decimal", "inline", "frac", "sfrac", "tfrac", "dfrac"),
                      bracket = c("crochet", "parenthese", "determinant"),
                      verbose = TRUE, concise = FALSE, copy2clip = FALSE,
                      tolatex = TRUE, digits = 2, tol = sqrt(.Machine$double.eps)){

  if ((!is.matrix(A)) || (!is.numeric(A))) stop("A doit etre une matrice numerique.")
  if ((!is.matrix(B)) || (!is.numeric(B))) stop("B doit etre une matrice numerique.")
  if (ncol(B) > 1) stop("B doit posseder une seule colonne")

  style <- match.arg(style, c("decimal", "inline", "frac", "sfrac", "tfrac", "dfrac"))
  bracket <- match.arg(bracket, c("crochet", "parenthese", "determinant"))
  variables <- match.arg(variables, c("x", "a", "xi"))

  if (ncol(A) > 26 && variables == "a") stop("La matrice possede trop de colonnes pour utiliser l'option a, b, c, ...")
  if (ncol(A) > 4 && variables == "x") stop("La matrice possede trop de colonnes pour utiliser l'option w, x, y, z.")

  # Attach attributes to the matrices to simplify the code
  attr(A, "style") <- style
  attr(B, "style") <- style
  attr(A, "bracket") <- bracket
  attr(B, "bracket") <- bracket
  attr(A, "verbose") <- FALSE
  attr(B, "verbose") <- FALSE
  attr(A, "copy2clip") <- FALSE
  attr(B, "copy2clip") <- FALSE
  attr(A, "digits") <- digits
  attr(B, "digits") <- digits

  if (sel){
    matA <- mat2latex(A, envir = FALSE, tolatex = FALSE)
    matB <- mat2latex(B, envir = FALSE, tolatex = FALSE)
    toprint <- vector("character", length = nrow(A))
    var <- paste("x_{",(1:ncol(A)),"}", sep = "")
    for (i in (1:nrow(A))){
      toprint[i] <- paste0(paste(matA[i, ], var, collapse = " & + & "), " & = & ", matB[i, ], " \\\\ \n")
    }
    # Sanitize by cleaning up the matrix
    toprint <- sanitize(toprint, concise)
    if (concise){
      notzero <- apply(A, 1, function(x){sum(abs(x) > tol)})
      max_notzero <- max(notzero)
      begin <- paste0(c("\\begin{array}{", rep("r", 2*max_notzero+1),"}\n"), collapse = "")
      for (i in (1:nrow(A))){
        # Add & on the lines where some are missing
        if (notzero[i] < max_notzero){
          num_esperluette <- 2*(max_notzero - notzero[i])
          toprint[i] <- paste0(c(rep("& ", num_esperluette), toprint[i]), collapse = "")
        }
      }
    } else begin <- paste0(c("\\begin{array}{", rep("r", 2*ncol(A)+1),"}\n"), collapse = "")
    end <- c("\\end{array}")
    toprint <- paste0(c(begin, toprint, end), collapse = "")
    toprint <- convert_var(toprint, ncol(A), variables)
  } else{
    latexA <- mat2latex(A, tolatex = FALSE)
    latexVar <- var2latex(ncol(A), bracket, variables)
    latexB <- mat2latex(B, tolatex = FALSE)
    toprint <- c(latexA, latexVar, "=", latexB)
  }

  if (tolatex) toprint <- paste0(c("$$\n",toprint,"\n$$"), collapse = "")

  # Remove the attributes from the matrices
  attr(A, "style") <- NULL
  attr(B, "style") <- NULL
  attr(A, "bracket") <- NULL
  attr(B, "bracket") <- NULL
  attr(A, "verbose") <- NULL
  attr(B, "verbose") <- NULL
  attr(A, "copy2clip") <- NULL
  attr(B, "copy2clip") <- NULL
  attr(A, "digits") <- NULL
  attr(B, "digits") <- NULL

  if (copy2clip){
    test2clip <- writeClipboard(toprint)
    if (test2clip) {
      message("Le SEL a ete correctement copie dans le presse-papier.")
    } else {
      message("Le SEL n'a pas ete copie dans le presse-papier.")
    }
  }

  if (verbose){
    cat(toprint)
    return(invisible(toprint))
  } else return(invisible(toprint))
}
/R/sel2latex.R
no_license
desautm/linalgr
R
false
false
6,040
r
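# A short usage sketch building on the roxygen example above (it assumes the rest of the
# package, i.e. mat2latex(), var2latex(), convert_var() and sanitize(), is loaded):
# concise = TRUE drops the zero coefficients from the displayed equations, and
# sel = FALSE prints the system in matrix form A x = B instead of as equations.
A <- matrix(c(2, -4, 0,
              -3, 0, 7,
              0, 0, -1), 3, 3)
B <- matrix(c(0, -1, 5), 3, 1)
sel2latex(A, B, concise = TRUE)
sel2latex(A, B, sel = FALSE, bracket = "parenthese")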
# WOCAT: COST AND BENEFIT DATABASE # FINAL CLEAN SPREADSHEET library(here) library(tidyverse) library(lubridate) #library(sjlabelled) library(labelled) library(codebook) # MERGE ALL DATASETS filename = here::here("03_processed_data","03_Data_general.rds") Data_general<-readRDS(filename) filename = here::here("03_processed_data","03_Dictionary_general.rds") Dictionary_general<-readRDS(filename) data_clean = Data_general dictionary = Dictionary_general filename = here::here("03_processed_data","04_Data_cost.rds") Data_general<-readRDS(filename) filename = here::here("03_processed_data","04_Dictionary_cost.rds") Dictionary_general<-readRDS(filename) data_clean = data_clean %>% full_join(Data_general, by="qid") dictionary = dictionary %>% bind_rows(Dictionary_general) filename = here::here("03_processed_data","05_Data_impact.rds") Data_general<-readRDS(filename) filename = here::here("03_processed_data","05_Dictionary_impact.rds") Dictionary_general<-readRDS(filename) data_clean = data_clean %>% full_join(Data_general, by="qid") dictionary = dictionary %>% bind_rows(Dictionary_general) rm(Data_general,Dictionary_general) # 1.0 Some general checks ## Check if there are duplicate observations duplicate <- data_clean %>% group_by(qid) %>% mutate(ind=row_number()) %>% filter(ind==2) ## Check if there are duplicate variables duplicate <- dictionary %>% group_by(variable) %>% mutate(ind=row_number()) %>% filter(ind==2) dictionary <- dictionary %>% group_by(variable) %>% mutate(ind=row_number()) %>% filter(ind==1) %>% select(-ind) ## Check if names in data correspond with names in dictionary a_vars <- as_tibble(colnames(data_clean)) %>% rename(variable=value) dictionary <- dictionary %>% right_join(a_vars, by="variable") %>% #filter(is.na(question_number)) %>% mutate(label=ifelse(str_detect(variable,"seasonal_temperature"), str_replace(variable,"incrdecr","change"), label)) %>% mutate(label=ifelse(str_detect(variable,"seasonal_rainfall"), str_replace(variable,"incrdecr","change"), label)) %>% mutate(label=ifelse(str_detect(variable,"_est_"), str_c(label," (establishment)"), label)) %>% mutate(label=ifelse(str_detect(variable,"_maint_"), str_c(label," (maintenance)"), label)) ## Remove "tech" from variable name dictionary <- dictionary %>% mutate(variable=str_replace(variable,"tech_","")) data_clean <- data_clean %>% set_names(~stringr::str_replace_all(.,"tech_", "")) #----------------------------------------------------------------------------------------------------------------- # CONVERT COSTS TO CONSTANT USD library(readxl) filename = here::here("02_raw_data","IPD_countres_UNSTAT.xlsx") data<-read_excel(filename) deflator <- data %>% filter(Measure=="Implicit Price Deflator - NC") %>% gather("year","Measure",5:53) %>% rename(deflator=Measure, country=Country) %>% group_by(country) %>% mutate(base=ifelse(year==2015,deflator,NA)) %>% fill(base, .direction = "downup") %>% mutate(adj_price=base/deflator) %>% mutate(year=as.numeric(year)) filename = here::here("02_raw_data","Exchange_rates_UNSTAT.xlsx") data<-read_excel(filename) exrate <- data %>% filter(Measure=="IMF based exchange rate") %>% gather("year","Measure",5:53) %>% rename(exrate=Measure, country=Country) %>% group_by(country) %>% mutate(exchange_rate=ifelse(year==2015,exrate,NA)) %>% fill(exchange_rate, .direction = "downup") %>% ungroup() %>% mutate(year=as.numeric(year)) data_clean <- data_clean %>% mutate(year=ifelse(is.na(year_documentation),implementation_year,year_documentation)) %>% mutate(year=ifelse(year>2018,2018,year)) %>% 
mutate(country=ifelse(str_detect(country,"Tanzania"),"U.R. of Tanzania: Mainland",country)) %>% mutate(country=ifelse(str_detect(country,"Bolivia"),"Bolivia (Plurinational State of)",country)) %>% mutate(country=ifelse(str_detect(country,"Lao"),"Lao People's DR",country)) %>% mutate(country=ifelse(str_detect(country,"Iran"),"Iran (Islamic Republic of)",country)) %>% left_join(deflator, by=c("country","year")) %>% left_join(exrate, by=c("country","year")) ## Cost in 2015 prices a_vars <- dictionary %>% filter(str_detect(variable,"total_cost")& !str_detect(variable,"base")& !str_detect(variable,"percentage")& !str_detect(variable,"costbenefit")& !str_detect(variable,"remaining")) data_clean <- data_clean %>% mutate(across(contains("total_cost")& !contains("base")& !contains("percentage")& !contains("costbenefit")& !contains("remaining") , as.numeric)) %>% mutate(across(contains("total_cost")& !contains("base")& !contains("percentage")& !contains("costbenefit")& !contains("remaining") , ~. * adj_price)) #%>% # select(qid,country,year_documentation,currency,exchange_rate # input_exchange_rate,input_est_total_costs,input_est_total_costs_usd, # input_maint_total_costs,input_maint_total_costs_usd) %>% # filter(qid==1306) ## Convert to US dollars using 2015 exchange rates data_clean <- data_clean %>% mutate(exch_rate=ifelse(input_est_total_costs_usd==input_est_total_costs, 1,NA)) %>% mutate(exch_rate=ifelse(is.na(exch_rate)& input_est_total_costs_usd==input_est_total_costs, 1,exch_rate)) %>% mutate(exch_rate=ifelse(is.na(exch_rate)& currency=="USD", 1,exch_rate)) %>% mutate(exch_rate=ifelse(is.na(exch_rate)& currency!="USD", exchange_rate,exch_rate)) %>% mutate(exch_rate=ifelse(is.na(exch_rate)& is.na(currency), exchange_rate,exch_rate)) %>% mutate(exch_rate=ifelse(is.na(exch_rate)& !is.na(input_exchange_rate), input_exchange_rate,exch_rate)) %>% mutate(exch_rate=as.numeric(exch_rate)) %>% mutate(across(contains("total_cost")& !contains("base")& !contains("percentage")& !contains("costbenefit")& !contains("remaining")& !contains("usd") , ~. 
/ exch_rate)) # CLEAN AREA data_clean <- data_clean %>% mutate(area_hectare=ifelse(is.na(area_hectare)&!is.na(spread_area_precise), spread_area_precise,area_hectare)) %>% mutate(area_hectare=as.numeric(area_hectare)) # REMOVE UNNECESSARY VARIABLES data_clean <- data_clean %>% select(-Currency.x, -Currency.y, -CountryID.x, -CountryID.y, -exrate, -exchange_rate, -year) # ONSITE IMPACT FACTOR VARIABLES add_impact_labels <- function(x) { val_labels(x) <- c("Very negative (-50 to -100%)" = -3, "Negative (-20 to -50%)" = -2, "Slightly negative (-5 to -20%)" = -1, "Negligible impact" = 0, "Slightly positive (+5 to 20%)" = 1, "Positive (+20 to 50%)" = 2, "Very positive (+50 - 100%)" = 3) x } data_clean <- data_clean %>% mutate(across(contains("costbenefit"),as.factor)) %>% mutate(across(contains("impacts"),add_impact_labels)) # RECODE AS FACTORS # data_clean <- data_clean %>% # mutate(across(contains("costbenefit")| # contains("sensitivity")| # contains("incrdcr") # ,as_factor)) ## Set variable labels #data_clean <- set_label(data_clean, label = dictionary$label) var_label(data_clean) <- dictionary %>% select(variable, label) %>% dict_to_list() var_label(data_clean) <- list( deflator = "GDP deflator (current year) (UNSTATS)", base = "GDP deflator (base year 2015) (UNSTATS)", exch_rate = "Exchange rate (UNSTATS)" ) # SAVE filename = here::here("04_output","06_Data_final.rds") saveRDS(data_clean,filename) filename = here::here("04_output","06_Data_final.csv") write_csv(data_clean,filename) filename = here::here("04_output","06_Dictionary_final.rds") saveRDS(dictionary,filename) filename = here::here("04_output","06_Dictionary_final.csv") write_csv(dictionary,filename)
/01_code/06_Final_SLM_tech_data.R
no_license
Nabeehz/wocat_project
R
false
false
8,735
r
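# A toy illustration of the two-step conversion used above (the numbers are made up, not
# taken from the UNSTAT files): a nominal local-currency cost is first put in 2015 prices
# with the GDP deflator ratio and then converted to USD with the 2015 exchange rate.
library(dplyr)
toy <- tibble(
  cost_nominal_lcu = 1200,  # reported cost, domestic currency, survey year
  deflator_year    = 110,   # GDP deflator in the survey year
  deflator_2015    = 100,   # GDP deflator in the 2015 base year
  exch_rate_2015   = 8      # local currency units per USD in 2015
) %>%
  mutate(adj_price     = deflator_2015 / deflator_year,
         cost_2015_lcu = cost_nominal_lcu * adj_price,
         cost_2015_usd = cost_2015_lcu / exch_rate_2015)
toy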
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f"))) source("../../scripts/h2o-r-test-setup.R") ## # Set levels of a factor column ## test.setLevels <- function() { hex <- as.h2o(iris) hex.species.copy <- hex$Species species.orig <- h2o.levels(hex$Species) Log.info("Tests in-place modification") hex$Species <- h2o.setLevels(hex$Species, c(setosa = "NEW SETOSA ENUM", virginica = "NEW VIRG ENUM", versicolor = "NEW VERSI ENUM")) vals <- c("NEW SETOSA ENUM", "NEW VIRG ENUM", "NEW VERSI ENUM") expect_equal(h2o.levels(hex$Species), vals) expect_equal(h2o.levels(hex.species.copy), vals) # setLevels has side effects Log.info("Tests copy-on-write modification") hex$Species <- h2o.setLevels(hex$Species, species.orig, in.place = FALSE) expect_equal(h2o.levels(hex$Species), species.orig) } doTest("Set levels of a factor column", test.setLevels)
/h2o-r/tests/testdir_misc/runit_revalue.R
permissive
h2oai/h2o-3
R
false
false
1,023
r
# Problem 8.2.4 - Ford-Fulkerson algorithm (not counted)
library(optrees)
nodes <- 1:4
arcs <- matrix(c(1, 2, 2,
                 1, 3, 1,
                 2, 3, -2,
                 2, 4, 1,
                 3, 4, 1), ncol = 3, byrow = T)
# answer obtained
getShortestPathTree(nodes, arcs, algorithm = "Bellman-Ford", directed = T,
                    show.data = T, show.distances = T)

# Problem 8.2.5
nodes <- 1:7
arcs <- matrix(c(1, 2, 33, 1, 3, 48, 1, 4, 76, 1, 5, 98, 1, 6, 124, 1, 7, 156,
                 2, 3, 33, 2, 4, 48, 2, 5, 76, 2, 6, 98, 2, 7, 124,
                 3, 4, 33, 3, 5, 48, 3, 6, 76, 3, 7, 98,
                 4, 5, 33, 4, 6, 48, 4, 7, 76,
                 5, 6, 33, 5, 7, 48,
                 6, 7, 33), ncol = 3, byrow = T)
# shortest path: 144 thousand dollars
# same as previous calculated
spTreeDijkstra(nodes, arcs, directed = T, source.node = 1)

# Problem 8.3.6 - Ford-Fulkerson algorithm
nodes <- 1:14
arcs <- matrix(c(1, 2, 3, # source node to items
                 1, 3, 3, 1, 4, 3, 1, 5, 3, 1, 6, 3, 1, 7, 3, 1, 8, 3,
                 # node 2 to trucks
                 2, 9, 1, 2, 10, 1, 2, 11, 1, 2, 12, 1, 2, 13, 1,
                 # node 3 to trucks
                 3, 9, 1, 3, 10, 1, 3, 11, 1, 3, 12, 1, 3, 13, 1,
                 # node 4 to trucks
                 4, 9, 1, 4, 10, 1, 4, 11, 1, 4, 12, 1, 4, 13, 1,
                 # node 5 to trucks
                 5, 9, 1, 5, 10, 1, 5, 11, 1, 5, 12, 1, 5, 13, 1,
                 # node 6 to trucks
                 6, 9, 1, 6, 10, 1, 6, 11, 1, 6, 12, 1, 6, 13, 1,
                 # node 7 to trucks
                 7, 9, 1, 7, 10, 1, 7, 11, 1, 7, 12, 1, 7, 13, 1,
                 # node 8 to trucks
                 8, 9, 1, 8, 10, 1, 8, 11, 1, 8, 12, 1, 8, 13, 1,
                 # trucks to sink node
                 9, 14, 6, 10, 14, 4, 11, 14, 5, 12, 14, 4, 13, 14, 3), ncol = 3, byrow = T)
maxFlowFordFulkerson(nodes, arcs, directed = T, source.node = 1, sink.node = 14)

# Problem 8.6.1 - Minimal Spanning Tree Algorithm
nodes <- 1:5
# 1 - Gary
# 2 - Fort Wayne
# 3 - Evansville
# 4 - Terre Haute
# 5 - South Bend
arcs <- matrix(c(1, 3, 217, 1, 4, 164, 1, 5, 58,
                 2, 3, 290, 2, 4, 201, 2, 5, 79,
                 3, 4, 113, 4, 5, 196), ncol = 3, byrow = T)
# MST is Fort Wayne - South Bend - Gary - Terre Haute - Evansville
getMinimumSpanningTree(nodes, arcs, algorithm = "Prim", show.data = T, show.graph = T)

# Problem 9.3.3
library(lpsymphony)
obj <- c(2, 3)
mat <- matrix(c(1, 2, 3, 4), ncol = 2, byrow = T)
rhs <- c(10, 25)
dir <- c("<=", "<=")
types <- c("I", "I")
# x1 = 4, x2 = 3, z = 17
lpsymphony_solve_LP(obj, mat, dir, rhs, types = types, first_feasible = F, max = TRUE)
/Coding 2/assignment2_final.txt
no_license
virtuoso98/ysc2254-modelling
R
false
false
4,227
txt
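# A quick brute-force check of Problem 9.3.3 that needs no solver: enumerate the integer
# pairs satisfying x1 + 2*x2 <= 10 and 3*x1 + 4*x2 <= 25 and keep the best value of
# 2*x1 + 3*x2. The optimal value is 17, matching the comment above; note the problem has
# alternative optima (both (4, 3) and (7, 1) reach z = 17).
grid <- expand.grid(x1 = 0:10, x2 = 0:10)
feasible <- subset(grid, x1 + 2 * x2 <= 10 & 3 * x1 + 4 * x2 <= 25)
feasible$z <- 2 * feasible$x1 + 3 * feasible$x2
feasible[feasible$z == max(feasible$z), ]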
#!/usr/bin/env Rscript
###################################################################
# This file is part of RiboWave.
# RiboWave is a powerful Ribo-seq analysis tool that is able to
# denoise the Ribo-seq data and serve multiple functions.
#
# RiboWave can be used for multiple purposes:
# 1. denoise the raw Ribo-seq data
# 2. define translated ORFs
# 3. estimate the abundance of actively elongating ribosomes
# 4. estimate translation efficiency(TE)
# 5. identify potential frameshift candidates
#
# Author: Long Hu
#
# Copyright (C) 2017 Zhiyu Xu
#
# RiboWave is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Contact: xanthexu18@gmail.com
#######################################################################
Args <-commandArgs(TRUE);
inputF1= Args[1];# input table: column 1 = read length L, remaining columns = per-offset counts
output1= Args[2];# output table: L, read count, fraction of all reads, and a 1 flag at column (P-site + 3)
options(scipen=1000);

# Pick the P-site: among the candidate offsets 8-16 nt, return the offset with the
# highest count in X (the read-length argument L is kept for the call site but is
# not used inside the function).
my_max_psite = function(X,L){
  initial_psite_set = 8:16;
  out = X[initial_psite_set];
  best_psite = initial_psite_set[which.max(out)];
  return(best_psite);
}

mx = read.table(inputF1,sep="\t",head=F);
SUMall = sum(as.vector(mx[,-1]))
out2 = matrix(0,nrow=nrow(mx),ncol=(ncol(mx)+2));
for(i in 1:nrow(mx)){
  X = as.vector(as.matrix(mx[i,-1]));
  L = as.vector(as.matrix(mx[i,1]));
  X = X[1:L];
  SUM1 = sum(X);
  SUM = round(sum(X)/SUMall,4);
  tmp = my_max_psite(X,L);
  out2[i,c(1,2,3,tmp+3)]= c(L,SUM1,SUM,1)
}
write.table(out2,file=output1,col.name=F,row.name=F,quote=F,sep="\t");
rm(list=ls());
/psite_1nt_wholeReads.R
no_license
xanthexu/ribosome-profiling
R
false
false
1,554
r
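# A toy illustration of the per-row logic in the script above (the counts are made up):
# for one read length L = 30, the chosen P-site is the offset in 8..16 with the highest
# 5' end count, and the output row stores L, the raw sum, the fraction of all reads and
# a 1 at column (P-site + 3).
my_max_psite = function(X,L){   # same definition as in the script above
  initial_psite_set = 8:16;
  out = X[initial_psite_set];
  best_psite = initial_psite_set[which.max(out)];
  return(best_psite);
}
set.seed(1)
X <- rpois(30, lambda = 2)
X[12] <- 25                     # make offset 12 the clear winner
L <- 30
psite <- my_max_psite(X[1:L], L)
psite                           # 12 for these counts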
# Example 16 Chapter 6 Page no.: 187
# Bairstow's method

# Given function (defined for reference; the iteration below works directly on the
# coefficient vector a)
f <- function(x) {
  (x^3)+x+10
}

# Given values
u= 1.8
v= -1
es=1 #%
n=4
count=1
ear=100
eas=100
a<-c(10,1,0,1)
b<-matrix(0,n)
c<-matrix(0,n)

while ((ear>es) & (eas>es)){
  b[n]=a[n]
  b[n-1]=a[n-1]+u*b[n]
  for (i in seq(n-2,1,-1)){
    b[i]=a[i]+u*b[i+1]+v*b[i+2]
  }
  c[n]=b[n]
  c[n-1]=b[n-1]+u*c[n]
  for (i in seq((n-2),2,-1)){
    c[i]=b[i]+u*c[i+1]+v*c[i+2]
  }
  dv=((-b[1])+(b[2]*c[2]/c[3]))/(c[3]-(c[4]*c[2]/c[3]))
  du=(-b[2]-c[4]*dv)/c[3]
  u=u+du
  v=v+dv
  ear=abs(du/u)*1000
  eas=abs(dv/v)*1000
  cat("Iteration:",count,"\n u: ",u,"\n","v:",v,"\n","************************************\n")
  count=count+1;
}
cat("Final value of Quadratic quotients u & v are:\n","u: ",u,"\n v:",v,"\n","************************************\n")
# The textbook works through only one iteration; it clearly states that the final answers are u = 2 and v = -5
/Numerical_Methods_by_E_Balaguruswamy/CH6/EX6.16/Ex6_16.R
permissive
prashantsinalkar/R_TBC_Uploads
R
false
false
977
r
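# A quick independent check of the answer quoted above (u = 2, v = -5): Bairstow's method
# extracts the quadratic factor x^2 - u*x - v, so x^3 + x + 10 should factor as
# (x^2 - 2x + 5)(x + 2). base R's polyroot() confirms this without any iteration.
polyroot(c(5, -2, 1))     # roots of x^2 - 2x + 5 (coefficients in increasing powers): 1 +/- 2i
polyroot(c(10, 1, 0, 1))  # roots of x^3 + x + 10: the same pair 1 +/- 2i plus the real root -2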
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";", colClasses=c("character","character","double", "double","double","double","double","double","numeric"), na.strings="?") data_sub<-subset(data, data$Date=="1/2/2007"|data$Date=="2/2/2007") png('./Desktop plot1.png') hist(data_sub$Global_active_power,main="Global Active Power",ylab="Frequency",xlab="Global Active Power (kilowatts)",col="red") dev.off()
/ExData_Plotting1-master/plot1.R
no_license
luw517/Data-Science-Specialization
R
false
false
425
r
library(SNFtool) ### Name: Data2 ### Title: Data2 ### Aliases: Data2 ### Keywords: datasets ### ** Examples data(Data2)
/data/genthat_extracted_code/SNFtool/examples/Data2.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
127
r
# Project: Open Data of the Black Diaspora
# Dataset: Emancipators
# Source: Emigrants to Liberia Project, Virginia Center for Digital History
# http://www.vcdh.virginia.edu/liberia/index.php?page=Resources&section=Search%20Emancipators&result=yes

#----packages----
library(rvest)
library(pryr)
library(dplyr)
library(data.table)
library(magrittr)
#----------------

#----script-----
# assign url where data resides to variable and read in as r object
# important note: this is a searchable database; therefore the url
# will return the table with filters applied
# for our initial url pull the following filters were applied:
# - page=Resources
# - section=Search%20emancipators
# - result=yes
# Note: can't get table from raw url to work so saved page source as
# local html file for now; need to work on this
# url <- "http://www.vcdh.virginia.edu/liberia/index.php?page=Resources&section=Search%20emancipators&result=yes"
url <- "/Users/Sherlock/Desktop/Black Diaspora Data/Virginia Emigrants to Liberia/emancipators.html"
webpage <- read_html(url)

# find relevant table by node look up (e.g. "table")
# and convert to table
emancipators <- webpage %>%
  html_nodes("table") %>%
  .[[1]] %>%
  html_table(header = T)

# check object size and dimension along with variable names
object_size(emancipators)
dim(emancipators)
glimpse(emancipators)

# store original names in case we want to use later
orig.col.names <- names(emancipators)

# set to a data.table because that's how I like to manipulate data
setDT(emancipators, check.names = T)
glimpse(emancipators)
#----------------
/Virginia Emigrants to Liberia/emancipators.R
no_license
maniacalwhistle/diaspora-data
R
false
false
1,588
r
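# A sketch of one way to retry the live database URL directly (untested against this
# server; the user-agent header is an assumption, not a confirmed fix for the issue noted
# above): fetch the page with httr, then hand the HTML text to rvest exactly as before.
library(httr)
library(rvest)
live_url <- "http://www.vcdh.virginia.edu/liberia/index.php?page=Resources&section=Search%20emancipators&result=yes"
resp <- GET(live_url, user_agent("Mozilla/5.0"))
stop_for_status(resp)
emancipators_live <- read_html(content(resp, as = "text", encoding = "UTF-8")) %>%
  html_nodes("table") %>%
  .[[1]] %>%
  html_table(header = TRUE)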
library(lulcc) ### Name: FigureOfMerit ### Title: Create a FigureOfMerit object ### Aliases: FigureOfMerit FigureOfMerit,RasterLayer-method ### FigureOfMerit,ThreeMapComparison-method ### ** Examples ## see lulcc-package examples
/data/genthat_extracted_code/lulcc/examples/FigureOfMerit.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
240
r
library(MKmisc) ### Name: IQrange ### Title: The Interquartile Range ### Aliases: IQrange sIQR ### Keywords: univar robust distribution ### ** Examples IQrange(rivers) ## identical to IQR(rivers) ## other quantile algorithms IQrange(rivers, type = 4) IQrange(rivers, type = 5) ## standardized IQR sIQR(rivers) ## right-skewed data distribution sd(rivers) mad(rivers) ## for normal data x <- rnorm(100) sd(x) sIQR(x) mad(x)
/data/genthat_extracted_code/MKmisc/examples/IQrange.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
435
r
library(cubing) ### Name: invCube ### Title: Calculate Inverse Cube ### Aliases: invCube ### Keywords: manip ### ** Examples aCube <- getCubieCube("Tetris") is.solved(aCube %v% invCube(aCube)) is.solved(invCube(aCube) %v% aCube) ## Not run: plot(aCube) ## Not run: plot(invCube(aCube)) ## Not run: plot3D(aCube) ## Not run: plot3D(invCube(aCube))
/data/genthat_extracted_code/cubing/examples/invCube.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
355
r
########################################################################### #' Load a report file into the dataframe #' @param file A file of a report file, in .txt or .csv format. #' @return a data frame of the report file #' @examples #' file <- paste(system.file("files",package="iSwathX"),"Report_file.txt",sep="/") #' dat <- readReportFile(file) ############################################################################ readReportFile <- function(file) { seps = c("\t", ",") if (grepl(".txt$",file) || grepl(".tsv$",file) ) { id <- 1 } else if (grepl(".csv$",file)) { id <- 2 } else { stop (paste(file, "have wrong file type !") ) } sep <- seps[id] dat <- read.delim2(file,sep=sep,header=TRUE, stringsAsFactors=FALSE) if(ncol(dat) < 2){ stop(paste(file, "wrong library file format!") ) } dat }
/readReportFile.R
no_license
znoor/iSwathX
R
false
false
887
r
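# A small self-contained check of the extension-based separator handling above (the file
# and column names here are made up for the sketch). Note that readReportFile() reads with
# read.delim2(), which expects a decimal comma, so integer values are used to keep the
# round trip clean.
tmp <- file.path(tempdir(), "report_demo.csv")
write.csv(data.frame(Protein = c("P1", "P2"), Peptides = c(10L, 7L)),
          tmp, row.names = FALSE)
dat <- readReportFile(tmp)
head(dat)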
## Put comments here that give an overall description of what your
## functions do

# This returns a list of functions that can cache the inverse of a matrix
# set(X)        :: sets the matrix
# get()         :: returns the matrix
# setInverse(i) :: sets the inverse value
# getInverse()  :: gets the inverse value
# this assumes the parameter X is invertible
makeCacheMatrix <- function(x = matrix()) {
  # initialize the cache to null
  inverseCache <- NULL
  set <- function(y) {
    x <<- y
    # don't forget to clear the cache if the matrix changes
    inverseCache <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) inverseCache <<- inverse
  getInverse <- function() inverseCache
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

# This function takes the result of the above function
# and computes the inverse if it hasn't been already
# Usage::
#   myInverse = cacheSolve(makeCacheMatrix(myMatrix))
cacheSolve <- function(cacheMatrix, ...) {
  # has the inverse already been computed?
  inverse <- cacheMatrix$getInverse()
  if(!is.null(inverse)) {
    # yes! return the cached inverse
    message("Getting Cached Data")
    return(inverse)
  }
  # not yet. compute the inverse ...
  data <- cacheMatrix$get()
  inverse <- solve(data, ...)
  # save the inverse into the cache
  cacheMatrix$setInverse(inverse)
  inverse
}

#X = matrix(rexp(16, rate=.1), ncol=4)
#X
#cacheMatrix <- makeCacheMatrix(X)
#cacheSolve(cacheMatrix)
#cacheSolve(cacheMatrix)
/cachematrix.R
no_license
ericfarng/ProgrammingAssignment2
R
false
false
1,582
r
library("IMFData", lib.loc="~/R/x86_64-pc-linux-gnu-library/3.5") databaseID <- "IFS" startdate = "1900-01-01" enddate = "2016-12-31" checkquery = FALSE country_list <- c("") # all countries ### Indicators List ## search for indicators here: http://data.imf.org/?sk=4C514D48-B6BA-49ED-8AB9-52B0C1A0179B&sId=1409151240976 ## GDP | NGDP_R_CH_SA_XDC | National Accounts, Expenditure, Gross Domestic Product, Real, Reference Chained, Seasonally adjusted, Domestic Currency ## Debt | BCG_GALM_G01_XDC | Fiscal, Budgetary Central Government, Assets and Liabilities, Debt (at Market Value), Classification of the stocks of assets and liabilities, 2001 Manual, Domestic Currency ## Debt | CG01_GALM_G01_XDC | Fiscal, Central Government, Assets and Liabilities, Debt (at Market Value), Classification of the stocks of assets and liabilities, 2001 Manual, Domestic Currency ### Inflation | PCPI_PC_CP_A_PT | Prices, Consumer Price Index, All items, Percentage change, Corresponding period previous year, Percent ## This is the current account *in USD*. We need to find GDP also in USD---above it is in domestic currency. ## BGS_BP6_USD | Balance of Payments, Current Account, Goods and Services, Net, US Dollars ## LUR_PT | Labor Markets, Unemployment Rate, Percent indicators <- c("NGDP_R_CH_SA_XDC", "BCG_GALM_G01_XDC", "CG01_GALM_G01_XDC", "BGS_BP6_USD", "LUR_PT") queryfilter <- list(CL_FREA = "", CL_AREA_IFS = country_list, CL_INDICATOR_IFS = indicators) data <- CompactDataMethod(databaseID, queryfilter, startdate, enddate, checkquery, tidy = TRUE) ## merging data------ ## get only annual data data <- data[data$'@TIME_FORMAT' == "P1Y", ] NGDP_R_CH_SA_XDC <- data[data$'@INDICATOR' == "NGDP_R_CH_SA_XDC", ] NGDP_R_CH_SA_XDC <- NGDP_R_CH_SA_XDC[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(NGDP_R_CH_SA_XDC) <- c("year", "NGDP_R_CH_SA_XDC", "iso2c") BCG_GALM_G01_XDC <- data[data$'@INDICATOR' == "BCG_GALM_G01_XDC", ] BCG_GALM_G01_XDC <- BCG_GALM_G01_XDC[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(BCG_GALM_G01_XDC) <- c("year", "BCG_GALM_G01_XDC", "iso2c") CG01_GALM_G01_XDC <- data[data$'@INDICATOR' == "CG01_GALM_G01_XDC", ] CG01_GALM_G01_XDC <- CG01_GALM_G01_XDC[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(CG01_GALM_G01_XDC) <- c("year", "CG01_GALM_G01_XDC", "iso2c") BGS_BP6_USD <- data[data$'@INDICATOR' == "BGS_BP6_USD", ] BGS_BP6_USD <- BGS_BP6_USD[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(BGS_BP6_USD) <- c("year", "BGS_BP6_USD", "iso2c") LUR_PT <- data[data$'@INDICATOR' == "LUR_PT", ] LUR_PT <- LUR_PT[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(LUR_PT) <- c("year", "LUR_PT", "iso2c") ## merge data for export------ final_data <- merge(NGDP_R_CH_SA_XDC, BCG_GALM_G01_XDC, by = c("year", "iso2c"), all = TRUE) final_data <- merge(CG01_GALM_G01_XDC, final_data, by = c("year", "iso2c"), all = TRUE) final_data <- merge(BGS_BP6_USD, final_data, by = c("year", "iso2c"), all = TRUE) final_data <- merge(LUR_PT, final_data, by = c("year", "iso2c"), all = TRUE) saveRDS(final_data, "IMFdata.rds") write.csv(final_data, "IMFdata.csv", row.names=FALSE)
/pulling_data/IMF/pulling_merging_data.R
no_license
Matt-Brigida/IMF_World_Bank_Data
R
false
false
3,133
r
library("IMFData", lib.loc="~/R/x86_64-pc-linux-gnu-library/3.5") databaseID <- "IFS" startdate = "1900-01-01" enddate = "2016-12-31" checkquery = FALSE country_list <- c("") # all countries ### Indicators List ## search for indicators here: http://data.imf.org/?sk=4C514D48-B6BA-49ED-8AB9-52B0C1A0179B&sId=1409151240976 ## GDP | NGDP_R_CH_SA_XDC | National Accounts, Expenditure, Gross Domestic Product, Real, Reference Chained, Seasonally adjusted, Domestic Currency ## Debt | BCG_GALM_G01_XDC | Fiscal, Budgetary Central Government, Assets and Liabilities, Debt (at Market Value), Classification of the stocks of assets and liabilities, 2001 Manual, Domestic Currency ## Debt | CG01_GALM_G01_XDC | Fiscal, Central Government, Assets and Liabilities, Debt (at Market Value), Classification of the stocks of assets and liabilities, 2001 Manual, Domestic Currency ### Inflation | PCPI_PC_CP_A_PT | Prices, Consumer Price Index, All items, Percentage change, Corresponding period previous year, Percent ## This is the current account *in USD*. We need to find GDP also in USD---above it is in domestic currency. ## BGS_BP6_USD | Balance of Payments, Current Account, Goods and Services, Net, US Dollars ## LUR_PT | Labor Markets, Unemployment Rate, Percent indicators <- c("NGDP_R_CH_SA_XDC", "BCG_GALM_G01_XDC", "CG01_GALM_G01_XDC", "BGS_BP6_USD", "LUR_PT") queryfilter <- list(CL_FREA = "", CL_AREA_IFS = country_list, CL_INDICATOR_IFS = indicators) data <- CompactDataMethod(databaseID, queryfilter, startdate, enddate, checkquery, tidy = TRUE) ## merging data------ ## get only annual data data <- data[data$'@TIME_FORMAT' == "P1Y", ] NGDP_R_CH_SA_XDC <- data[data$'@INDICATOR' == "NGDP_R_CH_SA_XDC", ] NGDP_R_CH_SA_XDC <- NGDP_R_CH_SA_XDC[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(NGDP_R_CH_SA_XDC) <- c("year", "NGDP_R_CH_SA_XDC", "iso2c") BCG_GALM_G01_XDC <- data[data$'@INDICATOR' == "BCG_GALM_G01_XDC", ] BCG_GALM_G01_XDC <- BCG_GALM_G01_XDC[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(BCG_GALM_G01_XDC) <- c("year", "BCG_GALM_G01_XDC", "iso2c") CG01_GALM_G01_XDC <- data[data$'@INDICATOR' == "CG01_GALM_G01_XDC", ] CG01_GALM_G01_XDC <- CG01_GALM_G01_XDC[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(CG01_GALM_G01_XDC) <- c("year", "CG01_GALM_G01_XDC", "iso2c") BGS_BP6_USD <- data[data$'@INDICATOR' == "BGS_BP6_USD", ] BGS_BP6_USD <- BGS_BP6_USD[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(BGS_BP6_USD) <- c("year", "BGS_BP6_USD", "iso2c") LUR_PT <- data[data$'@INDICATOR' == "LUR_PT", ] LUR_PT <- LUR_PT[, c('@TIME_PERIOD', '@OBS_VALUE', '@REF_AREA')] names(LUR_PT) <- c("year", "LUR_PT", "iso2c") ## merge data for export------ final_data <- merge(NGDP_R_CH_SA_XDC, BCG_GALM_G01_XDC, by = c("year", "iso2c"), all = TRUE) final_data <- merge(CG01_GALM_G01_XDC, final_data, by = c("year", "iso2c"), all = TRUE) final_data <- merge(BGS_BP6_USD, final_data, by = c("year", "iso2c"), all = TRUE) final_data <- merge(LUR_PT, final_data, by = c("year", "iso2c"), all = TRUE) saveRDS(final_data, "IMFdata.rds") write.csv(final_data, "IMFdata.csv", row.names=FALSE)
# Test IKS aquastar nouvelle generation library(econum) #ls /dev/cu* options(iks.port = "/dev/cu.usbserial-FTB3O67T") iks.open() iks.getAll() iks.getName() iks.getData() iks.getConfig() options(debug.IKS = TRUE) options(debug.IKS = FALSE) iks.close()
/inst/test/test.iks.R
permissive
EcoNum/econum
R
false
false
253
r
#' Make volcano plot #' #' @param data data frame containing stats #' @param effect_var variable name for effect size (x-axis) #' @param p_var p value - variable name for y-axis #' @param q_var q value--when specified, defaults to highlighting points that pass q_thresh #' @param q_thresh q value threshold for highlighting points #' @param label_var column containing labels for data points #' @param n_labeled if rank_by = 'effect', n_labeled points per side; if rank_by = 'pval', n_labeled points in total #' @param label_size size of points #' @param label_bool logical column indicating which points should be labeled #' @param rank_by data type used to rank data points when labeling #' @param ggrepel_type choose 'text' or 'label' to determine whether ggrepel's geom_text_repel or geom_label_repel should be used #' @param color_var logical/categorical column for coloring points (if a factor, points will be layered according to the levels) #' @param color_values vector assigning categories from color_var to custom colors #' #' @importFrom magrittr "%>%" #' @importFrom magrittr "%<>%" #' #' @examples #' ceres <- taigr::load.from.taiga(data.name='avana-internal-19q1-7643', data.version=1, data.file='gene_effect') #' sample_info <- taigr::load.from.taiga(data.name='avana-internal-19q1-7643', data.version=1, data.file='sample_info') #' #' groups <- (sample_info$primary_tissue == 'skin') %>% magrittr::set_names(sample_info$DepMap_ID) #' limma_res <- cdsr::run_lm_stats_limma(ceres[names(groups),], groups) %>% dplyr::arrange(desc(abs(EffectSize))) #' #' limma_res %>% #' dplyr::mutate(fdr = p.adjust(p.value, method = 'fdr')) %>% #' cdsr::make_volcano('EffectSize', 'p.value', q_var = 'fdr') #' @export #' make_volcano <- function(data, effect_var, p_var, q_var = NULL, q_thresh = 0.1, label_var = NULL, n_labeled = 10, label_size = 3, label_bool = NULL, rank_by = c('effect', 'pval'), ggrepel_type = c('text', 'label'), color_var = NULL, color_values = NULL) { library(ggplot2) # set label for colors in the legend guide_title <- color_var # log 10 transform the p values transformed_p_var <- paste0('-log10(', p_var, ')') data[[transformed_p_var]] <- -log10(data[[p_var]]) # if user has specified q values but no variable to color by, color points that pass q_thresh if (is.null(color_var) & !is.null(q_var)) { color_var <- 'internal_sig' data[[color_var]] <- data[[q_var]] < q_thresh guide_title <- sprintf('FDR < %.3f', q_thresh) } if (is.null(color_var)) { p <- data %>% ggplot(aes_string(effect_var, transformed_p_var)) + geom_point(color = '#333333', alpha = 0.7) } else { if (is.null(color_values)) { # user has not specified exact colors, create default color schemes if (is.logical(data[[color_var]])) color_values <- c(`TRUE` = '#BF2026', `FALSE` = '#4d4d4d') else color_values <- RColorBrewer::brewer.pal(length(unique(data[[color_var]])), 'Dark2') } # plot layers one by one if (is.factor(data[[color_var]])) { layering <- levels(data[[color_var]]) } else { layering <- sort(unique(as.character(data[[color_var]]))) } p <- ggplot(data, aes_string(effect_var, transformed_p_var, color = color_var)) for (cur_layer in layering) p <- p + geom_point(data = data[data[[color_var]] == cur_layer,], alpha = 0.7) p <- p + scale_color_manual(values = color_values) + ggplot2::guides(color = guide_legend(title = guide_title)) } if (!is.null(label_var)) { # user has specified column to label points if (is.null(label_bool)) { # default to labeling top 10 data points on each side by effect size label_bool <- 'to_label' # define points 
to label with this column if (rank_by[1] == 'effect') { left <- rank(data[[effect_var]], ties.method = 'random') right <- rank(-data[[effect_var]], ties.method = 'random') data[[label_bool]] <- (left <= n_labeled) | (right <= n_labeled) } else if (rank_by[1] == 'pval') { data[[label_bool]] <- rank(data[[p_var]], ties.method = 'random') <= n_labeled } } if (ggrepel_type[1] == 'text') p <- p + ggrepel::geom_text_repel(data = data[data[[label_bool]],], aes_string(label = label_var), size = label_size, show.legend = F) else if (ggrepel_type[1] == 'label') p <- p + ggrepel::geom_label_repel(data = data[data[[label_bool]],], aes_string(label = label_var), size = label_size, show.legend = F) } return(p) }
/R/volcano_plot.R
no_license
broadinstitute/cdsr_plots
R
false
false
4,543
r
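The roxygen example above relies on Broad-internal loaders (taigr, cdsr). As a self-contained illustration, the sketch below calls make_volcano() on simulated data; the EffectSize/p.value column names simply mirror that example and the "signal" rows are fabricated for the plot.

library(ggplot2)

set.seed(1)
fake_res <- data.frame(
  Gene       = paste0("gene", 1:500),
  EffectSize = c(rnorm(480), rnorm(20, mean = 3)),            # 20 fabricated hits
  p.value    = c(runif(480), runif(20, min = 0, max = 1e-4))
)
fake_res$fdr <- p.adjust(fake_res$p.value, method = "fdr")

# Highlight FDR < 0.1 and label the 5 strongest effects per side
p <- make_volcano(fake_res, "EffectSize", "p.value",
                  q_var = "fdr", label_var = "Gene", n_labeled = 5)
print(p)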
## This script reads data from "household_power_consumption.txt" and plots measurements from
## three sub meters vs time. The file "household_power_consumption.txt" must be in the same working directory.

## Initialize a data frame and read the text file
data <- data.frame()
data <- read.csv("household_power_consumption.txt", na.strings = "?", sep = ";")

## Correct the formatting of the Date column
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

## Create a subset for the dates required
StartDate <- as.Date("2007-02-01")
EndDate <- as.Date("2007-02-02")
data <- data[data$Date >= StartDate & data$Date <= EndDate, ]

## Generate a new column "timestamp" which pastes together the Date and Time in the POSIXct time class
data$timestamp <- as.POSIXct(paste(data$Date, data$Time))

## Generate the plot and save as a .png file
plot(data$timestamp, data$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(data$timestamp, data$Sub_metering_1, type = "l", col = "black")
lines(data$timestamp, data$Sub_metering_2, type = "l", col = "red")
lines(data$timestamp, data$Sub_metering_3, type = "l", col = "blue")
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
/plot3.R
no_license
jordanschmidt/ExData_Plotting1
R
false
false
1,338
r
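A note on the device workflow: dev.copy() re-renders the screen plot at 480x480, which can shift the legend relative to the on-screen preview. An equivalent sketch that writes straight to the png device (same data frame and columns as the script above):

png("plot3.png", width = 480, height = 480)
plot(data$timestamp, data$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(data$timestamp, data$Sub_metering_1, col = "black")
lines(data$timestamp, data$Sub_metering_2, col = "red")
lines(data$timestamp, data$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off()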
cat("\014") # Clear your console rm(list = ls()) #clear your environment ########################## Load in header file ######################## # source(file.path("C:/Users/Nick/git/of-dollars-and-data/header.R")) ########################## Load in Libraries ########################## # library(dplyr) library(stringr) ########################## Start Program Here ######################### # # Load in raw BLS productivity data bls_oe <-readRDS(paste0(importdir, "05-bls-occupational-employment/bls_oe_data.1.AllData.Rds")) # Load in other datasets and create a code based on their row number # Will use these datasets to merge to the main productivity dataset create_index <- function(string){ name <- deparse(substitute(string)) temp <- readRDS(paste0(importdir, "05-bls-occupational-employment/bls_oe_", name, ".Rds")) new_col <- paste0(name, "_name") old_col <- paste0(name, "_code") temp[, new_col] <- temp[, old_col] temp[, old_col] <- rownames(temp) temp <- temp[, c(old_col, new_col)] return(temp) } areatype <- create_index(areatype) area <- create_index(area) industry <- create_index(industry) occupation <- create_index(occupation) datatype <- create_index(datatype) # Parse the series ID based on the "pr.txt" file here: https://download.bls.gov/pub/time.series/oe/ bls_oe <- mutate(bls_oe, areatype_code = substr(series_id, 4, 4), area_code = substr(series_id, 5, 11), industry_code = substr(series_id, 12, 17), occupation_code = substr(series_id, 18, 23), datatype_code = substr(series_id, 24, 25)) # Pad the area data with leading zeroes on area_code area$area_code <- str_pad(area$area_code, 7, pad = "0") # Merge on the sector, measure, and class information bls_oe <- bls_oe %>% left_join(areatype) %>% left_join(area) %>% left_join(industry) %>% left_join(occupation) %>% left_join(datatype) %>% select(series_id, year, value, footnote_codes, area_name, areatype_name, industry_name, occupation_name, datatype_name) # Save down final build before doing analysis saveRDS(bls_oe, paste0(localdir, "05-bls-oe.Rds")) # ############################ End ################################## #
/build/05-build-bls-occupational-employment.R
no_license
joyeung/of-dollars-and-data
R
false
false
2,416
r
cat("\014") # Clear your console rm(list = ls()) #clear your environment ########################## Load in header file ######################## # source(file.path("C:/Users/Nick/git/of-dollars-and-data/header.R")) ########################## Load in Libraries ########################## # library(dplyr) library(stringr) ########################## Start Program Here ######################### # # Load in raw BLS productivity data bls_oe <-readRDS(paste0(importdir, "05-bls-occupational-employment/bls_oe_data.1.AllData.Rds")) # Load in other datasets and create a code based on their row number # Will use these datasets to merge to the main productivity dataset create_index <- function(string){ name <- deparse(substitute(string)) temp <- readRDS(paste0(importdir, "05-bls-occupational-employment/bls_oe_", name, ".Rds")) new_col <- paste0(name, "_name") old_col <- paste0(name, "_code") temp[, new_col] <- temp[, old_col] temp[, old_col] <- rownames(temp) temp <- temp[, c(old_col, new_col)] return(temp) } areatype <- create_index(areatype) area <- create_index(area) industry <- create_index(industry) occupation <- create_index(occupation) datatype <- create_index(datatype) # Parse the series ID based on the "pr.txt" file here: https://download.bls.gov/pub/time.series/oe/ bls_oe <- mutate(bls_oe, areatype_code = substr(series_id, 4, 4), area_code = substr(series_id, 5, 11), industry_code = substr(series_id, 12, 17), occupation_code = substr(series_id, 18, 23), datatype_code = substr(series_id, 24, 25)) # Pad the area data with leading zeroes on area_code area$area_code <- str_pad(area$area_code, 7, pad = "0") # Merge on the sector, measure, and class information bls_oe <- bls_oe %>% left_join(areatype) %>% left_join(area) %>% left_join(industry) %>% left_join(occupation) %>% left_join(datatype) %>% select(series_id, year, value, footnote_codes, area_name, areatype_name, industry_name, occupation_name, datatype_name) # Save down final build before doing analysis saveRDS(bls_oe, paste0(localdir, "05-bls-oe.Rds")) # ############################ End ################################## #
\name{gasAcu1.nscanGene.LENGTH}
\docType{data}
\alias{gasAcu1.nscanGene.LENGTH}
\title{Transcript length data for the organism gasAcu}
\description{gasAcu1.nscanGene.LENGTH is an R object which maps transcripts to the
  length (in bp) of their mature mRNA transcripts. Where available, it will also
  provide the mapping between a gene ID and its associated transcripts. The data is
  obtained from the UCSC table browser (http://genome.ucsc.edu/cgi-bin/hgTables)
  using the nscanGene table. The data file was made by calling
  downloadLengthFromUCSC(gasAcu1, nscanGene) on the date on which the package was
  last updated.}
\seealso{ \code{\link{downloadLengthFromUCSC}}}
\examples{
data(gasAcu1.nscanGene.LENGTH)
head(gasAcu1.nscanGene.LENGTH)
}
\keyword{datasets}
/man/gasAcu1.nscanGene.LENGTH.Rd
no_license
nadiadavidson/geneLenDataBase
R
false
false
759
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addtables.R
\name{addtablesUI}
\alias{addtablesUI}
\title{addtablesUI}
\usage{
addtablesUI(id, M)
}
\arguments{
\item{id}{is caller id}

\item{M}{is the meta data connection structure}
}
\description{
UI for adding tables to 'cuborg' data warehouse
}
/man/addtablesUI.Rd
no_license
byadu/modcubingest
R
false
true
329
rd
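A hypothetical sketch of dropping a module UI like addtablesUI() into a shiny page; how the meta-data connection object M is constructed (and whether a matching server function exists) is not documented in this .Rd, so it is only assumed here.

library(shiny)
library(modcubingest)

M <- NULL  # placeholder: build the meta data connection structure per the package docs

ui <- fluidPage(
  addtablesUI(id = "ingest1", M = M)
)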
tar_test("rds update_object()", { x <- target_init(name = "abc", expr = quote(a), format = "rds") builder_update_build(x, tmpenv(a = "123")) builder_update_paths(x, path_store_default()) expect_false(file.exists(x$store$file$path)) expect_true(is.na(x$store$file$hash)) store_update_stage_early(x$store, "abc", path_store_default()) builder_update_object(x) expect_true(file.exists(x$store$file$path)) expect_false(is.na(x$store$file$hash)) path <- file.path("_targets", "objects", "abc") expect_equal(readRDS(path), "123") expect_equal(target_read_value(x)$object, "123") }) tar_test("misspelled format", { expect_error( tar_target(x, 1, format = "r2ds"), class = "tar_condition_validate" ) }) tar_test("rds packages", { x <- tar_target(x, 1, format = "rds") out <- store_get_packages(x$store) expect_equal(out, character(0)) }) tar_test("does not inherit from tar_external", { store <- tar_target(x, "x_value", format = "rds")$store expect_false(inherits(store, "tar_external")) }) tar_test("store_row_path()", { store <- tar_target(x, "x_value", format = "rds")$store store$file$path <- "path" expect_equal(store_row_path(store), NA_character_) }) tar_test("store_path_from_record()", { store <- tar_target(x, "x_value", format = "rds")$store record <- record_init(name = "x", path = "path", format = "rds") expect_equal( store_path_from_record(store, record, path_store_default()), path_objects(path_store_default(), "x") ) })
/tests/testthat/test-class_rds.R
permissive
billdenney/targets
R
false
false
1,504
r
tar_test("rds update_object()", { x <- target_init(name = "abc", expr = quote(a), format = "rds") builder_update_build(x, tmpenv(a = "123")) builder_update_paths(x, path_store_default()) expect_false(file.exists(x$store$file$path)) expect_true(is.na(x$store$file$hash)) store_update_stage_early(x$store, "abc", path_store_default()) builder_update_object(x) expect_true(file.exists(x$store$file$path)) expect_false(is.na(x$store$file$hash)) path <- file.path("_targets", "objects", "abc") expect_equal(readRDS(path), "123") expect_equal(target_read_value(x)$object, "123") }) tar_test("misspelled format", { expect_error( tar_target(x, 1, format = "r2ds"), class = "tar_condition_validate" ) }) tar_test("rds packages", { x <- tar_target(x, 1, format = "rds") out <- store_get_packages(x$store) expect_equal(out, character(0)) }) tar_test("does not inherit from tar_external", { store <- tar_target(x, "x_value", format = "rds")$store expect_false(inherits(store, "tar_external")) }) tar_test("store_row_path()", { store <- tar_target(x, "x_value", format = "rds")$store store$file$path <- "path" expect_equal(store_row_path(store), NA_character_) }) tar_test("store_path_from_record()", { store <- tar_target(x, "x_value", format = "rds")$store record <- record_init(name = "x", path = "path", format = "rds") expect_equal( store_path_from_record(store, record, path_store_default()), path_objects(path_store_default(), "x") ) })
#' Wrapper function for summarizing the outputs from DreamAI_bagging
#'
#' @param method a vector of imputation methods: ("KNN", "MissForest", "ADMIN", "Brinn", "SpectroFM", "RegImpute", "Ensemble"). This vector should be a subset of or equal to the vector out in DreamAI_bagging.
#' @param nNodes number of parallel processes
#' @param path location where the bagging output is saved
#'
#' @return list of final imputed data and confidence score for every gene using pseudo missing
#' @export
#'
#' @examples
#' \dontrun{
#' data(datapnnl)
#' data<-datapnnl.rm.ref[1:100,1:21]
#' impute<- DreamAI_Bagging(data=data,k=10,maxiter_MF = 10, ntree = 100,maxnodes = NULL,maxiter_ADMIN=30,tol=10^(-2),gamma_ADMIN=NA,gamma=50,CV=FALSE,fillmethod="row_mean",maxiter_RegImpute=10,conv_nrmse = 1e-6,iter_SpectroFM=40,method=c("KNN","MissForest","ADMIN","Brinn","SpectroFM","RegImpute"),out=c("Ensemble"),SamplesPerBatch=3,n.bag=2,save.out=TRUE,path="C:\\Users\\chowds14\\Desktop\\test_package\\",ProcessNum=1)
#' final.out<-bag.summary(method=c("Ensemble"),nNodes=2,path="C:\\Users\\chowds14\\Desktop\\test_package\\")
#' final.out$score
#' final.out$imputed_data
#' }
bag.summary <- function(method = c("KNN", "MissForest", "ADMIN", "Brinn", "SpectroFM", "RegImpute", "Ensemble"),
                        nNodes = 2, path = NULL)
{
  load(paste(path, "bag_imputed_", sprintf("%03d", 1), ".RData", sep = ""))
  out <- bag.output$out.method
  if (identical(method, intersect(out, method)) == FALSE) {
    return(print("method does not match out"))
  }
  summary.out <- list()
  KNN.out <- list()
  MF.out <- list()
  ADMIN.out <- list()
  Reg_Impute.out <- list()
  Brinn.out <- list()
  SpectroFM.out <- list()
  Ensemble.out <- list()
  n.bag.out <- list()
  for (i in 1:nNodes)
  {
    load(paste(path, "bag_imputed_", sprintf("%03d", i), ".RData", sep = ""))
    summary.out[[i]] <- bag.output$summary
    if ("KNN" %in% method) {
      KNN.out[[i]] <- bag.output$impute$KNN
    } else {
      sink("NULL")
      print("No output for KNN")
      sink()
    }
    if ("MissForest" %in% method) {
      MF.out[[i]] <- bag.output$impute$MissForest
    } else {
      sink("NULL")
      print("No output for MissForest")
      sink()
    }
    if ("ADMIN" %in% method) {
      ADMIN.out[[i]] <- bag.output$impute$ADMIN
    } else {
      sink("NULL")
      print("No output for ADMIN")
      sink()
    }
    if ("RegImpute" %in% method) {
      Reg_Impute.out[[i]] <- bag.output$impute$RegImpute
    } else {
      sink("NULL")
      print("No output for RegImpute")
      sink()
    }
    if ("Brinn" %in% method) {
      Brinn.out[[i]] <- bag.output$impute$Brinn
    } else {
      sink("NULL")
      print("No output for Brinn")
      sink()
    }
    if ("SpectroFM" %in% method) {
      SpectroFM.out[[i]] <- bag.output$impute$SpectroFM
    } else {
      sink("NULL")
      print("No output for SpectroFM")
      sink()
    }
    Ensemble.out[[i]] <- bag.output$impute$Ensemble
    n.bag.out[[i]] <- bag.output$n.bag
  }
  summary.out.all <- do.call(rbind, summary.out)
  summary.out.all.agg = aggregate(cbind(true, imputed) ~ gene, data = summary.out.all, function(x){x});
  get.score = function(x)
  {
    dt = x$true;
    di = x$imputed;
    cor.temp = cor(dt, di, use = 'pairwise.complete.obs', method = 'spearman');
    nrmsd.temp = sqrt(mean((dt - di)^2)) / diff(range(dt));
    return(c(cor = cor.temp, nrmsd = nrmsd.temp));
  }
  summary.score = t(apply(summary.out.all.agg, 1, get.score));
  summary.score = as.data.frame(summary.score);
  rownames(summary.score) = summary.out.all.agg$gene

  if ("KNN" %in% method) {
    d.impute.knn <- matrix(0, nrow(KNN.out[[1]]), ncol(KNN.out[[1]]))
  }
  if ("MissForest" %in% method) {
    d.impute.MF <- matrix(0, nrow(MF.out[[1]]), ncol(MF.out[[1]]))
  }
  if ("ADMIN" %in% method) {
    d.impute.ADMIN <- matrix(0, nrow(ADMIN.out[[1]]), ncol(ADMIN.out[[1]]))
  }
  if ("Brinn" %in% method) {
    d.impute.Brinn <- matrix(0, nrow(Brinn.out[[1]]), ncol(Brinn.out[[1]]))
  }
  if ("RegImpute" %in% method) {
    # fixed: the original referenced the undefined object RegImpute.out for ncol()
    d.impute.Reg_Impute <- matrix(0, nrow(Reg_Impute.out[[1]]), ncol(Reg_Impute.out[[1]]))
  }
  if ("SpectroFM" %in% method) {
    d.impute.SpectroFM <- matrix(0, nrow(SpectroFM.out[[1]]), ncol(SpectroFM.out[[1]]))
  }
  d.impute.Ensemble <- matrix(0, nrow(Ensemble.out[[1]]), ncol(Ensemble.out[[1]]))
  n.bag.tot <- 0
  for (i in 1:nNodes)
  {
    if ("KNN" %in% method) {
      d.impute.knn <- d.impute.knn + n.bag.out[[i]] * KNN.out[[i]]
    }
    if ("MissForest" %in% method) {
      d.impute.MF <- d.impute.MF + n.bag.out[[i]] * MF.out[[i]]
    }
    if ("ADMIN" %in% method) {
      d.impute.ADMIN <- d.impute.ADMIN + n.bag.out[[i]] * ADMIN.out[[i]]
    }
    if ("Brinn" %in% method) {
      d.impute.Brinn <- d.impute.Brinn + n.bag.out[[i]] * Brinn.out[[i]]
    }
    if ("RegImpute" %in% method) {
      d.impute.Reg_Impute <- d.impute.Reg_Impute + n.bag.out[[i]] * Reg_Impute.out[[i]]
    }
    if ("SpectroFM" %in% method) {
      d.impute.SpectroFM <- d.impute.SpectroFM + n.bag.out[[i]] * SpectroFM.out[[i]]
    }
    d.impute.Ensemble <- d.impute.Ensemble + n.bag.out[[i]] * Ensemble.out[[i]]
    n.bag.tot <- n.bag.tot + n.bag.out[[i]]
  }
  imputed_matrix <- list()
  if ("KNN" %in% method) {
    d.impute.knn.final <- d.impute.knn / n.bag.tot
    imputed_matrix <- c(imputed_matrix, list("KNN" = as.matrix(d.impute.knn.final)))
  }
  if ("MissForest" %in% method) {
    d.impute.MF.final <- d.impute.MF / n.bag.tot
    imputed_matrix <- c(imputed_matrix, list("MissForest" = as.matrix(d.impute.MF.final)))
  }
  if ("ADMIN" %in% method) {
    d.impute.ADMIN.final <- d.impute.ADMIN / n.bag.tot
    imputed_matrix <- c(imputed_matrix, list("ADMIN" = as.matrix(d.impute.ADMIN.final)))
  }
  if ("Brinn" %in% method) {
    d.impute.Brinn.final <- d.impute.Brinn / n.bag.tot
    imputed_matrix <- c(imputed_matrix, list("Brinn" = as.matrix(d.impute.Brinn.final)))
  }
  if ("RegImpute" %in% method) {
    d.impute.RegImpute.final <- d.impute.Reg_Impute / n.bag.tot
    imputed_matrix <- c(imputed_matrix, list("RegImpute" = as.matrix(d.impute.RegImpute.final)))
  }
  if ("SpectroFM" %in% method) {
    d.impute.SpectroFM.final <- d.impute.SpectroFM / n.bag.tot
    imputed_matrix <- c(imputed_matrix, list("SpectroFM" = as.matrix(d.impute.SpectroFM.final)))
  }
  d.impute.Ensemble.final <- d.impute.Ensemble / n.bag.tot
  imputed_matrix <- c(imputed_matrix, list("Ensemble" = as.matrix(d.impute.Ensemble.final)))
  # methods<-c("KNN","MissForest","ADMIN","Brinn","SpectroFM","RegImpute")
  #
  # num<-which(methods %in% method)
  # sink()
  output <- imputed_matrix
  out <- list(score = summary.score, imputed_data = output)
  return(out)
}
/Code/R/wrapper.R
no_license
schatterjee-lilly/DreamAI
R
false
false
6,754
r
# STEP 6: PLOT 4
# First I open the device, then I make the plots consecutively
# (two plots per row and per column: par(mfrow=c(2,2))) and set the legend.
# Finally, I close the device.
png("plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
plot(muestra$Datetime, muestra$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
plot(muestra$Datetime, muestra$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(muestra$Datetime, muestra$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
lines(muestra$Datetime, muestra$Sub_metering_2, col = "red")
lines(muestra$Datetime, muestra$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = "solid", bty = "n")
plot(muestra$Datetime, muestra$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
x <- dev.off()
/plot4.R
no_license
Edurnita/ExData_Plotting1
R
false
false
921
r
# Read the files
NEI <- readRDS("./summarySCC_PM25.rds")
SCC <- readRDS("./Source_Classification_Code.rds")

# Subset the data appropriately
subset <- NEI[NEI$fips == "24510", ]

# Specify global graphics parameter (here: the margins for the plot)
par("mar" = c(5.1, 4.5, 4.1, 2.1))

# Launch the graphics device (file device: PNG)
png("plot2.png", width = 480, height = 480, units = "px")

# Use the aggregate function for calculating summary statistics
totalemission <- aggregate(subset$Emissions, list(subset$year), FUN = "sum")

# Create (and annotate) the plot
plot(totalemission, type = "l", xlab = "Year",
     main = "Total Emissions in Baltimore City from 1999 to 2008",
     ylab = expression('Total PM'[2.5]*"Emission"))

# Close the graphics device
dev.off()
/plot2.R
no_license
devvarya/ExData_Plotting2
R
false
false
767
r
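The aggregate() call above sums emissions within each year; on toy data it behaves like this:

toy <- data.frame(year = c(1999, 1999, 2002, 2002), Emissions = c(2, 3, 5, 7))
aggregate(toy$Emissions, list(toy$year), FUN = "sum")
#   Group.1  x
# 1    1999  5
# 2    2002 12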
data <- read.table("household_power_consumption.txt", header=TRUE, na.strings="?", sep=";") data <- data[(data$Date=="1/2/2007" | data$Date=="2/2/2007" ), ] data$DateTime<-as.POSIXct(paste(data$Date,data$Time), format="%d/%m/%Y %H:%M:%S") #plot4 par(mfrow=c(2,2), mar=c(4,5,2,1), oma=c(0,0,2,0)) with(data, { plot(Global_active_power~DateTime, type="l", ylab="Global Active Power", xlab="") plot(Voltage~DateTime, type="l", ylab="Voltage", xlab="") plot(Sub_metering_1~DateTime, col='Black',type="l", ylab="Energy Sub Metering", xlab="") lines(Sub_metering_2~DateTime,col='Red') lines(Sub_metering_3~DateTime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(Global_reactive_power~DateTime, type="l", ylab="Global_Rective_Power",xlab="") }) # export the plot to file:plot4.png dev.copy(png, file="plot4.png", height=480, width=480) # close device to finalize the file dev.off()
/plot4.R
no_license
bairdstar/ExData_Plotting1
R
false
false
997
r
data <- read.table("household_power_consumption.txt", header=TRUE, na.strings="?", sep=";") data <- data[(data$Date=="1/2/2007" | data$Date=="2/2/2007" ), ] data$DateTime<-as.POSIXct(paste(data$Date,data$Time), format="%d/%m/%Y %H:%M:%S") #plot4 par(mfrow=c(2,2), mar=c(4,5,2,1), oma=c(0,0,2,0)) with(data, { plot(Global_active_power~DateTime, type="l", ylab="Global Active Power", xlab="") plot(Voltage~DateTime, type="l", ylab="Voltage", xlab="") plot(Sub_metering_1~DateTime, col='Black',type="l", ylab="Energy Sub Metering", xlab="") lines(Sub_metering_2~DateTime,col='Red') lines(Sub_metering_3~DateTime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(Global_reactive_power~DateTime, type="l", ylab="Global_Rective_Power",xlab="") }) # export the plot to file:plot4.png dev.copy(png, file="plot4.png", height=480, width=480) # close device to finalize the file dev.off()
require(dplyr) require(tidyr) require(cowplot) require(ggplot2) require(xtable) require(sqldf) require(stringr) require(rwetools) ##----------------------------------------------------------------------------- ## LOAD INTERMACS DATA ##----------------------------------------------------------------------------- g_path = "clinical_data/" g_prefix = "IntermacsCSV/" g_prefix_ana = "AnalysisData/" g_nrow = -1 g_dta_patients <- read.csv(file = paste(g_path, g_prefix, "patients.csv", sep = ""), nrows = g_nrow) g_dta_events <- read.csv(file = paste(g_path, g_prefix, "events.csv", sep = ""), nrows = g_nrow) d_pids <- sqldf("select distinct public_id as id from g_dta_patients") ##----------------------------------------------------------------------------- ## GET dta_ext DATAFRAME ##----------------------------------------------------------------------------- # sql statement s_sql_pt <- 'select distinct p0.id, p1.IMPL_YR as implant_year, p1.AGE_DEIDENT as age, p1.GENDER as gender, SQRT(p1.HGT_CM * p1.WGT_KG) / 60 as bsa, 10000 * p1.WGT_KG / p1.HGT_CM / p1.HGT_CM as bmi, p1.CV_PRES as cvp, p1.SYS_BP as sys_bp, p1.DIA_BP as dia_bp, p1.PUL_SYS_PRES as sys_pap, p1.PUL_DIA_PRES as dia_pap, p1.PUL_WEDGE_PRES as pcwp, p1.INR as inr, p1.WBC_X10_3_UL as wbc, p1.PLATELET_X10_3_UL as platelet, p1.DEAD as dead, p1.INT_DEAD as int_dead, CASE WHEN p1.MED_PRE_IMP_ACE_INHIBITORS = 1 OR p1.MED_PRE_IMP_ACE_INHIBITORS = 2 THEN 1 WHEN p1.MED_PRE_IMP_ACE_INHIBITORS = 3 THEN 0 ELSE -1 END as ace from d_pids p0 left join g_dta_patients p1 on (p0.id = p1.public_id) order by id' dta_ext <- sqldf(s_sql_pt) curr <- NULL curr <- dta_ext curr[curr == '.'] <- NA curr[curr == ' '] <- NA curr[curr == -1] <- NA curr <- na.omit(curr) dta_ext <- curr s_sql_pt_2 <- 'select p2.*, CASE WHEN dead = 1 AND int_dead <= 13 THEN 1 ELSE 0 END as dead_6mths FROM dta_ext p2 ORDER BY id' dta_ext <- sqldf(s_sql_pt_2) dta_ext$dead <- NULL dta_ext$int_dead <- NULL dta_ext <- na.omit(dta_ext) s_sql_pt_3 <- 'select p2.*, o1.transplant as transplant_6_mths, o2.exchange as exchange_6_mths, o3.rhf as rhf_6_mths FROM dta_ext p2 LEFT JOIN (select e.public_id as id, CASE WHEN e.EVENTX = "Explant: Transplant" THEN 1 ELSE 0 END as transplant FROM g_dta_events e WHERE e.INT_EVT <= 13 GROUP BY id) o1 on (o1.id = p2.id) LEFT JOIN (select e.public_id as id, CASE WHEN e.EVENTX = "Explant: Exchange" THEN 1 ELSE 0 END as exchange FROM g_dta_events e WHERE e.INT_EVT <= 13 GROUP BY id) o2 on (o2.id = p2.id) LEFT JOIN (select e.public_id as id, CASE WHEN e.EVENTX = "Right Heart Failure v3.0" THEN 1 ELSE 0 END as rhf FROM g_dta_events e WHERE e.INT_EVT <= 13 GROUP BY id) o3 on (o3.id = p2.id)' dta_ext <- sqldf(s_sql_pt_3) dta_ext <- na.omit(dta_ext) dta_ext <- dta_ext %>% filter(gender %in% c("M", "F")) %>% mutate(gender = factor(gender), age = as.numeric(age), cvp = as.numeric(cvp), sys_bp = as.numeric(sys_bp), dia_bp = as.numeric(dia_bp), sys_pap = as.numeric(sys_pap), dia_pap = as.numeric(dia_pap), pcwp = as.numeric(pcwp), inr = as.numeric(inr), wbc = as.numeric(wbc), platelet = as.numeric(platelet)) dta_ext$exchange_6_mths <- NULL ##---------------------------------------------------------------------------- ## GET target_stats from three studies ##---------------------------------------------------------------------------- study_1 <- list(study_name = "HM3", patient_size = 50, study_outcome = c(dead_6_mths = 0.08, transplant_6_mths = 0.04, rhf_6_mths = 0.1), target_summary = list(age = list(type = 'continuous', mean = 5.89, sd = 1.35, ex2 = 36.5146, scale = 0.1), 
gender = list(type = 'discrete', values = c('M','F'), probs = c(0.9, 0.1)), bsa = list(type = 'continuous', mean = 2, sd = 0.2, ex2 = 4.04), bmi = list(type = 'continuous', mean = 2.68, sd = 0.43, ex2 = 7.4749, scale = 0.1), cvp = list(type = 'continuous', mean = 0.99, sd = 0.56, ex2 = 1.2937, scale = 0.1), sys_bp = list(type = 'continuous', mean = 1.047, sd = 0.114, ex2 = 1.109205, scale = 0.01), dia_bp = list(type = 'continuous', mean = 6.47, sd = 0.94, ex2 = 42.7445, scale = 0.1), sys_pap = list(type = 'continuous', mean = 5.08, sd = 1.86, ex2 = 29.266, scale = 0.1), dia_pap = list(type = 'continuous', mean = 2.36, sd = 0.93, ex2 = 6.4345, scale = 0.1), pcwp = list(type = 'continuous', mean = 2.24, sd = 0.87, ex2 = 5.7745, scale = 0.1), wbc = list(type = 'continuous', mean = 1.32, sd = 0.2, ex2 = 1.7824, scale = 0.1), platelet = list(type = 'continuous', mean = 2.31, sd = 0.693, ex2 = 5.816349, scale = 0.01), inr = list(type = 'continuous', mean = 1.3, sd = 0.6, ex2 = 2.05), ace = list(type = 'discrete', values = c('0','1'), probs = c(0.58, 0.42)) ) ) study_2 <- list(study_name = "HM2", patient_size = 133, study_outcome = c(dead_6_mths = 0.19, transplant_6_mths = 0.42, rhf_6_mths = 0.17), target_summary = list(age = list(type = 'continuous', mean = 5.01, sd = 1.31, ex2 = 26.8162, scale = 0.1), gender = list(type = 'discrete', values = c('M','F'), probs = c(0.7895, 0.2105)), bsa = list(type = 'continuous', mean = 2, sd = 0.3, ex2 = 4.09), bmi = list(type = 'continuous', mean = 2.68, sd = 0.59, ex2 = 7.5305, scale = 0.1), cvp = list(type = 'continuous', mean = 1.35, sd = 0.78, ex2 = 2.4309, scale = 0.1), sys_bp = list(type = 'continuous', mean = 9.58, sd = 1.46, ex2 = 93.908, scale = 0.1), dia_bp = list(type = 'continuous', mean = 6.17, sd = 1.13, ex2 = 39.3458, scale = 0.1), sys_pap = list(type = 'continuous', mean = 5.3, sd = 1.41, ex2 = 30.0781, scale = 0.1), dia_pap = list(type = 'continuous', mean = 2.82, sd = 0.88, ex2 = 8.7268, scale = 0.1), pcwp = list(type = 'continuous', mean = 2.61, sd = 0.79, ex2 = 7.4362, scale = 0.1), wbc = list(type = 'continuous', mean = 8.9, sd = 3.2, ex2 = 89.45), platelet = list(type = 'continuous', mean = 2.28, sd = 0.86, ex2 = 5.9380, scale = 0.01), inr = list(type = 'continuous', mean = 1.3, sd = 0.4, ex2 = 1.85), ace = list(type = 'discrete', values = c('0','1'), probs = c(0.7, 0.3)) ) ) study_3 <- list(study_name = "HeartWare HVAD", patient_size = 140, study_outcome = c(dead_6_mths = 0.043, transplant_6_mths = 0.27, rhf_6_mths = 0.193), target_summary = list(age = list(type = 'continuous', mean = 5.33, sd = 1.03, ex2 = 29.4698, scale = 0.1), gender = list(type = 'discrete', values = c('M','F'), probs = c(0.7214, 0.2786)), bsa = list(type = 'continuous', mean = 2.06, sd = 0.28, ex2 = 4.322), bmi = list(type = 'continuous', mean = 2.86, sd = 0.61, ex2 = 8.5517, scale = 0.1), cvp = list(type = 'continuous', mean = 1.08, sd = 0.33, ex2 = 1.2753, scale = 0.1), sys_bp = list(type = 'continuous', mean = 1.04, sd = 0.16, ex2 = 1.1072, scale = 0.01), dia_bp = list(type = 'continuous', mean = 6.4, sd = 1.1, ex2 = 42.17, scale = 0.1), sys_pap = list(type = 'continuous', mean = 4.9, sd = 1.5, ex2 = 26.26, scale = 0.1), dia_pap = list(type = 'continuous', mean = 2.5, sd = 0.9, ex2 = 7.06, scale = 0.1), pcwp = list(type = 'continuous', mean = 2.3, sd = 0.9, ex2 = 6.10, scale = 0.1), wbc = list(type = 'continuous', mean = 7.5, sd = 2.5, ex2 = 62.5), platelet = list(type = 'continuous', mean = 2.16, sd = 0.76, ex2 = 5.2432, scale = 0.01), inr = list(type = 
'continuous', mean = 1.3, sd = 0.4, ex2 = 1.85), ace = list(type = 'discrete', values = c('0','1'), probs = c(0.2320, 0.3906)) ) ) study <- list(study_1 = study_1, study_2 = study_2, study_3 = study_3) target_stats <- study$study_1$target_summary ##----------------------------------------------------------------------------- ## GET summarized statistics for Intermacs ##----------------------------------------------------------------------------- intermacs_stats <- rwe_extract_stats(target_stats, dta_ext) ##----------------------------------------------------------------------------- ## OUTPUT DATA ##----------------------------------------------------------------------------- save(dta_ext, study, intermacs_stats, file = paste(g_path, g_prefix_ana, 'summarized_stats.RData', sep = ""))
/sql_summary_stats.R
no_license
sallytt22/intermacs_database
R
false
false
21,511
r
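The hand-entered ex2 fields in the target_summary lists above appear to be second moments, ex2 = mean^2 + sd^2 (on the scaled variable); a quick spot check after sourcing the script:

# age in study_1: 5.89^2 + 1.35^2 = 34.6921 + 1.8225 = 36.5146
with(study$study_1$target_summary$age, mean^2 + sd^2)
#> [1] 36.5146   (matches the recorded ex2)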
#################################################### # Author: Eric Tulowetzke, eric.tulowetzke@jacks.sdstate.edu # Lab: Ge Lab # R version 4.0.5 # Project: iDEP v93 # File: gene_id_page_ser.R # Purpose of file:server logic for second tab i.e. Gene ID Examples # Allow users view example of database # Start data: 06-06-2022 (mm-dd-yyyy) # Data last modified: 06-06-2021, 12:46 PM CST (mm-dd-yyyy,TIME) # to help with github merge ####################################################### firstTime <- TRUE #Initialize the page at start up in sever logic MAX_WIDTH_COL <- 150 #Determine the starting with of table columns EXAMPLE_GENES_WCOL <- 800 ################################################################# # FUNCTION : getExample # DESCRIPTION : Gives user example of our IDs, # INPUT ARGS : Organism picked by user, user's database option, # and path to database # OUTPUT ARGS : data frame of genes # IN/OUT ARGS : # RETURN : returnDf # Implementation notes : shiny is only used when this is used in shiny app ################################################################# getExample <- function(userSpecie = NULL, userIDtype = NULL, path2Database = NULL, shiny = FALSE) { returnDf = NULL convert <- dbConnect(RSQLite::SQLite(), path2Database) query4IDtype <- paste('SELECT * FROM idIndex WHERE idType =', shQuote(userIDtype)) userIDtypeNum <- dbGetQuery(convert, query4IDtype) query4IDmap <- paste('SELECT * FROM mapping WHERE species =', as.numeric(userSpecie), 'AND idType =', as.numeric(userIDtypeNum$id)) userIDdf <- dbGetQuery(convert, query4IDmap) RSQLite::dbDisconnect(convert) returnDf <- userIDdf[-c(3,4)] colnames(returnDf) <- c('User_ID', 'Ensembl_ID') if (shiny) {incProgress(1)} return(returnDf) } # end of function ################################################################# # FUNCTION : getExampleDfID # DESCRIPTION : Gives user example of our IDs, # INPUT ARGS : Organism picked by user # OUTPUT ARGS : data frame of genes # IN/OUT ARGS : # RETURN : returnDf # Implementation notes : shiny is only used when this is used in shiny app ################################################################# getExampleDfID <- function(userSpecie = NULL, path2Database = NULL, shiny = FALSE) { allSample <- feather::read_feather(path2Database) returnDF <- allSample[allSample$index == userSpecie,] returnDF <- returnDF[, -c(1)] colnames(returnDF) <- c('Database', 'Example_genes') if (shiny) {incProgress(1)} return(returnDF) }# end of getExampleDfID ############################################ #Purpose: server logic for second tab i.e. 
Gene ID Examples ############################################ geneIDPage <- function(input, output, session, orgInfo, path) { if (firstTime == TRUE) { #load packages libs <- c('RSQLite','feather') lapply(libs, library, character.only = TRUE) #set up input and paths at start up SPECIE_LIST <- unique(c("Human", sort(orgInfo$name2))) updateSelectizeInput(session = session, inputId = "userSpecie", choices = SPECIE_LIST, server = TRUE) PATH <- paste0(path, 'convertIDs.db') PATH2 <- paste0(path, '/feather/example_of_id.feather') default <- getExampleDfID(userSpecie = SPECIE_LIST[1], path2Database = PATH2) output$tableDefault <- renderReactable({ reactable::reactable(data = default, columns = list(Database = colDef(maxWidth = MAX_WIDTH_COL), Example_genes = colDef(maxWidth = EXAMPLE_GENES_WCOL)), searchable = TRUE, bordered = TRUE, defaultPageSize = 4, highlight = TRUE, resizable = TRUE, minRows = 5, showPageSizeOptions = TRUE, pageSizeOptions = c(4, 10, 25, 50, 100)) })#end of tableDefault shinyjs::hide(id = "downloadIDPage") firstTime <- FALSE }#end of if observeEvent(input$userSpecie, { # update userIDtype when userSpecie changes ID_TYPE_LIST <- getExampleDfID(userSpecie = input$userSpecie, path2Database = PATH2) ID_TYPE_FILTER <- c("None", sort(ID_TYPE_LIST$Database)) updateSelectizeInput(session = session, inputId = "userIDtype", choices = ID_TYPE_FILTER, server = TRUE) }) # end of observeEvent observeEvent(input$submitIDPage, { #Decide on what to pass to function if (input$userIDtype == "None") {#user just gives species shinyjs::hide(id = "downloadIDPage") getExampleSer <- shiny::reactive({ withProgress(message = 'Work be done...', value = 0, { result <- getExampleDfID(userSpecie = input$userSpecie, path2Database = PATH2, shiny = TRUE) })#end of withProgress return(result) })#end of reactive col <- list(Database = colDef(maxWidth = MAX_WIDTH_COL), Example_genes = colDef(maxWidth = EXAMPLE_GENES_WCOL)) } else if (input$userIDtype != "None") { #if user doesn't give genelist ## and give id type getExampleSer <- shiny::reactive({ withProgress(message = 'Work be done...', value = 0, { userSpecieNum <- orgInfo$id[orgInfo$name2 == input$userSpecie] result <- getExample(userSpecie = userSpecieNum, path2Database = PATH, userIDtype = input$userIDtype, shiny = TRUE) })#end of withProgress return(result) })#end of reactive col <- list(User_ID = colDef(maxWidth = MAX_WIDTH_COL), Ensembl_ID = colDef(maxWidth = MAX_WIDTH_COL)) output$downloadIDPage <- downloadHandler( filename = function() { paste0(input$userIDtype, "_mapped_to_Ensembl_ID.csv") }, content = function(file) { downloadFile <- getExampleSer() colnames(downloadFile) <- c(paste(input$userIDtype), 'Ensembl_ID') write.csv(downloadFile, file) } )#end of downloadIDPage shinyjs::show(id = "downloadIDPage") }#end of if/else res <- getExampleSer() shinyjs::hide(id = "tableDefault") output$tableResult <- renderReactable({ reactable::reactable(data = res, columns = col, searchable = TRUE, defaultPageSize = 4, highlight = TRUE, resizable = TRUE, minRows = 5, showPageSizeOptions = TRUE, pageSizeOptions = c(4, 10, 25, 50, 100)) })#end of tableResult shinyjs::show(id = "tableResult") }) #end of observeEvent observeEvent(input$resetIDPage, { shinyjs::hide(id = "tableResult") shinyjs::hide(id = "downloadIDPage") }) #end of observeEvent }# end of GeneIDPage
/shinyapps/idep93/gene_id_page_ser.R
no_license
iDEP-SDSU/idep
R
false
false
7,030
r
# @file Plots.R # # Copyright 2018 Observational Health Data Sciences and Informatics # # This file is part of MethodEvaluation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #' Plot the ROC curves for various injected signal sizes #' #' @param logRr A vector containing the log of the relative risk as estimated by a method. #' @param trueLogRr A vector containing the injected log(relative risk) for each estimate. #' @param showAucs Should the AUCs be shown in the plot? #' @param fileName Name of the file where the plot should be saved, for example 'plot.png'. See the #' function \code{ggsave} in the ggplot2 package for supported file formats. #' #' @return #' A Ggplot object. Use the \code{ggsave} function to save to file. #' #' @export plotRocsInjectedSignals <- function(logRr, trueLogRr, showAucs, fileName = NULL) { if (any(is.na(logRr))) { warning("Some estimates are NA, removing prior to computing AUCs") trueLogRr <- trueLogRr[!is.na(logRr)] logRr <- logRr[!is.na(logRr)] } trueLogRrLevels <- unique(trueLogRr) if (all(trueLogRrLevels != 0)) stop("Requiring at least one true relative risk of 1") allData <- data.frame() aucs <- c() trueRrs <- c() for (trueLogRrLevel in trueLogRrLevels) { if (trueLogRrLevel != 0) { data <- data.frame(logRr = logRr[trueLogRr == 0 | trueLogRr == trueLogRrLevel], trueLogRr = trueLogRr[trueLogRr == 0 | trueLogRr == trueLogRrLevel]) data$truth <- data$trueLogRr != 0 roc <- pROC::roc(data$truth, data$logRr, algorithm = 3) if (showAucs) { aucs <- c(aucs, pROC::auc(roc)) trueRrs <- c(trueRrs, exp(trueLogRrLevel)) } data <- data.frame(sens = roc$sensitivities, fpRate = 1 - roc$specificities, trueRr = exp(trueLogRrLevel)) data <- data[order(data$sens, data$fpRate), ] allData <- rbind(allData, data) } } allData$trueRr <- as.factor(allData$trueRr) plot <- ggplot2::ggplot(allData, ggplot2::aes(x = fpRate, y = sens, group = trueRr, color = trueRr, fill = trueRr)) + ggplot2::geom_abline(intercept = 0, slope = 1) + ggplot2::geom_line(alpha = 0.5, size = 1) + ggplot2::scale_x_continuous("1 - specificity") + ggplot2::scale_y_continuous("Sensitivity") if (showAucs) { aucs <- data.frame(auc = aucs, trueRr = trueRrs) aucs <- aucs[order(-aucs$trueRr), ] for (i in 1:nrow(aucs)) { label <- paste0("True RR = ", format(round(aucs$trueRr[i], 2), nsmall = 2), ": AUC = ", format(round(aucs$auc[i], 2), nsmall = 2)) plot <- plot + ggplot2::geom_text(label = label, x = 1, y = (i - 1) * 0.1, hjust = 1, color = "#000000") } } if (!is.null(fileName)) ggplot2::ggsave(fileName, plot, width = 5.5, height = 4.5, dpi = 400) return(plot) } #' Plot the coverage #' #' @details #' Plot the fractions of estimates where the true effect size is below, above or within the confidence #' interval, for one or more true effect sizes. #' #' @param logRr A numeric vector of effect estimates on the log scale #' @param seLogRr The standard error of the log of the effect estimates. 
Hint: often the standard #' error = (log(<lower bound 95 percent confidence interval>) - log(<effect #' estimate>))/qnorm(0.025) #' @param trueLogRr A vector of the true effect sizes #' @param region Size of the confidence interval. Default is .95 (95 percent). #' @param fileName Name of the file where the plot should be saved, for example 'plot.png'. See the #' function \code{ggsave} in the ggplot2 package for supported file formats. #' #' @export plotCoverageInjectedSignals <- function(logRr, seLogRr, trueLogRr, region = 0.95, fileName = NULL) { data <- data.frame(logRr = logRr, logLb95Rr = logRr + qnorm((1 - region)/2) * seLogRr, logUb95Rr = logRr + qnorm(1 - (1 - region)/2) * seLogRr, trueLogRr = trueLogRr, trueRr = round(exp(trueLogRr), 2)) if (any(is.na(data$logRr))) { warning("Some estimates are NA, removing prior to computing coverage") data <- data[!is.na(data$logRr), ] } vizD <- data.frame() for (trueRr in unique(data$trueRr)) { subset <- data[data$trueRr == trueRr, ] d <- data.frame(trueRr = trueRr, group = c("Below CI", "Within CI", "Above CI"), fraction = 0, pos = 0) d$fraction[1] <- mean(subset$trueLogRr < subset$logLb95Rr) d$fraction[2] <- mean(subset$trueLogRr >= subset$logLb95Rr & subset$trueLogRr <= subset$logUb95Rr) d$fraction[3] <- mean(subset$trueLogRr > subset$logUb95Rr) d$pos[1] <- d$fraction[1]/2 d$pos[2] <- d$fraction[1] + (d$fraction[2]/2) d$pos[3] <- d$fraction[1] + d$fraction[2] + (d$fraction[3]/2) vizD <- rbind(vizD, d) } vizD$pos <- sapply(vizD$pos, function(x) { min(max(x, 0.05), 0.95) }) vizD$label <- paste(round(100 * vizD$fraction), "%", sep = "") vizD$group <- factor(vizD$group, levels = c("Below CI", "Within CI", "Above CI")) theme <- ggplot2::element_text(colour = "#000000", size = 10) plot <- with(vizD, { ggplot2::ggplot(vizD, ggplot2::aes(x = as.factor(trueRr), y = fraction)) + ggplot2::geom_bar(ggplot2::aes(fill = group), stat = "identity", position = "stack", alpha = 0.8) + ggplot2::scale_fill_manual(values = c("#174a9f", "#f9dd75", "#f15222")) + ggplot2::geom_text(ggplot2::aes(label = label, y = pos), size = 3) + ggplot2::scale_x_discrete("True relative risk") + ggplot2::scale_y_continuous("Coverage") + ggplot2::theme(panel.grid.minor = ggplot2::element_blank(), panel.background = ggplot2::element_rect(fill = "#FAFAFA", colour = NA), panel.grid.major = ggplot2::element_blank(), axis.ticks = ggplot2::element_blank(), axis.text.y = ggplot2::element_blank(), axis.text.x = theme, legend.key = ggplot2::element_blank(), legend.position = "right") }) if (!is.null(fileName)) ggplot2::ggsave(fileName, plot, width = 5, height = 3.5, dpi = 400) return(plot) }
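Not part of the package source: a short usage sketch for the two plotting functions above, run on simulated estimates. It assumes MethodEvaluation (with its pROC and ggplot2 dependencies) is installed; the simulated data and object names are illustrative. The middle block simply checks the seLogRr formula from the @param hint.

library(MethodEvaluation)

set.seed(123)
trueLogRr <- rep(log(c(1, 1.5, 2, 4)), each = 250)  # at least one true RR of 1 is required
seLogRr   <- runif(length(trueLogRr), 0.05, 0.3)
logRr     <- rnorm(length(trueLogRr), mean = trueLogRr, sd = seLogRr)

# If only a lower 95 percent bound is available, the hinted formula recovers the SE:
lb95Rr      <- exp(logRr + qnorm(0.025) * seLogRr)
seRecovered <- (log(lb95Rr) - logRr) / qnorm(0.025)
all.equal(seRecovered, seLogRr)

plotRocsInjectedSignals(logRr = logRr, trueLogRr = trueLogRr, showAucs = TRUE)
plotCoverageInjectedSignals(logRr = logRr, seLogRr = seLogRr, trueLogRr = trueLogRr)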
/R/Plots.R
permissive
jamieweaver/MethodEvaluation
R
false
false
7,298
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{SigmaL2}
\alias{SigmaL2}
\title{SigmaL2}
\usage{
SigmaL2(zlab, listZonePoint, tabVal, surfVoronoi)
}
\arguments{
\item{zlab}{list with zone numbers for each zone label}

\item{listZonePoint}{list of indices of data points within zones, result of call to \code{\link{calNei}}}

\item{tabVal}{SpatialPointsDataFrame containing data values}

\item{surfVoronoi}{Surfaces of the Voronoi polygons corresponding to data pts}
}
\value{
a list with components
\describe{
\item{cL}{weighted (with Voronoi surfaces) average of per label variances}
\item{SigmaL2}{vector of per label variances}
\item{SL}{vector of per label Voronoi surfaces}
\item{mL}{vector of weighted (with Voronoi surfaces) per label average values}
\item{voroLab}{vector of per label data}
}
}
\description{
SigmaL2
}
\details{
compute overall mean and variance of all zones for each label plus sum of them for all labels
}
\examples{
\donttest{
data(mapTest)
# run zoning with 2 quantiles corresponding to probability values 0.4 and 0.7
# save initial zoning and last level zonings
criti=correctionTree(c(0.4,0.7),mapTest,SAVE=TRUE)
K=criti$zk[[2]][[1]]
uni=unique(K$lab)
zlab=sapply(uni,function(x){(1:length(K$lab))[K$lab==x]})
sig=SigmaL2(zlab,K$listZonePoint,mapTest$krigData,mapTest$krigSurfVoronoi)
}
}
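A possible reading of the \details entry, written out as surface-weighted formulas; this is an interpretation based on the argument descriptions (Voronoi surfaces used as weights), not text taken from the geozoning sources.

% For a label L, let i run over the data points of the zones carrying that label,
% y_i be the data values and s_i the surfaces of their Voronoi polygons.
\[
  m_L = \frac{\sum_{i \in L} s_i \, y_i}{\sum_{i \in L} s_i}, \qquad
  \sigma_L^2 = \frac{\sum_{i \in L} s_i \, (y_i - m_L)^2}{\sum_{i \in L} s_i}, \qquad
  S_L = \sum_{i \in L} s_i .
\]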
/man/SigmaL2.Rd
no_license
hazaeljones/geozoning
R
false
true
1,362
rd
appendUnitTET <- function(Z, matches) {
  # Split sample
  treatedSample <- Z[which(Z[,2] == 1),]
  controlSample <- Z[which(Z[,2] == 0),]

  # Get N1 and N0
  n1 <- length(treatedSample[,1])
  n0 <- length(controlSample[,1])

  # First, I want to generate counterfactual outcomes for each treated unit.
  # To do this, I average the outcomes of all the matched control units.
  # To do THIS, I create an intermediate matrix for convenience.
  inter <- matrix(controlSample[,3], nrow=length(controlSample[,3]), ncol=n1)

  # This matrix has N1 identical columns. Each column contains the outcome (Y)
  # for the i'th control unit in the i'th row. Thus, if I can multiply this
  # matrix element-wise with the matching matrix, I will end up with a matrix
  # where all I need are the averages of ALL NONZERO elements in each column.
  # This average in the j'th column will be the counterfactual for the j'th
  # treated unit.

  # The number of matches to each treatment unit are
  nMatches <- colSums(matches)

  # The summed counterfactual outcomes to each treatment unit are
  summedCF <- colSums(inter * matches)

  # Thus the counterfactuals for the n1 treated units are
  counterFactuals <- summedCF / nMatches  # This should be of length n1

  # Thus the treatment effect estimator for each treated unit is
  unitTET <- treatedSample[,3] - counterFactuals

  unitTETs <- c(unitTET, rep(NA, times=n0))
  Z <- cbind(Z, unitTETs)
  return(Z)
}
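Not part of the original file: a tiny worked example of the element-wise trick described in the comments above, with a made-up Z (column 2 = treatment indicator, column 3 = outcome, matching the layout the function assumes) and a hand-built 0/1 matching matrix. The appended column lines up with the rows only because the treated units come first here, which is what the final cbind() implicitly assumes.

# Two treated units (columns of 'matches') and two control units (rows).
Z <- rbind(c(1, 1, 10),   # treated, Y = 10
           c(2, 1,  7),   # treated, Y = 7
           c(3, 0,  4),   # control, Y = 4
           c(4, 0,  6))   # control, Y = 6

matches <- rbind(c(1, 1),  # control 1 matched to both treated units
                 c(0, 1))  # control 2 matched to treated unit 2 only

out <- appendUnitTET(Z, matches)
out[, 4]
# Treated unit 1: 10 - 4 = 6; treated unit 2: 7 - mean(c(4, 6)) = 2; controls get NA.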
/R Code/Rewrite/appendUnitTET.R
no_license
njulius/wild-bootstrap-matching
R
false
false
1,505
r
######################################################################################################
######################                 Required library                         ######################
######################################################################################################
# install.packages("flifo")   # install once, outside the script, if needed
library(flifo)

######################################################################################################
######################                   Tabu Search                            ######################
######################################################################################################
TabuSearch <- function(f, x, d, nb_count, tabu_list_len, max_iter){
  # f             - objective function
  # x             - starting point
  # d             - parameter defining the size of the neighbourhood of x
  # nb_count      - number of neighbours to examine
  # tabu_list_len - length of the tabu list
  # max_iter      - number of iterations

  start_time <- Sys.time()
  tabu_list <- flifo::fifo(max_length = tabu_list_len)      # create the tabu list
  candidate_list <- rep(list(NA), nb_count)                 # list of candidates
  candidate_list_outside_tabu <- rep(list(NA), nb_count)    # candidates outside the tabu list
  in_neigbourhood <- FALSE                                  # is the candidate inside a tabu neighbourhood?

  ### Build the results list
  out <- list(x_hist = matrix(NA, nrow = max_iter, ncol = 2),
              f_hist = rep(NA, max_iter),
              x_opt = x,
              f_opt = f(x),
              t_eval = NA)
  out$x_hist[1, ] <- x
  out$f_hist[1] <- f(x)

  for(u in 2:max_iter){
    ### Following the advice in "Metaheuristics for Hard Optimization", build a finite neighbourhood
    for(j in 1:nb_count){
      candidate_list[[j]] <- x + runif(2, min = -d, max = d)   # draw neighbours at random
    }

    place <- 1
    ### check whether the candidates lie on the tabu list or inside the neighbourhood of its points
    for(i in candidate_list){
      if(!flifo::is.empty(tabu_list)){      # if the tabu list is not empty
        for(z in tabu_list){                # is the candidate inside this point's neighbourhood?
          if((i[1] <= z[1]+d) && (i[1] >= z[1]-d) && (i[2] <= z[2]+d) && (i[2] >= z[2]-d)){
            in_neigbourhood <- TRUE
          }
        }
      }
      if(in_neigbourhood == FALSE){
        candidate_list_outside_tabu[[place]] <- i   # not in any tabu neighbourhood, so keep it
      }
      place <- place + 1
      in_neigbourhood <- FALSE
    }

    ### add the current point to the tabu list
    x_tabu <- x                               # point to push onto the tabu list
    if(length(tabu_list) == tabu_list_len){   # is the queue full?
      pop(tabu_list)                          # drop the oldest element
      push(tabu_list, x_tabu)                 # push the new one
    }else{                                    # if it is not full, just push
      push(tabu_list, x_tabu)
    }

    ### Pick the new point
    ######################
    calculated <- sapply(candidate_list_outside_tabu, f)                   # evaluate f at every candidate (numeric vector, so which.min works)
    x <- unlist(candidate_list[which.min(calculated)], use.names = FALSE)  # take the point with the lowest value

    ### If no candidate outside the tabu list was found, stop
    #########################################################
    if(is.null(x)){
      print(paste("No candidate outside the tabu list found in iteration", u))
      return(out)
    }

    ### reset the list of candidates outside the tabu list
    ######################################################
    candidate_list_outside_tabu <- rep(list(NA), nb_count)

    ### store the results
    #####################
    out$x_hist[u, ] <- x
    out$f_hist[u] <- f(x)

    # check for a new optimum
    #########################
    if(f(x) < out$f_opt){
      out$x_opt <- x
      out$f_opt <- f(x)
    }
  }
  out$t_eval <- Sys.time() - start_time
  return(out)
}

######################################################################################################
######################                   Example call                           ######################
######################################################################################################
# Rosenbrock-type test function
ObjFun <- function(x){
  out <- (1 - x[1]^2)^2 + 100 * (x[2] - x[1]^2)^2
  return(out)
}

# parameters
x0 <- c(3, 4)
max_iter <- 1000
d <- 0.4
f <- ObjFun
nb_count <- 500
tabu_list_len <- 50

ts <- TabuSearch(ObjFun, x0, d, nb_count, tabu_list_len, max_iter)
###################################################################################################
###################################################################################################
###################################################################################################
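Not in the original script: a small follow-up sketch for inspecting the object returned by the call above. It assumes the run completed (rows of x_hist and f_hist are left NA if the search stops early) and uses only base graphics.

# Best value found, where it was found, and how long the run took
ts$f_opt
ts$x_opt
ts$t_eval

# Convergence trace: objective value of the accepted point at each iteration
plot(ts$f_hist, type = "l", xlab = "iteration", ylab = "f(x)",
     main = "Tabu search on the test objective")

# Path of the search in the (x1, x2) plane, with the best point marked
plot(ts$x_hist, type = "l", xlab = "x1", ylab = "x2")
points(t(ts$x_opt), col = "red", pch = 19)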
/Tabu Search.R
no_license
RadekKrol/TabuSearch_r
R
false
false
5,108
r
## Pooled robust design
### V01 - pooled fixed sites
### V02 - random sites each year: done, terrible performance
library(RMark)

nbends <- 81
bend_km <- runif(nbends, 1, 12)
phi <- 0.8
p <- 0.3
nprim <- 10   # years
nsec <- 4
dens <- 5     # 5 fish per km
N <- rpois(nbends, dens * bend_km)
N

## NO MOVEMENT
loc <- rep(1:nbends, N)
Z <- matrix(0, nrow = sum(N), ncol = nprim)
Z[, 1] <- 1
for(i in 2:nprim) {
  Z[, i] <- rbinom(nrow(Z), 1, Z[, i - 1] * phi)
}

catch <- array(0, c(nrow(Z), nsec, nprim))
sample_bends <- matrix(0, 10, nprim)
for(i in 1:nprim) {
  sample_bends[, i] <- sample(c(1:nbends), 10, replace = FALSE)
  indx <- which(loc %in% sample_bends[, i])
  for(j in 1:nsec) {
    # capture probability depends on whether each sampled fish is alive
    catch[indx, j, i] <- rbinom(length(indx), 1, p * Z[indx, i])
  }
}

## process capture histories
ch <- catch[, , 1]
for(i in 2:nprim) {
  ch <- cbind(ch, catch[, , i])
}
ch <- ch[indx, ]
ch <- ch[which(rowSums(ch) > 0), ]
ch <- data.frame(ch = apply(ch, 1, paste0, collapse = ""),
                 freq = 1, stringsAsFactors = FALSE)

## process vector of occasions for RMark
occ <- rep(0, nsec * nprim)
occ[cumsum(rep(nsec, nprim))] <- 1
occ <- occ[-length(occ)]

# funky formatting for RMark
rd <- process.data(data = ch,
                   model = "Robust",
                   time.intervals = occ)
S <- list(formula = ~1)                 # SURVIVAL
# SHARE = TRUE TO SET C = P
p <- list(formula = ~1, share = TRUE)   # CAPTURE PROBABILITY
f0 <- list(formula = ~time)             # NUMBER NOT ENCOUNTERED
GammaDoublePrime <- list(formula = ~1, share = TRUE)
GammaPrime <- list(formula = ~1)

fit <- mark(data = rd,
            model = "Robust",
            time.intervals = occ,   # 0 = within primary period, 1 = between primary periods
            model.parameters = list(
              S = S,
              GammaDoublePrime = GammaDoublePrime,
              # GammaPrime = GammaPrime,  # not needed when share = TRUE
              p = p),
            threads = 2,
            brief = TRUE)
summary(fit)
fit$results$derived$`N Population Size`
NN <- colSums(Z)
lens <- colSums(matrix(bend_km[sample_bends], 10, nprim))
dens <- fit$results$derived$`N Population Size`$estimate / lens
plot(dens * sum(bend_km), colSums(Z)); abline(0, 1)
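Not part of the original script: a small sketch that makes the occ construction above explicit. In RMark's robust-design coding, 0 separates secondary occasions inside a primary period, 1 marks the gap between primary periods, and the vector has one fewer entry than there are capture occasions, which is why the last element is dropped.

nprim <- 3; nsec <- 4                  # smaller numbers, same recipe as above
occ <- rep(0, nsec * nprim)
occ[cumsum(rep(nsec, nprim))] <- 1
occ <- occ[-length(occ)]
occ
# [1] 0 0 0 1 0 0 0 1 0 0 0
# Eleven intervals separate the twelve occasions: three primaries of four
# secondaries each, with a 1 at each year boundary.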
/_analysis/pooled-rd-v02.R
no_license
mcolvin/PSPAP-Reboot
R
false
false
1,972
r
library("PhysicalActivity")
source("functions.R")
source("PhysicalActivity/R/nthOccurance.R")
source("/home/dewoller/mydoc/research/noraShields/students/carlon/stats/func.R")

a <- readCountsDataRT3("test.csv")
markingCarlon <- markWearing(a)
markingStandard <- wearingMarking(a, perMinuteCts = 1)
b <- wearingMarking(a, perMinuteCts = 1)
dataset <- a
/load.R
no_license
dewoller/foot_health_2015
R
false
false
373
r
#' @title
#' Mode -- most frequent value of a variable
#'
#' @description
#' Calculate and return the most frequently occurring value of a vector.
#'
#' @param x A vector of values
#' @param na.rm a logical value indicating whether `NA` values should be
#'   stripped before the computation proceeds.
#'
#' @return
#' Returns the most frequent value. If there is more than one, all of them are
#' returned in a vector.
#'
#' @references
#' https://www.tutorialspoint.com/r/r_mean_median_mode.htm
#' https://rdrr.io/cran/DescTools/man/Mode.html
#'
#' @export
#'
#' @examples
#' # Create the vector with numbers.
#' x <- c(2, 1, 2, 3, 1, 2, 3, 4, 1, 5, 5, NA, 2, 3)
#'
#' # Calculate the mode using the user function.
#' calc_mode(x, na.rm = TRUE)
#' calc_mode(x, na.rm = FALSE)
#'
#' library(DescTools)
#' data(d.pizza)
#'
#' calc_mode(d.pizza$driver)
#' sapply(d.pizza[, c("driver", "temperature", "date")], calc_mode, na.rm = TRUE)
#'
#' # Two values are returned if more than one mode
#' y <- c(2, 2, 2, 3, 3, 3)
#' calc_mode(y)
calc_mode <- function(x, na.rm = FALSE) {

  if (!is.atomic(x) | is.matrix(x)) {
    stop("Mode supports only atomic vectors. Use sapply(*, calc_mode) instead.")
  }

  if (na.rm == TRUE) {
    x <- x[!is.na(x)]
  }

  # uniqv <- unique(x)
  # res <- uniqv[which.max(tabulate(match(x, uniqv)))]

  tab <- table(x)
  res <- names(which(tab == max(tab)))

  if (!inherits(x, "factor")) {
    class(res) <- class(x)
  }

  as.vector(res)
}
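Not part of the package source: a short sketch contrasting the commented-out which.max(tabulate(match(...))) approach with the table()-based one actually used, since only the latter returns every tied mode. It runs against calc_mode() as defined above; the #> lines show the expected output.

y <- c(2, 2, 2, 3, 3, 3)

# The commented-out alternative keeps only the first value reaching the maximum count...
uniqv <- unique(y)
uniqv[which.max(tabulate(match(y, uniqv)))]
#> [1] 2

# ...whereas the table()-based version returns both tied modes, coerced back to
# the class of the input.
calc_mode(y)
#> [1] 2 3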
/R/calc_mode.R
permissive
emilelatour/lamisc
R
false
false
1,526
r
# This function predicts partition coefficients for all tissues, then lumps them into a single compartment. The effective volume of distribution is calculated by summing each tissues volume times it's partition coefficient relative to plasma. Plasma, and the paritioning into RBCs are also added to get the total volume of distribution in L/KG BW. calc_vdist<- function(chem.cas=NULL, chem.name=NULL, parameters=NULL, default.to.human=F, species="Human",suppress.messages=F, adjusted.Funbound.plasma=T,regression=T) { physiology.data <- physiology.data Parameter <- NULL if(is.null(parameters)){ schmitt.parameters <- parameterize_schmitt(chem.cas=chem.cas,chem.name=chem.name,default.to.human=default.to.human,species=species) parameters <- suppressWarnings(predict_partitioning_schmitt(parameters=schmitt.parameters,species=species,regression=regression,adjusted.Funbound.plasma=adjusted.Funbound.plasma)) if(adjusted.Funbound.plasma) parameters <- c(parameters,schmitt.parameters['Funbound.plasma']) else parameters <- c(parameters,Funbound.plasma=schmitt.parameters[['unadjusted.Funbound.plasma']]) } schmitt.names <- c("Kadipose2pu","Kbone2pu","Kbrain2pu","Kgut2pu","Kheart2pu","Kkidney2pu","Kliver2pu","Klung2pu","Kmuscle2pu","Kskin2pu","Kspleen2pu","Krbc2pu", "Krest2pu") schmitt.specific.names <- c("Kadipose2pu","Kbone2pu","Kbrain2pu","Kheart2pu","Kmuscle2pu","Kskin2pu","Kspleen2pu") if(any(names(parameters) %in% schmitt.specific.names) & !all(c(schmitt.names) %in% names(parameters))) stop("All predict_partitioning_schmitt coefficients must be included if not using pbtk or 3compartment parameters.") else if(all(schmitt.names %in% names(parameters))) schmitt.params <- T else schmitt.params <- F if(schmitt.params & !('funbound.plasma' %in% tolower(names(parameters)))){ if(is.null(chem.cas) & is.null(chem.name))stop("Specify chem.name or chem.cas with correct species if not including Funbound.plasma with predict_partitioning_schmitt coefficients.") else if(is.null(chem.cas)){ out <- get_chem_id(chem.cas=chem.cas,chem.name=chem.name) chem.cas <- out$chem.cas } fub <- try(get_invitroPK_param("Funbound.plasma",species,chem.CAS=chem.cas),silent=T) if (class(fub) == "try-error" & default.to.human) { fub <- try(get_invitroPK_param("Funbound.plasma","Human",chem.CAS=chem.cas),silent=T) warning(paste(species,"coerced to Human for protein binding data.")) } if (class(fub) == "try-error") stop("Missing protein binding data for given species. 
Set default.to.human to true to substitute human value.") if (fub == 0) { fub <- 0.005 warning("Fraction unbound = 0, changed to 0.005.") } if(adjusted.Funbound.plasma){ Flipid <- subset(physiology.data,Parameter=='Plasma Effective Neutral Lipid Volume Fraction')[,which(tolower(colnames(physiology.data)) == tolower(species))] pKa_Donor <- suppressWarnings(get_physchem_param("pKa_Donor",chem.CAS=chem.cas)) pKa_Accept <- suppressWarnings(get_physchem_param("pKa_Accept",chem.CAS=chem.cas)) Pow <- 10^get_physchem_param("logP",chem.CAS=chem.cas) ion <- calc_ionization(pH=7.4,pKa_Donor=pKa_Donor,pKa_Accept=pKa_Accept) dow <- Pow * (ion$fraction_neutral + 0.001 * ion$fraction_charged + ion$fraction_zwitter) fub <- 1 / ((dow) * Flipid + 1 / fub) } parameters <- c(parameters,Funbound.plasma=fub) } # Check the species argument for capitilization problems and whether or not it is in the table: if (!(species %in% colnames(physiology.data))) { if (toupper(species) %in% toupper(colnames(physiology.data))) { phys.species <- colnames(physiology.data)[toupper(colnames(physiology.data))==toupper(species)] } else stop(paste("Physiological PK data for",species,"not found.")) } else phys.species <- species # Load the physiological parameters for this species this.phys.data <- physiology.data[,phys.species] names(this.phys.data) <- physiology.data[,1] hematocrit <- this.phys.data["Hematocrit"] plasma.vol <- this.phys.data["Plasma Volume"]/1000 # L/kg BW if(schmitt.params){ PCs <- subset(parameters,names(parameters) %in% schmitt.names) # Get_lumped_tissues returns a list with the lumped PCs, vols, and flows: lumped_params <- lump_tissues(PCs,tissuelist=NULL,species=species) RBC.vol <- plasma.vol/(1 - hematocrit)*hematocrit vol.dist <- plasma.vol + RBC.vol*lumped_params$Krbc2pu*parameters$Funbound.plasma+lumped_params$Krest2pu*lumped_params$Vrestc*parameters$Funbound.plasma }else{ pbtk.name.list <- c("BW","Clmetabolismc","Funbound.plasma","Fgutabs","Fhep.assay.correction","hematocrit","Kgut2pu","kgutabs","Kkidney2pu","Kliver2pu","Klung2pu","Krbc2pu","Krest2pu","million.cells.per.gliver","MW","Qcardiacc" ,"Qgfrc","Qgutf","Qkidneyf","Qliverf","Rblood2plasma","Vartc","Vgutc","Vkidneyc","Vliverc","Vlungc","Vrestc","Vvenc") name.list.3comp <- c("BW","Clmetabolismc","Funbound.plasma","Fgutabs","Fhep.assay.correction","hematocrit","Kgut2pu","Krbc2pu","kgutabs","Kliver2pu","Krest2pu","million.cells.per.gliver","MW","Qcardiacc","Qgfrc","Qgutf","Qliverf","Rblood2plasma","Vgutc","Vliverc","Vrestc") if(!all(name.list.3comp %in% names(parameters)) | !all(names(parameters) %in% pbtk.name.list))stop("Use parameter lists from parameterize_pbtk, parameterize_3compartment, or predict_partitioning_schmitt only.") #necess <- c("Funbound.plasma","hematocrit","Vrestc","Krest2plasma","Krbc2plasma") #if(!all(necess %in% names(parameters))){ #if(is.null(chem.cas) & is.null(chem.name))stop('chem.cas or chem.name must be specified when not including Funbound.plasma, hematocrit, Vrestc, Krest2plasma, and Krbc2plasma in parameters.') # params <- parameterize_pbtk(chem.cas=chem.cas,chem.name=chem.name,species=species,default.to.human=default.to.human) # parameters <- c(parameters,params[!(names(params) %in% names(parameters))]) # if(!suppress.messages)warning('Unspecified pbtk model parameters included in the calculation. 
Include all necessary parameters (Funbound.plasma, hematocrit, Vrestc, Krest2plasma, and Krbc2plasma) to use a different set of parameters in the calculation.') # } RBC.vol <- plasma.vol/(1 - parameters$hematocrit)*parameters$hematocrit vol.dist <- plasma.vol + RBC.vol*parameters[["Krbc2pu"]]*parameters$Funbound.plasma lastchar <- function(x){substr(x, nchar(x), nchar(x))} firstchar <- function(x){substr(x, 1,1)} scaled.volumes <- names(parameters)[firstchar(names(parameters))=="V"&lastchar(names(parameters))=="c"] PCs <- names(parameters)[firstchar(names(parameters))=="K"] comps <- intersect(substr(scaled.volumes,2,nchar(scaled.volumes)-1),substr(PCs,2,nchar(PCs)-3)) comps <- comps[!(comps %in% c('art','ven'))] for(this.comp in comps){ eval(parse(text=paste('vol.dist <- vol.dist + ', parameters[[scaled.volumes[grep(this.comp,scaled.volumes)]]],'*', parameters[[PCs[grep(this.comp,PCs)]]],'*',parameters$Funbound.plasma))) # L } } if(!suppress.messages){ if(is.null(chem.name) & is.null(chem.cas)) cat("Volume of distribution returned in units of L/kg BW.\n") else cat(paste(toupper(substr(species,1,1)),substr(species,2,nchar(species)),sep=''),"volume of distribution returned in units of L/kg BW.\n") } return(as.numeric(vol.dist)) }
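To make the lumping description at the top of the file concrete, the volume of distribution assembled in both branches above can be written out as follows (read off the code; f_up is Funbound.plasma, h the hematocrit, volumes in L/kg BW):

\[
  V_{\mathrm{RBC}} = \frac{V_{\mathrm{plasma}}}{1-h}\,h, \qquad
  V_{d} = V_{\mathrm{plasma}} + V_{\mathrm{RBC}}\, K_{\mathrm{rbc:pu}}\, f_{up}
        + \sum_{t} V_{t}\, K_{t:\mathrm{pu}}\, f_{up},
\]

where the sum runs over the single lumped "rest" compartment in the Schmitt branch, or over every tissue with both a scaled volume V_t and a partition coefficient K_{t:pu} (arterial and venous blood excluded) in the pbtk/3-compartment branch.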
/R/Calc_volume_of_distribution.R
no_license
HQData/CompTox-ExpoCast-httk
R
false
false
7,671
r
# library(data.table)
# library(Rtsne)
# library(tsne)
# rm(list = ls()); gc()
# load(file = "./modelData/feat_all_extra_imputed_cleaned_pca_0527.RData")
#
# predictors = colnames(fnl.dat)[!colnames(fnl.dat) %in% c('Patient_ID','response')]
# setDF(fnl.dat)
# fnl.dat = as.matrix(fnl.dat[, predictors])
#
# set.seed(8) # Set a seed if you want reproducible results
# rm()
# tsne_out <- Rtsne(fnl.dat, check_duplicates = F) # Run TSNE
#
# # tsne_out = tsne(fnl.dat)
#
# gc()

library(data.table)
rm(list = ls()); gc()
load(file = "./modelData/tmp_outcomes2016.RData")
load(file = "./modelData/feat_all_scale_20170528_stacking.RData")
colnames(fnl.dat) = c("Patient_ID", paste0("FEAT_", 1:(ncol(fnl.dat) - 2)), "response")
fnl.dat[, response := ifelse(Patient_ID %in% tmp_outcomes2016, 1, 0)]
predictors = colnames(fnl.dat)[!colnames(fnl.dat) %in% c('Patient_ID', 'response')]
response = 'response'
setDT(fnl.dat)
training = fnl.dat[Patient_ID <= 279201]
rm(fnl.dat); gc()

library(mlr)
library(FSelector)
library(randomForestSRC)
setDF(training)
gc()
training$response = as.factor(training$response)
train.task <- makeClassifTask(data = training[, c(predictors[1:20], response)], target = response)
gc()
imp_var1 <- generateFilterValuesData(train.task, method = c("information.gain"))

# chunks 0-96 were run previously; 'imp_var' must already hold their results when resuming at 97
for(i in 97:218){
  print(i)
  fn = 1:20 + 20 * i
  train.task <- makeClassifTask(data = training[, c(predictors[fn], response)], target = response)
  gc()
  imp_var1 <- generateFilterValuesData(train.task, method = c("information.gain"))$data
  if(i == 0){
    imp_var = imp_var1
  }else{
    imp_var = rbind(imp_var, imp_var1)
  }
}

# note: 'fnl.dat' was removed above, so this lookup relies on it still being in the workspace
featLookup = data.frame(oldFeat = colnames(fnl.dat),
                        newFeat = c("Patient_ID", paste0("FEAT_", 1:(ncol(fnl.dat) - 1))))
varImp = merge(featLookup, imp_var, by.x = "newFeat", by.y = "name", all.x = T)
save(imp_var, varImp, file = "./datathon2017/featureImportant.RData")

# note: after the loop, imp_var1 holds only the last chunk's data.frame; imp_var holds all chunks
var1 <- imp_var1$data[imp_var1$data$information.gain > 0.03, c('name')]
plot_infogain <- plotFilterValues(imp_var1, feat.type.cols = TRUE)
plot_infogain

View(trans[Patient_ID %in% c(349, 440, 2429, 4586, 5037)])
# patient_id prob target ivan
# 1: 349 8.747661e-01 0 1
# 2: 440 6.436778e-01 0 1
# 3: 2429 9.350704e-01 0 1
# 4: 4586 6.754684e-01 0 1
# 5: 5037 5.916706e-05 0 1
# 6: 5962 6.168390e-01 0 1
# 7: 6302 5.514075e-01 0 1
# 8: 7795 7.988164e-01 0 1
# 9: 8238 6.642369e-01 0 1
# 10: 8620 5.258309e-06 0 1
# 11: 9170 3.976965e-01 0 1
# 12: 10590 8.197543e-01 0 1

############
# glm ####
############
# training[,response] = as.factor(training[,response])
# library(caret)
# library(glmnet)
# set.seed(5)
# cv <- 10
# folds <- createFolds(training[,response], k = cv, list = FALSE)
# f <- folds == 2
# fit <- glmnet(as.matrix(training[!f, predictors]), training[!f, response],
#               family = 'binomial', alpha = 1, standardize = TRUE,
#               intercept = TRUE, thresh = 1e-7, maxit = 10^5, type.gaussian = 'naive',
#               type.logistic = 'modified.Newton'
#               )
# preds <- predict(fit, as.matrix(training[f,predictors]),type="class")
# evalerror(as.numeric(preds[,2]),training[f,response])
# # 0.7961559
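Not in the original script: a hedged sketch of one way the merged varImp table might be used downstream to keep only informative features; it assumes the varImp and training objects built above exist and reuses the 0.03 information-gain cut-off applied to var1.

# Rank features by information gain and keep those above the 0.03 cut-off;
# NA rows (e.g. Patient_ID, which never appears in imp_var) are dropped first.
topFeat <- varImp[order(-varImp$information.gain), ]
keep <- as.character(topFeat$newFeat[!is.na(topFeat$information.gain) &
                                       topFeat$information.gain > 0.03])
length(keep)

# A reduced design matrix for downstream modelling
trainingReduced <- training[, c(keep, "response")]
dim(trainingReduced)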
/tsne.R
no_license
ivanliu1989/datathon2017
R
false
false
3,369
r
# Libraries loaded
library(dplyr)
library(tm)
library(ggplot2)
library(RWeka)
library(stringi)
library(knitr)
library(slam)

# Data file and connections established
con <- file('en_US.blogs.txt', 'r')
blogsdata <- readLines(con, skipNul = TRUE)
close(con)
con <- file('en_US.news.txt', 'r')
newsdata <- readLines(con, skipNul = TRUE)
close(con)
con <- file('en_US.twitter.txt', 'r')
twitterdata <- readLines(con, skipNul = TRUE)
close(con)

# Random sampling of datasets
set.seed(999)
samplerate <- 0.03
sampledata <- c(sample(blogsdata, length(blogsdata) * samplerate),
                sample(newsdata, length(newsdata) * samplerate),
                sample(twitterdata, length(twitterdata) * samplerate))

# Step through the cleaning processes
myCorpus <- VCorpus(VectorSource(sampledata))
identifier <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
myCorpus <- tm_map(myCorpus, identifier, "(f|ht)tp(s?)://(.*)[.][a-z]+")
myCorpus <- tm_map(myCorpus, identifier, "@[^\\s]+")
# remove punctuation
myCorpus <- tm_map(myCorpus, removePunctuation)
# remove numbers
myCorpus <- tm_map(myCorpus, removeNumbers)
# remove whitespace
myCorpus <- tm_map(myCorpus, stripWhitespace)
# convert to lower
myCorpus <- tm_map(myCorpus, tolower)
# convert to plain text
myCorpus <- tm_map(myCorpus, PlainTextDocument)
# remove english stopwords
singlecorpus <- tm_map(myCorpus, removeWords, stopwords('english'))

# N-grams up to 5-gram
getFreq <- function(tdm) {
  freq <- sort(rowSums(as.matrix(rollup(tdm, 2, FUN = sum)), na.rm = T), decreasing = TRUE)
  return(data.frame(word = names(freq), freq = freq))
}
gram2 <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
gram3 <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
gram4 <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4))
gram5 <- function(x) NGramTokenizer(x, Weka_control(min = 5, max = 5))

# aggregate frequencies from myCorpus
f1 <- getFreq(removeSparseTerms(TermDocumentMatrix(singlecorpus), 0.999))
save(f1, file = "f1data.rds")
f2 <- getFreq(TermDocumentMatrix(myCorpus, control = list(tokenize = gram2, bounds = list(global = c(5, Inf)))))
save(f2, file = "f2data.rds")
f3 <- getFreq(TermDocumentMatrix(myCorpus, control = list(tokenize = gram3, bounds = list(global = c(3, Inf)))))
save(f3, file = "f3data.rds")
f4 <- getFreq(TermDocumentMatrix(myCorpus, control = list(tokenize = gram4, bounds = list(global = c(2, Inf)))))
save(f4, file = "f4data.rds")
f5 <- getFreq(TermDocumentMatrix(myCorpus, control = list(tokenize = gram5, bounds = list(global = c(2, Inf)))))
save(f5, file = "f5data.rds")

allf <- list("f1" = f1, "f2" = f2, "f3" = f3, "f4" = f4, "f5" = f5)
save(allf, file = "allfdata.rds")
/ngrams.R
no_license
generalinsight/CourseraDataScienceCapstone
R
false
false
2,843
r
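The script above only writes the frequency tables to disk; a sketch of how they might be queried for next-word prediction with a simple backoff follows. The `predict_next` helper is hypothetical (not part of the capstone script) and assumes each table in `allf` has `word` (a space-separated n-gram) and `freq` columns already sorted by decreasing frequency.

load("allfdata.rds")   # provides `allf`, saved by the script above

predict_next <- function(phrase, allf, top = 3) {
  tokens <- tolower(unlist(strsplit(phrase, "\\s+")))
  for (n in 5:2) {                                   # back off from 5-grams to bigrams
    k <- n - 1
    if (length(tokens) < k) next
    prefix <- paste(tail(tokens, k), collapse = " ")
    tab    <- allf[[paste0("f", n)]]
    hits   <- tab[startsWith(as.character(tab$word), paste0(prefix, " ")), ]
    if (nrow(hits) > 0) {
      # last word of each matching n-gram, already ordered by frequency
      preds <- sapply(strsplit(as.character(hits$word), " "), tail, 1)
      return(head(unique(preds), top))
    }
  }
  head(as.character(allf$f1$word), top)              # fall back to most frequent unigrams
}

predict_next("thanks for the", allf)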
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{deployToShinyApps}
\alias{deployToShinyApps}
\title{Deploy VDB to shinyapps.io}
\usage{
deployToShinyApps(vdbConn = getOption("vdbConn"), appName = NULL,
  account = NULL, redeploy = TRUE, size = NULL, instances = NULL,
  quiet = FALSE)
}
\arguments{
\item{vdbConn}{VDB connection settings}

\item{appName}{name of application (app will be available at
https://[account].shinyapps.io/[appName]/) - if not supplied, the name of the
VDB connection is used}

\item{account}{passed to \code{shinyapps::configureApp}}

\item{redeploy}{passed to \code{shinyapps::configureApp}}

\item{size}{passed to \code{shinyapps::configureApp}}

\item{instances}{passed to \code{shinyapps::configureApp}}

\item{quiet}{passed to \code{shinyapps::configureApp}}
}
\description{
Deploy VDB to shinyapps.io
}
\details{
If you do not have a shinyapps.io account and have not set your account info,
first visit here prior to calling this function:
\url{http://shiny.rstudio.com/articles/shinyapps.html}.
}
\author{
Ryan Hafen
}
\seealso{
\code{\link{syncLocalData}}
}
/Analyze/man/deployToShinyApps.Rd
permissive
alacer/renaissance
R
false
false
1,091
rd
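A hedged usage sketch based only on the signature documented above; the app and account names are placeholders, and the package name is inferred from the /Analyze/man/ path rather than stated in the file.

library(Analyze)   # assumed package name, taken from the man/ path

deployToShinyApps(vdbConn = getOption("vdbConn"),
                  appName  = "my-vdb",       # placeholder
                  account  = "my-account",   # placeholder
                  redeploy = TRUE,
                  quiet    = FALSE)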
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{run.ensemble.analysis}
\alias{run.ensemble.analysis}
\title{run ensemble.analysis}
\usage{
run.ensemble.analysis(plot.timeseries = NA)
}
\arguments{
\item{plot.timeseries}{if TRUE plots a modeled timeseries of target variable(s) with CIs}
}
\value{
nothing, creates ensemble plots as ensemble.analysis.pdf
}
\description{
run ensemble.analysis
}
\author{
David LeBauer, Shawn Serbin
}
/modules/uncertainty/man/run.ensemble.analysis.Rd
permissive
gbromley/pecan
R
false
false
445
rd
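A hedged usage sketch based on the signature above; it simply requests the modeled time-series panels in the ensemble.analysis.pdf output. The package name is assumed from the modules/uncertainty path, not stated in the file.

library(PEcAn.uncertainty)   # assumed package name for modules/uncertainty

run.ensemble.analysis(plot.timeseries = TRUE)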
## I'm using R version 3.5.1 right now ## Contains code necessary to generate simulated datasets considered ## Code was adapted for non-parallel computation and streamlined for H1 midVagina Template ## Source code from https://users.ugent.be/~shawinke/ABrokenPromise/02_dataGeneration.html ########################################################################################## set.seed(52246) dataWD <- "C:/Users/Matthew/Documents/Courses/Kai/Final Results/Final Plots/DataGeneration" setwd(dataWD) source("msWaldHMP.R") ## copy/pasted from github knitr::opts_chunk$set( cache = FALSE, tidy = TRUE, autodep = FALSE, root.dir = WD, eval = TRUE ) # The required package list: reqpkg = c("parallel", "phyloseq", "MASS", "SpiecEasi", "magrittr", "TailRank") # Load all required packages and show version for (i in reqpkg) { print(i) print(packageVersion(i)) library( i, quietly = TRUE, verbose = FALSE, warn.conflicts = FALSE, character.only = TRUE ) } # # distribution to generate data from #distribs = c("betabinCor") distribs = c("negbinCorOut") # # number of repeat datasets per unique combo of parameters reps <- 1:25L # # true positive rate TPR <- .1 # # for labelling the file later TPR_label <- "1" # # for labelling the file later letter <- "A" # # Minimum number of reads to consider an OTU 'observed' in a sample minReads <- 1L # # The delimiter in the command parameter string delim <- "_" # # Define the different biological source templates to use sampleTypes <- c("Mid.vagina") # # Define the ceiling in the number of OTUs to consider in the template nOTUs <- 1000L # # Define the number of samples in each class of a simulated experiment nObs <- c(5,15,25) # smaller sample sizes # nObs <- c(50,100,150) # larger sample sizes # # The different values of effec5t size to apply foldEffect <- c(3,5) # # The number of cores used in parallel computing (I didn't use any) nCores <- 1 # # The covariance estimation method covEstMethod = "glasso" # # Biologically relevant variables variables = c("IBDbin", "Penbin", "Sexbin") plasmSampleNames = c("Stool_sex", "Tongue.dorsum_sex") nObs = sort(nObs, decreasing = TRUE) # # Define the simulation parameters combinations simParams <- apply(expand.grid(sampleTypes, reps, nObs, distribs), 1L, paste, collapse = delim) simParams <- gsub( pattern = " ", replacement = "", x = simParams, fixed = TRUE ) simParamsLabels <- c("SampleType", "Replicate", "nSamples", "Distribution") simParamsH1 <- apply(expand.grid(sampleTypes, reps, foldEffect, nObs, distribs), 1L, paste, collapse = delim) simParamsH1 <- gsub( pattern = " ", replacement = "", x = simParamsH1, fixed = TRUE ) # Define the labels to go with each element of the simulation parameter # after splitting on the delimiter simParamsLabelsH1 <- c("SampleType", "Replicate", "EffectSize", "nSamples", "Distribution") seq <- 1:length(simParamsH1) ##################################################### load("physeqListV13.RData") load("physeqListV35.RData") physeqListV13AG <- physeqListV13 if (!file.exists("physeqList4Trim.RData")) { OTUsKeep = lapply(physeqListV13AG, function(x) { relAbundances = taxa_sums(x) names(sort(relAbundances, decreasing = TRUE)[1:nOTUs]) }) physeqList4Trim = mapply( physeqListV13AG, OTUsKeep, FUN = function(phy, otu) { prune_taxa(phy, taxa = otu) } ) #physeqList4Trim$AGstool <- NULL #rm(physeqListV13AG) save(physeqList4Trim, file = "physeqList4Trim.RData") } else { load("physeqList4Trim.RData") } print("Done Loading Data") if (!file.exists(file = "piMoMs.RData")) { piMoMs <- lapply(physeqList4Trim, 
function(x) { if (taxa_are_rows(x)) { piMoM4Wald(t(x@otu_table@.Data)) } else { piMoM4Wald(x@otu_table@.Data) } }) thetaMoMs <- sapply(physeqList4Trim, function(x) { if (taxa_are_rows(x)) { weirMoM4Wald(t(x@otu_table@.Data), se = FALSE) } else { weirMoM4Wald(x@otu_table@.Data) } }) save(thetaMoMs, piMoMs, file = "piMoMs.RData") } else { load(file = "piMoMs.RData") } #### Negative binomial parameter estimation #### if (!file.exists("MLES.RData")) { #clu <- makeCluster(nCores, outfile = "logFileNBfits.txt") #clusterEvalQ(cl = clu, { require(phyloseq, quietly = TRUE) require(MASS, quietly = TRUE) #}) NBfitsList <- lapply(physeqList4Trim, function(x) { if (taxa_are_rows(x)) { logLibSizes = log(colSums(x@otu_table@.Data)) apply(x@otu_table@.Data, 1, function(y) { try(glm.nb(y ~ offset(logLibSizes), link = "log"), silent = TRUE) }) } else { logLibSizes = log(rowSums(x@otu_table@.Data)) apply(x@otu_table@.Data, 2, function(y) { try(glm.nb(y ~ offset(logLibSizes), link = "log"), silent = TRUE) }) } }) #stopCluster(clu) rhoMLEs = lapply(NBfitsList, function(x) { tmp = sapply(x, function(y) { if (class(y)[1] != "negbin") { NA } else { exp(y$coef[1]) } }) names(tmp) = names(x) res = tmp[!is.na(tmp)] res / sum(res) }) #Renormalize! phiMLEs = lapply(NBfitsList, function(x) { tmp = sapply(x, function(y) { if (class(y)[1] != "negbin") { NA } else { 1 / y$theta } }) names(tmp) = names(x) tmp[!is.na(tmp)] }) PearRes = lapply(NBfitsList, function(x) { pr <- try(sapply(x, residuals, type = "pearson"), silent = TRUE) if (class(pr) == "try-error") { invisible() } else{ return(pr) } }) save(list = c("rhoMLEs", "phiMLEs", "PearRes"), file = "MLES.RData") } else { load("MLES.RData") } ExtrNBouts = function(PearRes, PearsonCutOff = 5) { outliers = abs(PearRes) > PearsonCutOff freqVec = rowSums(outliers) / ncol(outliers) #Relative frequency: outliers per taxon PearVec = PearRes[outliers] list(freqOut = freqVec, Pres = PearVec) } ## Get pearson residuals for introducing outliers later PearRes <- PearRes#[-(which(sapply(PearRes,is.null),arr.ind=TRUE))] OutLieList = lapply(PearRes, ExtrNBouts) save(OutLieList, file = "outLieList.RData") print("Done with Outliers") print(length(OutLieList)) # if (!file.exists("CovListEst.RData")) { # covListEst = lapply(physeqList4Trim, spiec.easi, icov.select.params = list(ncores = nCores)) # save(covListEst, file = "CovListEst.RData") # } else { ## Note From Matt: Running the actual code broke two of my university's computers ## I requested this directly from Hawinkel et al, who kindly provided it instead. load(file = "covList.RData") ### generate Dirichlet realisations, taken from gtools (identical in MCMCpack) rDirichlet <- function(n, alpha) { l <- length(alpha) x <- matrix(rgamma(l * n, alpha), ncol = l, byrow = TRUE) sm <- x %*% rep(1, l) res <- x / as.vector(sm) res[res <= 10 * .Machine$double.eps] <- 0 res } # # A custom quantile beta-binomial function with `na.rm=TRUE`. Still relies # on the Tailrank package qbetabin = function(p, N, u, v) { pp <- cumsum(dbb(0:N, N, u, v)) sapply(p, function(x) sum(pp < x, na.rm = TRUE)) } # # A function to generate correlated multivariate betabinomial data pi: a # vector of proportions, summing to 1 libSizes: library sizes theta: the # overdispersion parameter rmvbetabin = function(n, pi, Sigma, theta, libSizes, ...) 
{ Sigma <- as.matrix(Sigma) Cor <- cov2cor(Sigma) SDs <- sqrt(diag(Sigma)) if (missing(pi)) stop("pi is required") if (length(pi) != dim(Sigma)[1]) stop("Sigma and pi dimensions don't match") if (missing(theta)) { stop("No overdispersion parameter supplied") } d <- length(pi) normd <- rmvnorm(n, rep(0, d), Sigma = Cor) #The normal-to-anything framework unif <- pnorm(normd) data <- mapply( unif, rep(pi, each = nrow(unif)), libSizes, FUN = function(u, p, l) { alphaPar = p * (1 - theta) / theta betaPar = (1 - p) * (1 - theta) / theta qbetabin(u, N = l, u = alphaPar, v = betaPar) } ) data <- .fixInf(data) return(data) } # First an auxiliary function .fixInf <- function(data) { # hacky way of replacing infinite values with the col max + 1 if (any(is.infinite(data))) { data <- apply(data, 2, function(x) { if (any(is.infinite(x))) { x[ind <- which(is.infinite(x))] <- NA x[ind] <- max(x, na.rm = TRUE) + 1 } x }) } data } # # Generate correlated NB data, given a covariance matrix n: number of # observations mu: means of NB distribution Sigma: a positive definite # covariance matrix ks: overdispersion parameters (size) rmvnegbin = function(n, mu, Sigma, ks, ...) { Sigma <- as.matrix(Sigma) Cor <- cov2cor(Sigma) SDs <- sqrt(diag(Sigma)) if (missing(mu)) stop("mu is required") if (dim(mu)[2] != dim(Sigma)[2]) stop("Sigma and mu dimensions don't match") if (missing(ks)) { ks <- unlist(lapply(1:length(SDs), function(i) .negbin_getK(mu[i], SDs[i]))) } d <- dim(mu)[2] normd <- rmvnorm(n, rep(0, d), Sigma = Cor) #The normal-to-anything framework unif <- pnorm(normd) data <- t(qnbinom(t(unif), mu = t(mu), size = ks)) data <- .fixInf(data) return(data) } ### `sampleSizes` is a vectors, `alphas` ,`phis`, 'rhos' and `Sigma` matrices, `libSizes` a list ### final matrix has as rownames the sample names taken from `libSizes` ### and as colnames OTU names taken from rownames of `alphas` or `rhos` ### distribution is the countsGen <- function(sampleSizes, distribution = c( "negbinNoCor", "negbinCor", "dirmult", "betabinCor", "negbinCorOut", "negbinNoCorOut", "betabinCorOut" ), alphas = NULL, theta = NULL, rhos = NULL, phis = NULL, libSizes = NULL, Sigma = NULL, onlyCounts = TRUE, outLiers = NULL) { if (!is.list(libSizes)) { stop("`libSizes` must be a list of length `length(sampleSizes)`") } else { } libSizes <- unlist(libSizes, use.names = TRUE) if (distribution %in% c("negbinCorOut", "negbinNoCorOut", "betabinCorOut") & is.null(outLiers)) { stop("No outlier matrix supplied") } if (!distribution %in% c( "negbinNoCor", "negbinCor", "dirmult", "betabinCor", "negbinCorOut", "negbinNoCorOut", "betabinCorOut", "betabinCorOut" )) { stop("No valid count distribution supplied") } else if (distribution %in% c("negbinCor", "negbinNoCor", "negbinCorOut", "negbinNoCorOut", "betabinCorOut")) ## Negative binomial { if (is.null(rhos) | is.null(phis)) { stop("No valid NB parameters supplied") } else{ } nbData <- matrix(NA_integer_, nrow = sum(sampleSizes), ncol = nrow(rhos)) #All datasets have the same number of OTUs samNames <- rep(paste0("grp", seq_along(sampleSizes)), sampleSizes) samNames <- paste(samNames, rep.int(seq_len(sampleSizes[1]), length(sampleSizes)), sep = ":") rownames(nbData) <- samNames colnames(nbData) <- rownames(as.matrix(rhos)) samSizeSeq <- c(0L, cumsum(sampleSizes)) if (distribution %in% c("negbinNoCor", "negbinNoCorOut")) { for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] ## Negative binomial draws nbData[indSel,] <- mapply( 
rhos[, nRun], phis[, nRun], FUN = function(rho, phi) { rnbinom(n = sampleSizes[nRun], mu = rho * libSizes[indSel], size = 1 / phi) } ) } } else if (distribution %in% c("negbinCor", "negbinCorOut")) { if (is.null(Sigma)) { stop("No correlation matrix given") } else if (dim(Sigma)[1] != dim(Sigma)[2]) { stop("Correlation matrix is not square") } else{ } for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] ## Negative binomial draws with underlying correlation nbData[indSel,] <- rmvnegbin( n = sampleSizes[nRun], mu = t(tcrossprod(rhos[, nRun], libSizes[indSel])), ks = 1 / phis[, nRun], Sigma = Sigma ) } #end: for } #end- if negbinCor if (distribution %in% c("negbinCorOut", "negbinNoCorOut", "betabinCorOut")) { #Introduce outliers into generated data #Introduce outliers randomly over entire counts matrix nSamples = dim(nbData)[1] nTaxa = dim(nbData)[2] outFracs = outLiers[["freqOut"]][names(libSizes)[names(libSizes) %in% names(outLiers[["freqOut"]])]] #Fraction of outliers in each sample, kept connected with libSizes nOuts = rbinom(nSamples, nTaxa, outFracs) #Number of outliers in each sample nbData = t(sapply(1:nSamples, function(i) { if (nOuts[i] == 0) { return(nbData[i, ]) } pearRes = sample(outLiers[["Pres"]], nOuts[i], replace = TRUE) #Sample Pearson residuals taxaIDs = sample(colnames(nbData), nOuts[i], replace = FALSE) expects = libSizes[i] * rhos[taxaIDs, 1] #Expected outcomes newValues = sapply(round(sqrt(expects * ( 1 + expects * phis[i, 1] )) * (pearRes) + expects), max, 0) #Reconstruct outliers from Pearson residuals. Round and set negative values to zero nbData[i, taxaIDs] = newValues nbData[i, ] })) rownames(nbData) <- samNames } else{ } nbData } else if (distribution %in% c("dirmult", "betabinCor")) { if (length(sampleSizes) != NCOL(alphas)) { stop("length(sampleSizes) must be the same of ncol(alphas)") } else { } dmData <- matrix(NA_integer_, nrow = sum(sampleSizes), ncol = dim(Sigma)[1]) samNames <- rep(paste0("grp", seq_along(sampleSizes)), sampleSizes) # samNames <- paste(samNames, names(libSizes), sep = ":") samNames <- paste(samNames, rep.int(seq_len(sampleSizes[1]), length(sampleSizes)), sep = ":") # Ntaxa=dim(Sigma)[1] # id=sample(1:nrow(alphas), Ntaxa) # alphas=alphas[id,] alphas = alphas / sum(alphas[, 1]) #renormalize based on the unchanged alphas rownames(dmData) <- samNames colnames(dmData) <- rownames(alphas) piDir <- dmData piDir[] <- NA_real_ samSizeSeq <- c(0L, cumsum(sampleSizes)) if (distribution == "dirmult") { ## gamma parameter for Dirichlet distribution gammas <- alphas * (1 - theta) / theta for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] ## Dirichlet draws piDir[indSel,] <- rDirichlet(# piDir[indSel, ] <- gtools:::rdirichlet( n = sampleSizes[nRun], alpha = gammas[, nRun]) ## Multinomial draws with Dirichlet probabilities (Dirichlet-Multinomial) dmData[indSel,] <- t(sapply(indSel, function(iRun) { rmultinom(n = 1L, size = libSizes[iRun], prob = piDir[iRun,]) })) }# END - for: loop along sample sizes } else if (distribution == "betabinCor") { if (is.null(Sigma)) { stop("No correlation matrix given") } else if (dim(Sigma)[1] != dim(Sigma)[2]) { stop("Correlation matrix is not square") } else{ } for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] dmData[indSel,] <- rmvbetabin( n = sampleSizes[nRun], pi = alphas[, nRun], libSizes = 
libSizes[indSel], Sigma = Sigma, theta = theta ) } } if (onlyCounts) { dmData } else { list("dmData" = dmData, "piDir" = piDir) } }# END - if: distributions }# END - function: countsGen #minReads = 1 #- an OTU is considered present in a sample if it has at least 1 read #prevalence = 0.05 #- an OTU is kept if it is present in at least 5% of samples #prevalence = 0 # # Trim by prevalence and total OTU reads # ** ** NOTE: MATT SET MIN READS TO 0 ** ** simpleTrimGen <- function(obj, minReads = 1L, minPrev = .05) { # `prevalence` is the fraction of samples in which an OTU is observed at # least `minReads` times. if (class(obj) == "phyloseq") { taxRows <- taxa_are_rows(obj) if (!taxRows) { obj <- t(obj) } else { } otuTab <- as(otu_table(obj), "matrix") } else { otuTab <- obj } # END - ifelse: obj is *phyloseq* or just *matrix* ## sort OTUs first by prevalence, and then by total reads per OTU prevalence <- rowMeans(otuTab >= minReads) #prevalence <- rowMeans(otuTab > minReads) ## Matt changed this: ## Since ALDEx2 and rank normalization need full datasets, ## we will filter 5% later for only the other methodologies, and keep full ## datasets here indOTUs2Keep <- (prevalence > 0) #indOTUs2Keep <- (prevalence >= minPrev) if (class(obj) == "phyloseq") { obj = prune_taxa(obj, taxa = indOTUs2Keep) return(obj) } else { return(otuTab[indOTUs2Keep,]) } } # END - function: simpleTrim general # # Check if less than _minOTUs_ taxa are present in each sample fewTaxa <- function(physeq, minOTUs = 0L) { if (!taxa_are_rows(physeq)) { physeq <- t(physeq) } else { } any(colSums(otu_table(physeq) > 0, na.rm = TRUE) < minOTUs) } addFoldChange = function(rhos, fc, H1frac = TPR, compensate = FALSE) { if (fc == 1) { return(rhos) } nTaxa = length(rhos) if (compensate) { nOTUsUp = round(nTaxa * H1frac * (1 / (fc + 1))) #Upregulated taxa nOTUsDown = round(nTaxa * H1frac - nOTUsUp) #Downregulated taxa # cond=TRUE while(cond){ OTUids = sample(names(rhos), nOTUsUp + nOTUsDown, replace = FALSE) OTUidUps = OTUids[1:nOTUsUp] OTUidDowns = OTUids[(nOTUsUp + 1):(nOTUsDown + nOTUsUp)] rhos[OTUidUps] = rhos[OTUidUps] * fc # Add fold change up rhos[OTUidDowns] = rhos[OTUidDowns] * (1 - sum(rhos[OTUidUps]) - sum(rhos[!(names(rhos) %in% OTUids)])) / sum(rhos[OTUidDowns]) #And compensate the downs. This way the average FC is 5 in both directions and the TN taxa are really left untouched indTPup <- names(rhos) %in% OTUidUps newTaxaNamesUp <- paste0(names(rhos)[indTPup], "-TPup") indTPdown <- names(rhos) %in% OTUidDowns newTaxaNamesDown <- paste0(names(rhos)[indTPdown], "-TPdown") names(rhos)[indTPup] <- newTaxaNamesUp names(rhos)[indTPdown] <- newTaxaNamesDown } else { nOTUs = round(nTaxa * H1frac) #DA taxa OTUids = sample(names(rhos), nOTUs, replace = FALSE) rhos[OTUids] = rhos[OTUids] * fc # Add fold change up indTP <- names(rhos) %in% OTUids newTaxaNames <- paste0(names(rhos)[indTP], "-TPup") names(rhos)[indTP] <- newTaxaNames } rhos / sum(rhos) #Renormalize. } microbioSim <- function(postfix, template, estPi, estTheta, nObs, estPhis, Covar, distrib, estRhos, outLiers, foldChange = 1, compensate = FALSE) { # Generate `nObs` simulated microbiomes with `libSizes` total reads each # where `libSizes` is a list where each element contains a vector of length # equal to the value of the corresponding element of `nObs`. `libSizes` # contains samples drawn from `template` total reads. `postfix` is a dummy # idenitifer added to help distinguish simulated samples in downstream code. 
libSizesOrig <- as(sample_sums(template), "integer") libSizes <- list( sample(libSizesOrig, size = nObs, replace = TRUE), sample(libSizesOrig, size = nObs, replace = TRUE) ) # Actually create the simulated abundance table, both groups at once AltRhos = addFoldChange(estRhos, foldChange, compensate = compensate) #Relative abundances of the other group defRhos = cbind(estRhos, AltRhos) rownames(defRhos) = names(AltRhos) AltAlphas = addFoldChange(estPi, foldChange) #Relative abundances of the other group defAlphas = cbind(estPi, AltAlphas) rownames(defAlphas) = names(AltAlphas) counts <- countsGen( sampleSizes = c(nObs, nObs), alphas = defAlphas, theta = estTheta, onlyCounts = TRUE, libSizes = libSizes, rhos = defRhos, distribution = distrib, Sigma = Covar, phis = cbind(estPhis, estPhis), outLiers = outLiers ) ## Add the OTU names to the OTU (column) indices, not needed with countsGen ## colnames(counts) <- taxa_names(template) Add new simulated sample_names to ## the row (sample) indices, not needed rownames(counts) <- ## paste(rownames(counts), '::', postfix, sep = '') # Put simulated abundances together with metadata as a phyloseq object, taxa # are rows here as it is consistent with other packages otuTab <- otu_table(t(counts), taxa_are_rows = TRUE) # Define data.frame that will become sample_data samNames <- sample_names(otuTab) samNames <- matrix(unlist(strsplit( x = samNames, split = ":", fixed = TRUE )), nrow = 2L) samData <- data.frame( group = samNames[1L,], sample = samNames[2L,], postfix = postfix, stringsAsFactors = FALSE ) rownames(samData) <- sample_names(otuTab) samData <- sample_data(samData) # Return a phyloseq object return(phyloseq(otuTab, samData)) } # END - function: microbioSim makePlasmodes = function(Sample, samSize, postfix, var, physeq, meanLFDR = meanLFDR) { if (!taxa_are_rows(physeq)) { physeq = t(physeq) } counts = physeq@otu_table@.Data treatment = ifelse(sample_data(physeq)[[var]] == unique(sample_data(physeq)[[var]])[1], "grp1", "grp2") idNA = is.na(treatment) treatment = treatment[!idNA] counts = counts[,!idNA] if (max(table(treatment)) < 2 * samSize) { return(NULL) } # switchTrt = table(treatment)[1] < table(treatment)[2] plasmode = SimData( counts, treatment, sort.method = "unpaired", k.ind = samSize, n.diff = round(nrow(counts) * TPR), weights = meanLFDR[[paste(Sample, var, sep = delim)]], n.genes = nrow(counts), norm.factors = colSums(counts) ) # Construct phyloseq object from here otuTab = otu_table(plasmode$counts, taxa_are_rows = TRUE) samTab = sample_data(data.frame( group = ifelse( plasmode$treatment == unique(plasmode$treatment)[1], "grp1", "grp2" ), postfix = postfix )) physeq = phyloseq(samTab, otuTab) taxaNames = taxa_names(physeq) taxaNames[plasmode$DE.ind] = sapply(taxaNames[plasmode$DE.ind], paste0, "-TP") taxa_names(physeq) = taxaNames physeq } physeqListV13AG = lapply(sampleTypes, function(x) { physeqListV13AG[[x]] }) names(physeqListV13AG) = sampleTypes ## Do the same for the HMP data based on gender and runcenter WUGC: ## Washington University Genome Center HMPsubset = lapply(physeqListV13AG[names(physeqListV13AG) != "AGstool"], function(x) { prune_samples(x, samples = sample_data(x)$RUNCENTER == "WUGC" & sample_data(x)$sex == "female") }) cleanList0 = c(HMPsubset) cleanList = lapply(cleanList0, function(x) { if (taxa_are_rows(x)) { piMoMs = piMoM4Wald(t(x@otu_table@.Data)) } else { piMoMs = piMoM4Wald(x@otu_table@.Data) } names(piMoMs) = taxa_names(x) taxaKeep = names(sort(piMoMs, decreasing = TRUE)[1:nOTUs]) prune_taxa(x = x, taxaKeep) 
}) rm(cleanList0, HMPsubset) subSample = function(physeq, split = 0.5, nObs, replace = FALSE, postfix) { if (nsamples(physeq) < 2 * nObs) { return(NULL) } #If not enough samples to perform subsampling, return NULL idSample = sample(1:nsamples(physeq), nObs * 2, replace = replace) # A complication: we want to stick to the phyloseq framework, but then # samples cannot have identical names. We need a workaround when working # with sampling with replacement if (replace) { Table = table(idSample) if (max(Table) > 1) { for (x in (2:max(Table))) { samNumbers = as.integer(names(Table[Table >= x])) tmp = prune_samples(physeq, samples = sample_names(physeq)[samNumbers]) # Now change the sample names to avoid conflicts sample_names(tmp) = paste0(sample_names(tmp), x) physeqTmp = merge_phyloseq(tmp, physeqTmp) } } } else { physeqTmp = prune_samples(physeq, samples = sample_names(physeq)[idSample]) } groupSizes = round(c(split * nObs * 2, (1 - split) * nObs * 2)) # Assign groups at random sample_data(physeqTmp) = data.frame( group = paste0("grp", as.integer(sample(c( rep(1, groupSizes[1]), rep(2, groupSizes[2]) )))), sample = rep(seq(1, nObs), 2), postfix = postfix, row.names = sample_names(physeqTmp) ) return(physeqTmp) } splitSample = function(physeq, nEval, postfix, variable, maxVerif = 100) { physeq = prune_samples(x = physeq, as.vector(!is.na(sample_data(physeq)[, variable]))) #Remove NA's samDataDF = data.frame(sample_data(physeq)) nTrt = sum(samDataDF[, variable], na.rm = TRUE) nContr = nsamples(physeq) - nTrt if (min(nTrt, nContr) < 3.5 * nEval) { stop( "Not enough samples to make verification set two and a half times the size of the evaluation set!" ) } else { } samNames = sample_names(physeq) samDataDF$set = rep("Not selected", nsamples(physeq)) idEvalTrt = samNames[sample(which(samDataDF[, variable]), nEval)] idEvalControl = samNames[sample(which(!samDataDF[, variable]), nEval)] idVerifTrt = sample(samNames[!(samNames %in% idEvalTrt) & samDataDF[, variable]], min(nTrt - nEval, maxVerif)) idVerifControl = sample(samNames[!(samNames %in% idEvalControl) & !samDataDF[, variable]], min(min(nTrt, nContr) - nEval, maxVerif)) samDataDF$set[samNames %in% idEvalTrt | samNames %in% idEvalControl] = "eval" samDataDF$set[samNames %in% idVerifTrt | samNames %in% idVerifControl] = "verif" samDataDF$group = ifelse(samDataDF[, variable], "grp1", "grp2") sample_data(physeq) = sample_data(samDataDF) # returnSeq = prune_samples(x=physeq, samDataDF$set!='Not selected') evalPhy = #simpleTrimGen(prune_samples(x = physeq, samDataDF$set == "eval")) verifPhy = #simpleTrimGen(prune_samples(x = physeq, samDataDF$set == "verif")) # Make sure only taxa present in both groups are retained taxaKeep = intersect(taxa_names(evalPhy), taxa_names(verifPhy)) evalPhy = prune_taxa(x = evalPhy, taxaKeep) verifPhy = prune_taxa(x = verifPhy, taxaKeep) returnSeq = merge_phyloseq(evalPhy, verifPhy) rm(evalPhy, verifPhy) returnSeq } # 109 penicilin cases, 92 IBD cases ## Generate the Data require(phyloseq, quietly = TRUE) simListH1 <- lapply(simParamsH1[seq], function(iterRun) { set.seed(grep(iterRun, simParamsH1)) ## resets seed each iteration params <- strsplit(iterRun, delim)[[1]] names(params) <- simParamsLabelsH1 ## write info about the current simulation on log-file sink(file = ## 'log0.txt', append = TRUE) cat(iterRun, "\t") # sink() # type of sample sampleTypeIter <- params["SampleType"] # The sample size to use for each group in this simulation nObs <- as.integer(params["nSamples"]) # template and parameters template <- 
physeqList4Trim[[sampleTypeIter]] estPi <- piMoMs[[sampleTypeIter]] estTheta <- thetaMoMs[sampleTypeIter] estCov = covList[[sampleTypeIter]] estPhis = phiMLEs[[sampleTypeIter]] estRhos = rhoMLEs[[sampleTypeIter]] outLiers = OutLieList[[sampleTypeIter]] fC = as.numeric(params["EffectSize"]) distrib = params["Distribution"] # Rarely a simulation has a weird value and fails. Catch these with `try`, # and repeat the simulation call if error (it will be a new seed) tryAgain <- TRUE infLoopCount <- 1L maxLoops <- 15L while (tryAgain & infLoopCount <= maxLoops) { simResH1 <- microbioSim( postfix = iterRun, distrib = distrib, template = template, estPi = estPi, estTheta = estTheta, nObs = nObs, estRhos = estRhos, estPhis = estPhis, Covar = estCov, foldChange = fC, outLiers = outLiers, compensate = FALSE ) ## Make sure there are at least 3 taxa per sample, even after trimming if (is.null(simResH1) | inherits(simResH1, "try-error")) { tryAgain <- TRUE infLoopCount <- infLoopCount + 1L } else { #simResH1 <- #simpleTrimGen(simResH1) if (fewTaxa(simResH1, 3L)) { tryAgain <- TRUE infLoopCount <- infLoopCount + 1L } else { tryAgain <- FALSE } # END - ifelse: check not only error but *fewTaxa* success } # END - ifelse: check successful simulation } # END - while: protect against infinite loops and simulations failures if (infLoopCount > maxLoops) { warning("Consistent error found during simulation. Need to investigate cause.", immediate. = TRUE) cat(iterRun) } else { #simResH1 <- #simpleTrimGen(simResH1) ## only the _nOTUs_ most abundant OTUs are kept, unless the OTUs are already ## less if (ntaxa(simResH1) > nOTUs) { whichOTUs2Keep <- taxa_names(simResH1)[seq_len(nOTUs)] simResH1 <- prune_taxa(whichOTUs2Keep, simResH1) } else { } } # END - ifelse: consistent error in current simulation ## log file writing cat("FINISHED\n") return(simResH1) }) # END - parallelised simulations #if (!file.exists("./brokenpromise/results/simulationListBetaBinomAH1.RData")) { names(simListH1) <- simParamsH1[seq] any(sapply(simListH1, class) != "phyloseq") file_name <- paste0("simulationListNegBin", letter, ".RData") save(simListH1, simParamsLabelsH1, simParams, TPR, delim, file = file_name) # #} else { # #} else { # # } # END - if: file with simulations under H1 already exists # #} ## Matt's Code: Load/Annotate SimListH1 pr <- 0.1 BrokenPromiseData <- list() for (index in 1:length(simListH1)) { physeq <- simListH1[[index]] sample_data <- sample_data(physeq) %>% as.data.frame sample_data$group <- c(rep("grp1", nrow(sample_data) / 2), rep("grp2", nrow(sample_data) / 2)) %>% factor sample_data(physeq) <- sample_data(sample_data) otu_table(physeq) <- otu_table(physeq) %>% as("matrix") %>% #t %>% otu_table(taxa_are_rows = T) truede <- grep("-TP", rownames(otu_table(physeq))) params <- strsplit(names(simListH1)[index], "_") fx <- params[[1]][3] it <- params[[1]][2] sampleType <- params[[1]][1] distr <- params[[1]][5] iter <- paste0("|", it, "_", sampleType, "_", distr) #### m <- params[[1]][4] datalist <- list(physeq = physeq, truede = truede) BrokenPromiseData[[paste("BrokenPromiseH1", fx, m, pr, iter, sep = "_")]] <- datalist } distr <- "" if (distribs == "negbinCorOut") { distr <- "NegBin" } else if (distribs == "betabinCor") { distr <- "BetaBin" } ## the final simulated datasets to perform differential abundance tests on save(BrokenPromiseData, file = paste0("BrokenPromiseDataJustSmallSamples_", distr, ".RData"))
/RCode/DataGeneration.R
no_license
matthewlouisdavisBioStat/Rank-Normalization-Empowers-a-T-Test
R
false
false
34,689
r
## I'm using R version 3.5.1 right now ## Contains code necessary to generate simulated datasets considered ## Code was adapted for non-parallel computation and streamlined for H1 midVagina Template ## Source code from https://users.ugent.be/~shawinke/ABrokenPromise/02_dataGeneration.html ########################################################################################## set.seed(52246) dataWD <- "C:/Users/Matthew/Documents/Courses/Kai/Final Results/Final Plots/DataGeneration" setwd(dataWD) source("msWaldHMP.R") ## copy/pasted from github knitr::opts_chunk$set( cache = FALSE, tidy = TRUE, autodep = FALSE, root.dir = WD, eval = TRUE ) # The required package list: reqpkg = c("parallel", "phyloseq", "MASS", "SpiecEasi", "magrittr", "TailRank") # Load all required packages and show version for (i in reqpkg) { print(i) print(packageVersion(i)) library( i, quietly = TRUE, verbose = FALSE, warn.conflicts = FALSE, character.only = TRUE ) } # # distribution to generate data from #distribs = c("betabinCor") distribs = c("negbinCorOut") # # number of repeat datasets per unique combo of parameters reps <- 1:25L # # true positive rate TPR <- .1 # # for labelling the file later TPR_label <- "1" # # for labelling the file later letter <- "A" # # Minimum number of reads to consider an OTU 'observed' in a sample minReads <- 1L # # The delimiter in the command parameter string delim <- "_" # # Define the different biological source templates to use sampleTypes <- c("Mid.vagina") # # Define the ceiling in the number of OTUs to consider in the template nOTUs <- 1000L # # Define the number of samples in each class of a simulated experiment nObs <- c(5,15,25) # smaller sample sizes # nObs <- c(50,100,150) # larger sample sizes # # The different values of effec5t size to apply foldEffect <- c(3,5) # # The number of cores used in parallel computing (I didn't use any) nCores <- 1 # # The covariance estimation method covEstMethod = "glasso" # # Biologically relevant variables variables = c("IBDbin", "Penbin", "Sexbin") plasmSampleNames = c("Stool_sex", "Tongue.dorsum_sex") nObs = sort(nObs, decreasing = TRUE) # # Define the simulation parameters combinations simParams <- apply(expand.grid(sampleTypes, reps, nObs, distribs), 1L, paste, collapse = delim) simParams <- gsub( pattern = " ", replacement = "", x = simParams, fixed = TRUE ) simParamsLabels <- c("SampleType", "Replicate", "nSamples", "Distribution") simParamsH1 <- apply(expand.grid(sampleTypes, reps, foldEffect, nObs, distribs), 1L, paste, collapse = delim) simParamsH1 <- gsub( pattern = " ", replacement = "", x = simParamsH1, fixed = TRUE ) # Define the labels to go with each element of the simulation parameter # after splitting on the delimiter simParamsLabelsH1 <- c("SampleType", "Replicate", "EffectSize", "nSamples", "Distribution") seq <- 1:length(simParamsH1) ##################################################### load("physeqListV13.RData") load("physeqListV35.RData") physeqListV13AG <- physeqListV13 if (!file.exists("physeqList4Trim.RData")) { OTUsKeep = lapply(physeqListV13AG, function(x) { relAbundances = taxa_sums(x) names(sort(relAbundances, decreasing = TRUE)[1:nOTUs]) }) physeqList4Trim = mapply( physeqListV13AG, OTUsKeep, FUN = function(phy, otu) { prune_taxa(phy, taxa = otu) } ) #physeqList4Trim$AGstool <- NULL #rm(physeqListV13AG) save(physeqList4Trim, file = "physeqList4Trim.RData") } else { load("physeqList4Trim.RData") } print("Done Loading Data") if (!file.exists(file = "piMoMs.RData")) { piMoMs <- lapply(physeqList4Trim, 
function(x) { if (taxa_are_rows(x)) { piMoM4Wald(t(x@otu_table@.Data)) } else { piMoM4Wald(x@otu_table@.Data) } }) thetaMoMs <- sapply(physeqList4Trim, function(x) { if (taxa_are_rows(x)) { weirMoM4Wald(t(x@otu_table@.Data), se = FALSE) } else { weirMoM4Wald(x@otu_table@.Data) } }) save(thetaMoMs, piMoMs, file = "piMoMs.RData") } else { load(file = "piMoMs.RData") } #### Negative binomial parameter estimation #### if (!file.exists("MLES.RData")) { #clu <- makeCluster(nCores, outfile = "logFileNBfits.txt") #clusterEvalQ(cl = clu, { require(phyloseq, quietly = TRUE) require(MASS, quietly = TRUE) #}) NBfitsList <- lapply(physeqList4Trim, function(x) { if (taxa_are_rows(x)) { logLibSizes = log(colSums(x@otu_table@.Data)) apply(x@otu_table@.Data, 1, function(y) { try(glm.nb(y ~ offset(logLibSizes), link = "log"), silent = TRUE) }) } else { logLibSizes = log(rowSums(x@otu_table@.Data)) apply(x@otu_table@.Data, 2, function(y) { try(glm.nb(y ~ offset(logLibSizes), link = "log"), silent = TRUE) }) } }) #stopCluster(clu) rhoMLEs = lapply(NBfitsList, function(x) { tmp = sapply(x, function(y) { if (class(y)[1] != "negbin") { NA } else { exp(y$coef[1]) } }) names(tmp) = names(x) res = tmp[!is.na(tmp)] res / sum(res) }) #Renormalize! phiMLEs = lapply(NBfitsList, function(x) { tmp = sapply(x, function(y) { if (class(y)[1] != "negbin") { NA } else { 1 / y$theta } }) names(tmp) = names(x) tmp[!is.na(tmp)] }) PearRes = lapply(NBfitsList, function(x) { pr <- try(sapply(x, residuals, type = "pearson"), silent = TRUE) if (class(pr) == "try-error") { invisible() } else{ return(pr) } }) save(list = c("rhoMLEs", "phiMLEs", "PearRes"), file = "MLES.RData") } else { load("MLES.RData") } ExtrNBouts = function(PearRes, PearsonCutOff = 5) { outliers = abs(PearRes) > PearsonCutOff freqVec = rowSums(outliers) / ncol(outliers) #Relative frequency: outliers per taxon PearVec = PearRes[outliers] list(freqOut = freqVec, Pres = PearVec) } ## Get pearson residuals for introducing outliers later PearRes <- PearRes#[-(which(sapply(PearRes,is.null),arr.ind=TRUE))] OutLieList = lapply(PearRes, ExtrNBouts) save(OutLieList, file = "outLieList.RData") print("Done with Outliers") print(length(OutLieList)) # if (!file.exists("CovListEst.RData")) { # covListEst = lapply(physeqList4Trim, spiec.easi, icov.select.params = list(ncores = nCores)) # save(covListEst, file = "CovListEst.RData") # } else { ## Note From Matt: Running the actual code broke two of my university's computers ## I requested this directly from Hawinkel et al, who kindly provided it instead. load(file = "covList.RData") ### generate Dirichlet realisations, taken from gtools (identical in MCMCpack) rDirichlet <- function(n, alpha) { l <- length(alpha) x <- matrix(rgamma(l * n, alpha), ncol = l, byrow = TRUE) sm <- x %*% rep(1, l) res <- x / as.vector(sm) res[res <= 10 * .Machine$double.eps] <- 0 res } # # A custom quantile beta-binomial function with `na.rm=TRUE`. Still relies # on the Tailrank package qbetabin = function(p, N, u, v) { pp <- cumsum(dbb(0:N, N, u, v)) sapply(p, function(x) sum(pp < x, na.rm = TRUE)) } # # A function to generate correlated multivariate betabinomial data pi: a # vector of proportions, summing to 1 libSizes: library sizes theta: the # overdispersion parameter rmvbetabin = function(n, pi, Sigma, theta, libSizes, ...) 
{ Sigma <- as.matrix(Sigma) Cor <- cov2cor(Sigma) SDs <- sqrt(diag(Sigma)) if (missing(pi)) stop("pi is required") if (length(pi) != dim(Sigma)[1]) stop("Sigma and pi dimensions don't match") if (missing(theta)) { stop("No overdispersion parameter supplied") } d <- length(pi) normd <- rmvnorm(n, rep(0, d), Sigma = Cor) #The normal-to-anything framework unif <- pnorm(normd) data <- mapply( unif, rep(pi, each = nrow(unif)), libSizes, FUN = function(u, p, l) { alphaPar = p * (1 - theta) / theta betaPar = (1 - p) * (1 - theta) / theta qbetabin(u, N = l, u = alphaPar, v = betaPar) } ) data <- .fixInf(data) return(data) } # First an auxiliary function .fixInf <- function(data) { # hacky way of replacing infinite values with the col max + 1 if (any(is.infinite(data))) { data <- apply(data, 2, function(x) { if (any(is.infinite(x))) { x[ind <- which(is.infinite(x))] <- NA x[ind] <- max(x, na.rm = TRUE) + 1 } x }) } data } # # Generate correlated NB data, given a covariance matrix n: number of # observations mu: means of NB distribution Sigma: a positive definite # covariance matrix ks: overdispersion parameters (size) rmvnegbin = function(n, mu, Sigma, ks, ...) { Sigma <- as.matrix(Sigma) Cor <- cov2cor(Sigma) SDs <- sqrt(diag(Sigma)) if (missing(mu)) stop("mu is required") if (dim(mu)[2] != dim(Sigma)[2]) stop("Sigma and mu dimensions don't match") if (missing(ks)) { ks <- unlist(lapply(1:length(SDs), function(i) .negbin_getK(mu[i], SDs[i]))) } d <- dim(mu)[2] normd <- rmvnorm(n, rep(0, d), Sigma = Cor) #The normal-to-anything framework unif <- pnorm(normd) data <- t(qnbinom(t(unif), mu = t(mu), size = ks)) data <- .fixInf(data) return(data) } ### `sampleSizes` is a vectors, `alphas` ,`phis`, 'rhos' and `Sigma` matrices, `libSizes` a list ### final matrix has as rownames the sample names taken from `libSizes` ### and as colnames OTU names taken from rownames of `alphas` or `rhos` ### distribution is the countsGen <- function(sampleSizes, distribution = c( "negbinNoCor", "negbinCor", "dirmult", "betabinCor", "negbinCorOut", "negbinNoCorOut", "betabinCorOut" ), alphas = NULL, theta = NULL, rhos = NULL, phis = NULL, libSizes = NULL, Sigma = NULL, onlyCounts = TRUE, outLiers = NULL) { if (!is.list(libSizes)) { stop("`libSizes` must be a list of length `length(sampleSizes)`") } else { } libSizes <- unlist(libSizes, use.names = TRUE) if (distribution %in% c("negbinCorOut", "negbinNoCorOut", "betabinCorOut") & is.null(outLiers)) { stop("No outlier matrix supplied") } if (!distribution %in% c( "negbinNoCor", "negbinCor", "dirmult", "betabinCor", "negbinCorOut", "negbinNoCorOut", "betabinCorOut", "betabinCorOut" )) { stop("No valid count distribution supplied") } else if (distribution %in% c("negbinCor", "negbinNoCor", "negbinCorOut", "negbinNoCorOut", "betabinCorOut")) ## Negative binomial { if (is.null(rhos) | is.null(phis)) { stop("No valid NB parameters supplied") } else{ } nbData <- matrix(NA_integer_, nrow = sum(sampleSizes), ncol = nrow(rhos)) #All datasets have the same number of OTUs samNames <- rep(paste0("grp", seq_along(sampleSizes)), sampleSizes) samNames <- paste(samNames, rep.int(seq_len(sampleSizes[1]), length(sampleSizes)), sep = ":") rownames(nbData) <- samNames colnames(nbData) <- rownames(as.matrix(rhos)) samSizeSeq <- c(0L, cumsum(sampleSizes)) if (distribution %in% c("negbinNoCor", "negbinNoCorOut")) { for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] ## Negative binomial draws nbData[indSel,] <- mapply( 
rhos[, nRun], phis[, nRun], FUN = function(rho, phi) { rnbinom(n = sampleSizes[nRun], mu = rho * libSizes[indSel], size = 1 / phi) } ) } } else if (distribution %in% c("negbinCor", "negbinCorOut")) { if (is.null(Sigma)) { stop("No correlation matrix given") } else if (dim(Sigma)[1] != dim(Sigma)[2]) { stop("Correlation matrix is not square") } else{ } for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] ## Negative binomial draws with underlying correlation nbData[indSel,] <- rmvnegbin( n = sampleSizes[nRun], mu = t(tcrossprod(rhos[, nRun], libSizes[indSel])), ks = 1 / phis[, nRun], Sigma = Sigma ) } #end: for } #end- if negbinCor if (distribution %in% c("negbinCorOut", "negbinNoCorOut", "betabinCorOut")) { #Introduce outliers into generated data #Introduce outliers randomly over entire counts matrix nSamples = dim(nbData)[1] nTaxa = dim(nbData)[2] outFracs = outLiers[["freqOut"]][names(libSizes)[names(libSizes) %in% names(outLiers[["freqOut"]])]] #Fraction of outliers in each sample, kept connected with libSizes nOuts = rbinom(nSamples, nTaxa, outFracs) #Number of outliers in each sample nbData = t(sapply(1:nSamples, function(i) { if (nOuts[i] == 0) { return(nbData[i, ]) } pearRes = sample(outLiers[["Pres"]], nOuts[i], replace = TRUE) #Sample Pearson residuals taxaIDs = sample(colnames(nbData), nOuts[i], replace = FALSE) expects = libSizes[i] * rhos[taxaIDs, 1] #Expected outcomes newValues = sapply(round(sqrt(expects * ( 1 + expects * phis[i, 1] )) * (pearRes) + expects), max, 0) #Reconstruct outliers from Pearson residuals. Round and set negative values to zero nbData[i, taxaIDs] = newValues nbData[i, ] })) rownames(nbData) <- samNames } else{ } nbData } else if (distribution %in% c("dirmult", "betabinCor")) { if (length(sampleSizes) != NCOL(alphas)) { stop("length(sampleSizes) must be the same of ncol(alphas)") } else { } dmData <- matrix(NA_integer_, nrow = sum(sampleSizes), ncol = dim(Sigma)[1]) samNames <- rep(paste0("grp", seq_along(sampleSizes)), sampleSizes) # samNames <- paste(samNames, names(libSizes), sep = ":") samNames <- paste(samNames, rep.int(seq_len(sampleSizes[1]), length(sampleSizes)), sep = ":") # Ntaxa=dim(Sigma)[1] # id=sample(1:nrow(alphas), Ntaxa) # alphas=alphas[id,] alphas = alphas / sum(alphas[, 1]) #renormalize based on the unchanged alphas rownames(dmData) <- samNames colnames(dmData) <- rownames(alphas) piDir <- dmData piDir[] <- NA_real_ samSizeSeq <- c(0L, cumsum(sampleSizes)) if (distribution == "dirmult") { ## gamma parameter for Dirichlet distribution gammas <- alphas * (1 - theta) / theta for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] ## Dirichlet draws piDir[indSel,] <- rDirichlet(# piDir[indSel, ] <- gtools:::rdirichlet( n = sampleSizes[nRun], alpha = gammas[, nRun]) ## Multinomial draws with Dirichlet probabilities (Dirichlet-Multinomial) dmData[indSel,] <- t(sapply(indSel, function(iRun) { rmultinom(n = 1L, size = libSizes[iRun], prob = piDir[iRun,]) })) }# END - for: loop along sample sizes } else if (distribution == "betabinCor") { if (is.null(Sigma)) { stop("No correlation matrix given") } else if (dim(Sigma)[1] != dim(Sigma)[2]) { stop("Correlation matrix is not square") } else{ } for (nRun in seq_along(sampleSizes)) { ## selected indices to generate indSel <- (samSizeSeq[nRun] + 1L):samSizeSeq[nRun + 1L] dmData[indSel,] <- rmvbetabin( n = sampleSizes[nRun], pi = alphas[, nRun], libSizes = 
libSizes[indSel], Sigma = Sigma, theta = theta ) } } if (onlyCounts) { dmData } else { list("dmData" = dmData, "piDir" = piDir) } }# END - if: distributions }# END - function: countsGen #minReads = 1 #- an OTU is considered present in a sample if it has at least 1 read #prevalence = 0.05 #- an OTU is kept if it is present in at least 5% of samples #prevalence = 0 # # Trim by prevalence and total OTU reads # ** ** NOTE: MATT SET MIN READS TO 0 ** ** simpleTrimGen <- function(obj, minReads = 1L, minPrev = .05) { # `prevalence` is the fraction of samples in which an OTU is observed at # least `minReads` times. if (class(obj) == "phyloseq") { taxRows <- taxa_are_rows(obj) if (!taxRows) { obj <- t(obj) } else { } otuTab <- as(otu_table(obj), "matrix") } else { otuTab <- obj } # END - ifelse: obj is *phyloseq* or just *matrix* ## sort OTUs first by prevalence, and then by total reads per OTU prevalence <- rowMeans(otuTab >= minReads) #prevalence <- rowMeans(otuTab > minReads) ## Matt changed this: ## Since ALDEx2 and rank normalization need full datasets, ## we will filter 5% later for only the other methodologies, and keep full ## datasets here indOTUs2Keep <- (prevalence > 0) #indOTUs2Keep <- (prevalence >= minPrev) if (class(obj) == "phyloseq") { obj = prune_taxa(obj, taxa = indOTUs2Keep) return(obj) } else { return(otuTab[indOTUs2Keep,]) } } # END - function: simpleTrim general # # Check if less than _minOTUs_ taxa are present in each sample fewTaxa <- function(physeq, minOTUs = 0L) { if (!taxa_are_rows(physeq)) { physeq <- t(physeq) } else { } any(colSums(otu_table(physeq) > 0, na.rm = TRUE) < minOTUs) } addFoldChange = function(rhos, fc, H1frac = TPR, compensate = FALSE) { if (fc == 1) { return(rhos) } nTaxa = length(rhos) if (compensate) { nOTUsUp = round(nTaxa * H1frac * (1 / (fc + 1))) #Upregulated taxa nOTUsDown = round(nTaxa * H1frac - nOTUsUp) #Downregulated taxa # cond=TRUE while(cond){ OTUids = sample(names(rhos), nOTUsUp + nOTUsDown, replace = FALSE) OTUidUps = OTUids[1:nOTUsUp] OTUidDowns = OTUids[(nOTUsUp + 1):(nOTUsDown + nOTUsUp)] rhos[OTUidUps] = rhos[OTUidUps] * fc # Add fold change up rhos[OTUidDowns] = rhos[OTUidDowns] * (1 - sum(rhos[OTUidUps]) - sum(rhos[!(names(rhos) %in% OTUids)])) / sum(rhos[OTUidDowns]) #And compensate the downs. This way the average FC is 5 in both directions and the TN taxa are really left untouched indTPup <- names(rhos) %in% OTUidUps newTaxaNamesUp <- paste0(names(rhos)[indTPup], "-TPup") indTPdown <- names(rhos) %in% OTUidDowns newTaxaNamesDown <- paste0(names(rhos)[indTPdown], "-TPdown") names(rhos)[indTPup] <- newTaxaNamesUp names(rhos)[indTPdown] <- newTaxaNamesDown } else { nOTUs = round(nTaxa * H1frac) #DA taxa OTUids = sample(names(rhos), nOTUs, replace = FALSE) rhos[OTUids] = rhos[OTUids] * fc # Add fold change up indTP <- names(rhos) %in% OTUids newTaxaNames <- paste0(names(rhos)[indTP], "-TPup") names(rhos)[indTP] <- newTaxaNames } rhos / sum(rhos) #Renormalize. } microbioSim <- function(postfix, template, estPi, estTheta, nObs, estPhis, Covar, distrib, estRhos, outLiers, foldChange = 1, compensate = FALSE) { # Generate `nObs` simulated microbiomes with `libSizes` total reads each # where `libSizes` is a list where each element contains a vector of length # equal to the value of the corresponding element of `nObs`. `libSizes` # contains samples drawn from `template` total reads. `postfix` is a dummy # idenitifer added to help distinguish simulated samples in downstream code. 
libSizesOrig <- as(sample_sums(template), "integer") libSizes <- list( sample(libSizesOrig, size = nObs, replace = TRUE), sample(libSizesOrig, size = nObs, replace = TRUE) ) # Actually create the simulated abundance table, both groups at once AltRhos = addFoldChange(estRhos, foldChange, compensate = compensate) #Relative abundances of the other group defRhos = cbind(estRhos, AltRhos) rownames(defRhos) = names(AltRhos) AltAlphas = addFoldChange(estPi, foldChange) #Relative abundances of the other group defAlphas = cbind(estPi, AltAlphas) rownames(defAlphas) = names(AltAlphas) counts <- countsGen( sampleSizes = c(nObs, nObs), alphas = defAlphas, theta = estTheta, onlyCounts = TRUE, libSizes = libSizes, rhos = defRhos, distribution = distrib, Sigma = Covar, phis = cbind(estPhis, estPhis), outLiers = outLiers ) ## Add the OTU names to the OTU (column) indices, not needed with countsGen ## colnames(counts) <- taxa_names(template) Add new simulated sample_names to ## the row (sample) indices, not needed rownames(counts) <- ## paste(rownames(counts), '::', postfix, sep = '') # Put simulated abundances together with metadata as a phyloseq object, taxa # are rows here as it is consistent with other packages otuTab <- otu_table(t(counts), taxa_are_rows = TRUE) # Define data.frame that will become sample_data samNames <- sample_names(otuTab) samNames <- matrix(unlist(strsplit( x = samNames, split = ":", fixed = TRUE )), nrow = 2L) samData <- data.frame( group = samNames[1L,], sample = samNames[2L,], postfix = postfix, stringsAsFactors = FALSE ) rownames(samData) <- sample_names(otuTab) samData <- sample_data(samData) # Return a phyloseq object return(phyloseq(otuTab, samData)) } # END - function: microbioSim makePlasmodes = function(Sample, samSize, postfix, var, physeq, meanLFDR = meanLFDR) { if (!taxa_are_rows(physeq)) { physeq = t(physeq) } counts = physeq@otu_table@.Data treatment = ifelse(sample_data(physeq)[[var]] == unique(sample_data(physeq)[[var]])[1], "grp1", "grp2") idNA = is.na(treatment) treatment = treatment[!idNA] counts = counts[,!idNA] if (max(table(treatment)) < 2 * samSize) { return(NULL) } # switchTrt = table(treatment)[1] < table(treatment)[2] plasmode = SimData( counts, treatment, sort.method = "unpaired", k.ind = samSize, n.diff = round(nrow(counts) * TPR), weights = meanLFDR[[paste(Sample, var, sep = delim)]], n.genes = nrow(counts), norm.factors = colSums(counts) ) # Construct phyloseq object from here otuTab = otu_table(plasmode$counts, taxa_are_rows = TRUE) samTab = sample_data(data.frame( group = ifelse( plasmode$treatment == unique(plasmode$treatment)[1], "grp1", "grp2" ), postfix = postfix )) physeq = phyloseq(samTab, otuTab) taxaNames = taxa_names(physeq) taxaNames[plasmode$DE.ind] = sapply(taxaNames[plasmode$DE.ind], paste0, "-TP") taxa_names(physeq) = taxaNames physeq } physeqListV13AG = lapply(sampleTypes, function(x) { physeqListV13AG[[x]] }) names(physeqListV13AG) = sampleTypes ## Do the same for the HMP data based on gender and runcenter WUGC: ## Washington University Genome Center HMPsubset = lapply(physeqListV13AG[names(physeqListV13AG) != "AGstool"], function(x) { prune_samples(x, samples = sample_data(x)$RUNCENTER == "WUGC" & sample_data(x)$sex == "female") }) cleanList0 = c(HMPsubset) cleanList = lapply(cleanList0, function(x) { if (taxa_are_rows(x)) { piMoMs = piMoM4Wald(t(x@otu_table@.Data)) } else { piMoMs = piMoM4Wald(x@otu_table@.Data) } names(piMoMs) = taxa_names(x) taxaKeep = names(sort(piMoMs, decreasing = TRUE)[1:nOTUs]) prune_taxa(x = x, taxaKeep) 
}) rm(cleanList0, HMPsubset) subSample = function(physeq, split = 0.5, nObs, replace = FALSE, postfix) { if (nsamples(physeq) < 2 * nObs) { return(NULL) } #If not enough samples to perform subsampling, return NULL idSample = sample(1:nsamples(physeq), nObs * 2, replace = replace) # A complication: we want to stick to the phyloseq framework, but then # samples cannot have identical names. We need a workaround when working # with sampling with replacement if (replace) { Table = table(idSample) if (max(Table) > 1) { for (x in (2:max(Table))) { samNumbers = as.integer(names(Table[Table >= x])) tmp = prune_samples(physeq, samples = sample_names(physeq)[samNumbers]) # Now change the sample names to avoid conflicts sample_names(tmp) = paste0(sample_names(tmp), x) physeqTmp = merge_phyloseq(tmp, physeqTmp) } } } else { physeqTmp = prune_samples(physeq, samples = sample_names(physeq)[idSample]) } groupSizes = round(c(split * nObs * 2, (1 - split) * nObs * 2)) # Assign groups at random sample_data(physeqTmp) = data.frame( group = paste0("grp", as.integer(sample(c( rep(1, groupSizes[1]), rep(2, groupSizes[2]) )))), sample = rep(seq(1, nObs), 2), postfix = postfix, row.names = sample_names(physeqTmp) ) return(physeqTmp) } splitSample = function(physeq, nEval, postfix, variable, maxVerif = 100) { physeq = prune_samples(x = physeq, as.vector(!is.na(sample_data(physeq)[, variable]))) #Remove NA's samDataDF = data.frame(sample_data(physeq)) nTrt = sum(samDataDF[, variable], na.rm = TRUE) nContr = nsamples(physeq) - nTrt if (min(nTrt, nContr) < 3.5 * nEval) { stop( "Not enough samples to make verification set two and a half times the size of the evaluation set!" ) } else { } samNames = sample_names(physeq) samDataDF$set = rep("Not selected", nsamples(physeq)) idEvalTrt = samNames[sample(which(samDataDF[, variable]), nEval)] idEvalControl = samNames[sample(which(!samDataDF[, variable]), nEval)] idVerifTrt = sample(samNames[!(samNames %in% idEvalTrt) & samDataDF[, variable]], min(nTrt - nEval, maxVerif)) idVerifControl = sample(samNames[!(samNames %in% idEvalControl) & !samDataDF[, variable]], min(min(nTrt, nContr) - nEval, maxVerif)) samDataDF$set[samNames %in% idEvalTrt | samNames %in% idEvalControl] = "eval" samDataDF$set[samNames %in% idVerifTrt | samNames %in% idVerifControl] = "verif" samDataDF$group = ifelse(samDataDF[, variable], "grp1", "grp2") sample_data(physeq) = sample_data(samDataDF) # returnSeq = prune_samples(x=physeq, samDataDF$set!='Not selected') evalPhy = #simpleTrimGen(prune_samples(x = physeq, samDataDF$set == "eval")) verifPhy = #simpleTrimGen(prune_samples(x = physeq, samDataDF$set == "verif")) # Make sure only taxa present in both groups are retained taxaKeep = intersect(taxa_names(evalPhy), taxa_names(verifPhy)) evalPhy = prune_taxa(x = evalPhy, taxaKeep) verifPhy = prune_taxa(x = verifPhy, taxaKeep) returnSeq = merge_phyloseq(evalPhy, verifPhy) rm(evalPhy, verifPhy) returnSeq } # 109 penicilin cases, 92 IBD cases ## Generate the Data require(phyloseq, quietly = TRUE) simListH1 <- lapply(simParamsH1[seq], function(iterRun) { set.seed(grep(iterRun, simParamsH1)) ## resets seed each iteration params <- strsplit(iterRun, delim)[[1]] names(params) <- simParamsLabelsH1 ## write info about the current simulation on log-file sink(file = ## 'log0.txt', append = TRUE) cat(iterRun, "\t") # sink() # type of sample sampleTypeIter <- params["SampleType"] # The sample size to use for each group in this simulation nObs <- as.integer(params["nSamples"]) # template and parameters template <- 
physeqList4Trim[[sampleTypeIter]] estPi <- piMoMs[[sampleTypeIter]] estTheta <- thetaMoMs[sampleTypeIter] estCov = covList[[sampleTypeIter]] estPhis = phiMLEs[[sampleTypeIter]] estRhos = rhoMLEs[[sampleTypeIter]] outLiers = OutLieList[[sampleTypeIter]] fC = as.numeric(params["EffectSize"]) distrib = params["Distribution"] # Rarely a simulation has a weird value and fails. Catch these with `try`, # and repeat the simulation call if error (it will be a new seed) tryAgain <- TRUE infLoopCount <- 1L maxLoops <- 15L while (tryAgain & infLoopCount <= maxLoops) { simResH1 <- microbioSim( postfix = iterRun, distrib = distrib, template = template, estPi = estPi, estTheta = estTheta, nObs = nObs, estRhos = estRhos, estPhis = estPhis, Covar = estCov, foldChange = fC, outLiers = outLiers, compensate = FALSE ) ## Make sure there are at least 3 taxa per sample, even after trimming if (is.null(simResH1) | inherits(simResH1, "try-error")) { tryAgain <- TRUE infLoopCount <- infLoopCount + 1L } else { #simResH1 <- #simpleTrimGen(simResH1) if (fewTaxa(simResH1, 3L)) { tryAgain <- TRUE infLoopCount <- infLoopCount + 1L } else { tryAgain <- FALSE } # END - ifelse: check not only error but *fewTaxa* success } # END - ifelse: check successful simulation } # END - while: protect against infinite loops and simulations failures if (infLoopCount > maxLoops) { warning("Consistent error found during simulation. Need to investigate cause.", immediate. = TRUE) cat(iterRun) } else { #simResH1 <- #simpleTrimGen(simResH1) ## only the _nOTUs_ most abundant OTUs are kept, unless the OTUs are already ## less if (ntaxa(simResH1) > nOTUs) { whichOTUs2Keep <- taxa_names(simResH1)[seq_len(nOTUs)] simResH1 <- prune_taxa(whichOTUs2Keep, simResH1) } else { } } # END - ifelse: consistent error in current simulation ## log file writing cat("FINISHED\n") return(simResH1) }) # END - parallelised simulations #if (!file.exists("./brokenpromise/results/simulationListBetaBinomAH1.RData")) { names(simListH1) <- simParamsH1[seq] any(sapply(simListH1, class) != "phyloseq") file_name <- paste0("simulationListNegBin", letter, ".RData") save(simListH1, simParamsLabelsH1, simParams, TPR, delim, file = file_name) # #} else { # #} else { # # } # END - if: file with simulations under H1 already exists # #} ## Matt's Code: Load/Annotate SimListH1 pr <- 0.1 BrokenPromiseData <- list() for (index in 1:length(simListH1)) { physeq <- simListH1[[index]] sample_data <- sample_data(physeq) %>% as.data.frame sample_data$group <- c(rep("grp1", nrow(sample_data) / 2), rep("grp2", nrow(sample_data) / 2)) %>% factor sample_data(physeq) <- sample_data(sample_data) otu_table(physeq) <- otu_table(physeq) %>% as("matrix") %>% #t %>% otu_table(taxa_are_rows = T) truede <- grep("-TP", rownames(otu_table(physeq))) params <- strsplit(names(simListH1)[index], "_") fx <- params[[1]][3] it <- params[[1]][2] sampleType <- params[[1]][1] distr <- params[[1]][5] iter <- paste0("|", it, "_", sampleType, "_", distr) #### m <- params[[1]][4] datalist <- list(physeq = physeq, truede = truede) BrokenPromiseData[[paste("BrokenPromiseH1", fx, m, pr, iter, sep = "_")]] <- datalist } distr <- "" if (distribs == "negbinCorOut") { distr <- "NegBin" } else if (distribs == "betabinCor") { distr <- "BetaBin" } ## the final simulated datasets to perform differential abundance tests on save(BrokenPromiseData, file = paste0("BrokenPromiseDataJustSmallSamples_", distr, ".RData"))
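# A quick standalone check (not part of the original simulation script) of what
# addFoldChange() does to a composition: a fraction H1frac of taxa is multiplied
# by `fc`, the affected taxa are tagged "-TPup" in their names, and the vector is
# renormalized to sum to 1. The toy composition and H1frac = 0.1 below are
# illustrative values only; H1frac is passed explicitly so the global TPR used
# elsewhere in the script is neither needed nor modified.
demoRhos <- setNames(rep(1 / 20, 20), paste0("OTU", 1:20))
demoOut <- addFoldChange(demoRhos, fc = 5, H1frac = 0.1, compensate = FALSE)
sum(demoOut)                                  # still 1 after renormalization
grep("-TPup", names(demoOut), value = TRUE)   # taxa that received the fold change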
#' Collect additional citations from the supplied edge list.
#'
#' `edge_list_expansion` takes the edge list produced by `generateEdgeList` and
#' collects citations for the source PMIDs that have not yet been searched.
#' @details
#' `edge_list` must come from the result of `generateEdgeList`.
#'
#' @param edge_list edge list results, as obtained from `generateEdgeList` (see details)
#' @return An edge list (data.frame) with one column for target PMIDs and one column for source PMIDs.
#' @seealso \code{\link{get_pmc_cited_in}} for obtaining elink citation results
#' @seealso \code{\link{generateEdgeList}} for obtaining elink citation results
#'
#' @examples
#'
#' # Collect additional citations from the supplied 'e2'
#' res1 <- get_pmc_cited_in(21876761)
#' e2 <- generateEdgeList(res1)
#' edge_list_expansion(e2)
#' @export
edge_list_expansion <- function(edge_list){
  # find the PMIDs that exist in both Source and Target
  match_pmids <- edge_list$Source %in% edge_list$Target
  # keep the PMIDs that are unique to edge_list$Source, because edge_list$Target has already been searched
  pmids <- edge_list$Source[!match_pmids]
  # keep only the unique PMIDs
  pmids <- unique(pmids)
  res <- get_pmc_cited_in(pmids)
  generateEdgeList(res)
}
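# Usage sketch (illustration only, not part of the original file): one way to
# grow the citation network a level at a time with the functions above, merging
# each expansion back into the seed edge list. The names e2/e3/fullEdgeList are
# hypothetical and mirror the roxygen example; the calls hit NCBI, so the
# sketch is left commented out.
# res1 <- get_pmc_cited_in(21876761)
# e2 <- generateEdgeList(res1)
# e3 <- edge_list_expansion(e2)
# fullEdgeList <- unique(rbind(e2, e3))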
/R/edge_list_expansion.R
no_license
gdancik/pmc2nc
R
false
false
1,197
r
# Testing other features
library(RLT)
set.seed(1)

trainn = 1000
testn = 500
n = trainn + testn
p = 30
X1 = matrix(rnorm(n*p/2), n, p/2)
X2 = matrix(as.integer(runif(n*p/2)*3), n, p/2)
X = data.frame(X1, X2)
for (j in (p/2 + 1):p) X[,j] = as.factor(X[,j])
y = 1 + X[, 1] + rnorm(n)

trainX = X[1:trainn, ]
trainY = y[1:trainn]
testX = X[1:testn + trainn, ]
testY = y[1:testn + trainn]
xorder = order(testX[, 1])
testX = testX[xorder, ]
testY = testY[xorder]

RLTfit <- RLT(trainX, trainY, ntrees = 1000, ncores = 10, nmin = 50,
              mtry = p/3, split.gen = "random", nsplit = 3,
              resample.prob = 0.6, importance = TRUE, resample.track = TRUE)

# Obtain the tree structure of one tree
get.one.tree(RLTfit, 1000)

# Forest Kernel
# since testing data is ordered by x1, closer subjects should
# have larger kernel weights
A = forest.kernel(RLTfit, testX)
heatmap(A$Kernel, Rowv = NA, Colv = NA, symm = TRUE)

# cross kernels
A = forest.kernel(RLTfit, X1 = testX, X2 = testX[1:(testn/2), ])
heatmap(A$Kernel, Rowv = NA, Colv = NA, symm = FALSE)

# vs.train
A = forest.kernel(RLTfit, X1 = testX, X2 = trainX[order(trainX[, 1]), ], vs.train = TRUE)
heatmap(A$Kernel, Rowv = NA, Colv = NA, symm = FALSE)
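# A small follow-up sketch (not part of the original test script): use the
# test-vs-train kernel held in `A` above as Nadaraya-Watson style weights to
# form a kernel-weighted prediction of the test response from the training
# response. It assumes rows of A$Kernel index X1 (the ordered test set) and
# columns index X2 (the training set in the order it was passed), matching the
# argument order of the forest.kernel() call above.
K <- A$Kernel
W <- K / rowSums(K)                                   # normalize weights per test point
kernelPred <- as.vector(W %*% trainY[order(trainX[, 1])])
plot(testX[, 1], testY, col = "grey", pch = 16, xlab = "x1", ylab = "y")
lines(testX[, 1], kernelPred, col = "red", lwd = 2)   # kernel-weighted fit (sketch)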
/test_other.r
no_license
hwzhousite/RLT
R
false
false
1,285
r
### Create data for use with DR simulation code. # This script takes data from DHS 2007 & 2013 and MICS 2014 and combines them # into a single data set. In addition we create as raster file with rural and # urban distinctions as pulled from the 2010 census. .libPaths(c("~/R3.6/", .libPaths())) rm(list=ls()) set.seed(123) library(Rcpp) library(haven) library(rgdal) library(sp) library(maptools) library(PointPolygon) library(rgeos) library(raster) library(tidyverse) library(dplyr) library(readr) library(tidyr) library(stringr) years <- 2000:2015 ageGroups <- c(0, NN=1/12, PNN1=1/2, PNN2=1, `1yr`=2, `2yr`=3, `3yr`=4, `4yr`=5) # Modified resample function to get sum of small areas smartResample <- function(x, y, method="bilinear", filename="", ...) { # to do: compare projections of x and y ln <- names(x) nl <- nlayers(x) if (nl == 1) { y <- raster(y) } else { y <- brick(y, values=FALSE, nl=nl) } if (!hasValues(x)) { return(y) } if (!method %in% c('bilinear', 'ngb')) { stop('invalid method') } if (method == 'ngb') method <- 'simple' skipaggregate <- isTRUE(list(...)$skipaggregate) if (!skipaggregate) { rres <- res(y) / res(x) resdif <- max(rres) if (resdif > 2) { ag <- pmax(1, floor(rres-1)) if (max(ag) > 1) { if (method == 'bilinear') { x <- aggregate(x, ag, 'sum') } else { x <- aggregate(x, ag, modal) } } } } e <- raster:::.intersectExtent(x, y, validate=TRUE) filename <- trim(filename) if (canProcessInMemory(y, 4*nl)) { inMemory <- TRUE v <- matrix(NA, nrow=ncell(y), ncol=nlayers(x)) } else { inMemory <- FALSE y <- writeStart(y, filename=filename, ... ) } if (raster:::.doCluster()) { cl <- getCluster() on.exit( returnCluster() ) nodes <- min(ceiling(y@nrows/10), length(cl)) # at least 10 rows per node message('Using cluster with ', nodes, ' nodes') utils::flush.console() tr <- blockSize(y, minblocks=nodes, n=nl*4*nodes) pb <- pbCreate(tr$n, label='resample', ...) clFun <- function(i) { #r <- tr$row[i]:(tr$row[i]+tr$nrows[i]-1) xy <- xyFromCell(y, cellFromRowCol(y, tr$row[i], 1) : cellFromRowCol(y, tr$row[i]+tr$nrows[i]-1, ncol(y)) ) .xyValues(x, xy, method=method) } parallel::clusterExport(cl, c('x', 'y', 'tr', 'method'), envir=environment()) .sendCall <- eval( parse( text="parallel:::sendCall") ) for (ni in 1:nodes) { .sendCall(cl[[ni]], clFun, list(ni), tag=ni) } if (inMemory) { for (i in 1:tr$n) { d <- .recvOneData(cl) if (! d$value$success) { stop('cluster error') } start <- cellFromRowCol(y, tr$row[d$value$tag], 1) end <- cellFromRowCol(y, tr$row[d$value$tag]+tr$nrows[d$value$tag]-1, y@ncols) v[start:end, ] <- d$value$value ni <- ni + 1 if (ni <= tr$n) { .sendCall(cl[[d$node]], clFun, list(ni), tag=ni) } pbStep(pb) } y <- setValues(y, v) if (filename != '') { writeRaster(y, filename, ...) } } else { for (i in 1:tr$n) { d <- .recvOneData(cl) y <- writeValues(y, d$value$value, tr$row[d$value$tag]) ni <- ni + 1 if (ni <= tr$n) { .sendCall(cl[[d$node]], clFun, list(ni), tag=ni) } pbStep(pb) } y <- writeStop(y) } } else { tr <- blockSize(y, n=nl*4) pb <- pbCreate(tr$n, label='resample', ...) if (inMemory) { for (i in 1:tr$n) { #r <- tr$row[i]:(tr$row[i]+tr$nrows[i]-1) xy <- xyFromCell(y, cellFromRowCol(y, tr$row[i], 1) : cellFromRowCol(y, tr$row[i]+tr$nrows[i]-1, ncol(y)) ) vals <- raster:::.xyValues(x, xy, method=method) start <- cellFromRowCol(y, tr$row[i], 1) end <- cellFromRowCol(y, tr$row[i]+tr$nrows[i]-1, y@ncols) v[start:end, ] <- vals pbStep(pb, i) } v <- setValues(y, v) if (filename != '') { writeRaster(v, filename, ...) 
} pbClose(pb) names(v) <- ln return(v) } else { for (i in 1:tr$n) { xy <- xyFromCell(y, cellFromRowCol(y, tr$row[i], 1) : cellFromRowCol(y, tr$row[i]+tr$nrows[i]-1, ncol(y)) ) vals <- raster:::.xyValues(x, xy, method=method) y <- writeValues(y, vals, tr$row[i]) pbStep(pb, i) } y <- writeStop(y) } } pbClose(pb) names(y) <- ln return(y) } # rcpp distance functions sourceCpp("./R-docs/dist.cpp") find_closest <- function(x1, x2) { toRad <- pi / 180 lat1 <- x1[,2] * toRad long1 <- x1[,1] * toRad lat2 <- x2[,2] * toRad long2 <- x2[,1] * toRad ord1 <- order(lat1) rank1 <- match(seq_along(lat1), ord1) ord2 <- order(lat2) ind <- find_closest_point(lat1[ord1], long1[ord1], lat2[ord2], long2[ord2]) fullDF$id[ord2[ind + 1][rank1]] } # dhsDF <- "./Data/DRBR52FL.SAV" %>% # read_spss DF <- "./data-extended/bh.sav" %>% read_spss() %>% rename(Region = HH7, PSU = HH1, `Birth(CMC)` = BH4C) %>% mutate(Urban = HH6=="1") %>% mutate(strat = paste0(sprintf("%02d", Region), "_", HH6)) %>% mutate(Alive = BH5 == "1") %>% mutate(`Age at Death` = c(1/365, 1/12, 1)[BH9U] * BH9N) %>% mutate(`Year of Birth` = BH4Y) %>% mutate(`Year at Death` = floor(`Age at Death`) + `Year of Birth`) %>% filter(`Year of Birth` <= 2018 & `Year of Birth` >= 1996) %>% mutate(`Age Group at Death` = cut( `Age at Death`, ageGroups, right=F, labels=names(ageGroups)[-1])) # make an individual age group level analysis datset where each individual # contributes one row per number of age groups that they survived to. Location # Remains constant for the individual and time is increased according to # year of birth and age. micsDF <- bind_rows(lapply(1:nrow(DF), function(i){ sigDF <- DF[i,] allLevels <- levels(DF$`Age Group at Death`) addLevels <- cumsum(as.numeric((ageGroups > 1)[-1])) names(addLevels) <- allLevels if(is.na(sigDF$`Age Group at Death`)){ expDF <- tibble( id = i, died = 0, age_group = allLevels, strat = sigDF$strat, year = as.numeric(sigDF$`Year of Birth`) + unname(addLevels), psu = sigDF$PSU, weight = sigDF$wmweight ) } else{ deathIDX <- which(allLevels == sigDF$`Age Group at Death`) subLevels <- allLevels[1:deathIDX] expDF <- tibble( id = i, died = c(rep(0, deathIDX-1), 1), age_group = subLevels, strat = sigDF$strat, year = as.numeric(sigDF$`Year of Birth`) + unname(addLevels[subLevels]), psu = sigDF$PSU, weight = sigDF$wmweight ) } expDF})) %>% dplyr::select(-id) %>% group_by(age_group, year, strat, psu) %>% summarize(N=n(), died=sum(died), weight=sum(weight)) %>% mutate(lat=NA, long=NA, source="MICS_2014") %>% ungroup %>% filter(year >= min(years) & year <= 2014) %>% mutate(psu = as.character(psu)) %>% mutate(source = "Dominican Republic Multiple Indicator Cluster Survey 2014") %>% mutate(urban = str_sub(strat, -1, -1) == "1") %>% filter(!(year == 2014 & (age_group %in% c("PNN1", "PNN2")))) sources <- c( "Dominican Republic Demographic and Health Survey 2007", "Dominican Republic Demographic and Health Survey 2013", "Dominican Republic Multiple Indicator Cluster Survey 2014" ) ihmeDF <- "./data-extended/DRdata.csv" %>% read_csv(col_types="ccdcidcddccddidcdc") %>% mutate(strat=NA) %>% dplyr::select( age_group=ab, year, strat, psu=geo_unit, N, died, lat=latnum, long=longnum, source=Title) %>% filter(source %in% sources) # theres a huge difference here need to ask why that is sum(ihmeDF$N[grepl("Cluster Survey", ihmeDF$source) & ihmeDF$year < 2015]) - sum(micsDF[micsDF$year < 2015,]$N) # load in the shape and use this to get the urban rural of points spDF <- readOGR("./data-extended/SECCenso2010.dbf") %>% 
spTransform(CRS("+proj=longlat")) spDF$strat <- paste0(spDF$REG, "_", spDF$ZONA) spDF$urban <- spDF$ZONA == "1" # the last step is to convert to a sufficiently detailed raster # we will then convert this raster to point data as used in the PointPolygon # package. rbase <- raster(ncol=100, nrow=100) extent(rbase) <- extent(spDF) rasterRegSP <- rasterize(spDF, rbase, 'REG') values(rasterRegSP)[is.na(values(rasterRegSP))] <- -1 rasterUrbanSP <- rasterize(spDF, rbase, 'urban') values(rasterUrbanSP)[is.na(values(rasterUrbanSP))] <- -1 fullRaster <- as(rasterRegSP, "SpatialPointsDataFrame") fullRaster$reg <- fullRaster$layer fullRasterUrban <- as(rasterUrbanSP, "SpatialPointsDataFrame") fullRaster$urban <- fullRasterUrban$layer fullRaster$id <- 1:nrow(fullRaster@data) fullRaster$ZONA <- if_else(fullRaster$urban==1, "1", "2") fullRaster$strat <- paste0( sprintf("%02d", fullRaster$reg), "_", fullRaster$ZONA) fullDF <- as_tibble(sp::coordinates(fullRaster)) %>% rename(long=x, lat=y) %>% bind_cols(dplyr::select(fullRaster@data, reg, urban, id)) %>% mutate(strat = paste0(sprintf("%02d", reg), "_", 2-urban)) %>% mutate(long=round(long, 5), lat=round(lat, 5)) %>% filter(reg >= 0 & urban >= 0) # now that we have assigned all of the points lets assign each of the points in # the DHS to their corresponding id dhsClusterDF <- ihmeDF %>% filter(!grepl("Cluster Survey", ihmeDF$source)) %>% dplyr::select(psu, lat, long) %>% unique %>% mutate(id=find_closest( as.matrix(dplyr::select(. , long, lat)), as.matrix(fullDF[,1:2]))) %>% dplyr::select(psu, id) idSubs <- mapply(function(i, j){ suDF <- subset(fullRaster@data, strat == paste0(sprintf("%02d", i), "_", j)) suDF$id }, rep(1:10, each=2), rep(1:2, 10)) polyDF <- tibble( strat = mapply( function(i,j) paste0(sprintf("%02d", i), "_", j), rep(1:10, each=2), rep(1:2, 10)), id = idSubs) %>% right_join(micsDF, by="strat") %>% mutate(point=FALSE) pointDF <- ihmeDF %>% filter(!grepl("Cluster Survey", ihmeDF$source)) %>% left_join(dhsClusterDF, by="psu") %>% left_join(dplyr::select(fullDF, id, urban), by="id") %>% mutate(point=TRUE) fullDF %>% ggplot(aes(long, lat, fill=reg)) + geom_raster() + coord_equal() + theme_void() + scale_fill_distiller(palette = "Spectral") + ggtitle("") + geom_point(aes(fill=NULL), data=pointDF) # Next we are going to want to build the population rasters wgetRaster <- function(year, age, sex, loc="DOM"){ baseURL <- "ftp://ftp.worldpop.org.uk/" projURL <- paste0(baseURL, "GIS/AgeSex_structures/Global_2000_2020/") specURL <- paste0(projURL, year, "/", toupper(loc), "/", tolower(loc), "_", str_sub(tolower(sex), 1, 1), "_", age, "_", year, ".tif") wpopRaster <- smartResample(raster(specURL), rasterUrbanSP) wpopRaster } wgetRasterYear <- function(year, loc="DOM"){ rasterList <- mapply( function(i, j) wgetRaster(year, i, j, loc), c(0, 1, 0, 1), c("Male", "Male", "Female", "Female")) Reduce("+", rasterList) } if(!file.exists("./data-extended/rasterYearListSingle.Rds")){ rasterYearList <- lapply(2010, wgetRasterYear) #saveRDS(rasterYearList, "./data-extended/rasterYearList.Rds") saveRDS(rasterYearList, "./data-extended/rasterYearListSingle.Rds") } #rasterYearList <- read_rds("./data-extended/rasterYearList.Rds") rasterYearList <- read_rds("./data-extended/rasterYearListSingle.Rds") names(rasterYearList) <- 2010 popYearDFList <- lapply(2010, function(x) NULL) names(popYearDFList) <- as.character(2010) for(y in 2010){ rDF <- rasterYearList[[as.character(y)]] values(rDF)[is.na(values(rDF))] <- -1 rDF <- as(rDF, "SpatialPointsDataFrame") rDF$Population <- 
rDF$layer rDF$layer <- NULL rDF$id <- 1:nrow(rDF@data) rDF@data <- right_join(rDF@data, dplyr::select(fullDF, id, strat)) %>% mutate(Population=ifelse(Population < 0, 0, Population)) %>% group_by(strat) %>% mutate(popW=Population/sum(Population)) %>% ungroup %>% mutate(year=y) popYearDFList[[as.character(y)]] <- rDF rm(list=c("rDF")) } yearWDF <- bind_rows(lapply(popYearDFList, function(x)x@data)) %>% arrange(year, id) #yearWMat <- matrix( # c(yearWDF$popW), # nrow=nrow(popYearDFList[[1]]@data), # ncol = length(years)) yearWDF %>% filter(year == 2010) %>% right_join(fullDF) %>% ggplot(aes(long, lat, fill=Population)) + geom_raster() + coord_equal() + theme_void() + scale_fill_distiller(palette = "Spectral") + ggtitle("") save(yearWDF, polyDF, pointDF, spDF, fullDF, file="./data/prepData.rda")
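# A brief sanity-check sketch (not part of the original cleaning script) for the
# objects saved to ./data/prepData.rda: within each stratum the population
# weights popW should sum to (approximately) 1 wherever the stratum has any
# population, and every DHS cluster in pointDF should have been matched to a
# raster cell id present in fullDF.
yearWDF %>%
  group_by(year, strat) %>%
  summarize(totalW = sum(popW)) %>%
  ungroup() %>%
  summarize(allCloseToOne = all(abs(totalW - 1) < 1e-8, na.rm = TRUE))
all(pointDF$id %in% fullDF$id)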
/data-raw/data_clean.R
no_license
nmmarquez/DRU5MR
R
false
false
13,851
r
# some examples about dplyr

# about filter
library(dplyr)
Name = c("zhang3", "li4", "wang5", "zhao6")
sID = c(1, 2, 3, 4)
Stat = c(60, 70, 90, 90)
Form = data.frame(Name, sID, Stat)
filter(Form, Stat == 90)

# about arrange
library(dplyr)
Name = c("zhang3", "li4", "wang5", "zhao6")
sID = c(1, 2, 3, 4)
Stat = c(100, 70, 90, 90)
Form = data.frame(Name, sID, Stat)
arrange(Form, Stat)
arrange(Form, desc(Stat))

# about select
library(dplyr)
Name = c("zhang3", "li4", "wang5", "zhao6")
sID = c(1, 2, 3, 4)
Stat = c(100, 70, 90, 90)
Form = data.frame(Name, sID, Stat)
select(Form, Name:sID)
select(Form, Stat:sID)
select(Form, -Stat)

# about mutate
library(dplyr)
Name = c("zhang3", "li4", "wang5", "zhao6")
sID = c(1, 2, 3, 4)
Stat = c(100, 70, 90, 90)
Algebra = c(60, 80, 90, 70)
Form = data.frame(Name, sID, Stat, Algebra)
mutate(Form, Total = 0.4*Stat + 0.6*Algebra)
Form

# about summarise
library(dplyr)
Name = c("zhang3", "li4", "wang5", "zhao6")
sID = c(1, 2, 3, 4)
Stat = c(100, 70, 90, 90)
Algebra = c(60, 80, 90, 70)
Form = data.frame(Name, sID, Stat, Algebra)
summarise(Form, sum(Stat))
summarize(group_by(Form, Name), sum(0.4*Stat, 0.6*Algebra))
# group_by is very useful

# about rename
library(dplyr)
Name = c("zhang3", "li4", "wang5", "zhao6")
sID = c(1, 2, 3, 4)
Stat = c(100, 70, 90, 90)
Algebra = c(60, 80, 90, 70)
Form = data.frame(Name, sID, Stat, Algebra)
rename(Form, studentID = sID)

# about pipe
library(dplyr)
Name = c("zhang3", "li4", "wang5", "zhao6")
sID = c(1, 2, 3, 4)
Stat = c(100, 70, 90, 90)
Algebra = c(60, 80, 90, 70)
Form = data.frame(Name, sID, Stat, Algebra)
# `.` marks where the piped value goes; here it becomes the Total column of Form
newForm = c(Stat + Algebra) %>% mutate(Form, Total = .)
newForm
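# A combined sketch (not in the original notes): the same verbs chained with
# the pipe on the last Form defined above, so each intermediate result flows
# straight into the next verb.
Form %>%
  mutate(Total = 0.4 * Stat + 0.6 * Algebra) %>%
  filter(Total >= 80) %>%
  arrange(desc(Total)) %>%
  select(Name, Total)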
/ran-you 2017310812 statistics17 L3.R
no_license
ran-you/homework-of-statcomp
R
false
false
1,656
r
test_that("dataone library loads", { expect_true(require(dataone)) }) test_that("D1Client constructors", { skip_on_cran() library(dataone) #cli <- new("D1Client") expect_false(is.null(d1cProd)) expect_match(class(d1cProd), "D1Client") expect_match(d1cProd@cn@baseURL, "https://cn.dataone.org/cn") #cli <- new("D1Client", env="PROD", mNodeid="urn:node:KNB") expect_false(is.null(d1cKNB)) expect_match(class(d1cKNB), "D1Client") expect_match(d1cKNB@cn@baseURL, "https://cn.dataone.org/cn") expect_match(d1cKNB@mn@baseURL, "https://knb.ecoinformatics.org/knb/d1/mn") # Skip the remainder of the tests because these test environments are # often down due to upgrades, reconfiguring, testing new features. skip_on_cran() cli <- new("D1Client", cn=cnStaging, mn=getMNode(cnStaging, "urn:node:mnTestKNB")) expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn-stage.test.dataone.org/cn") expect_match(cli@mn@baseURL, "https://dev.nceas.ucsb.edu/knb/d1/mn") cli <- D1Client() expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn.dataone.org/cn") cli <- D1Client("STAGING") expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn-stage.test.dataone.org/cn") # Skip the hightly unstable environments when testing on cran skip_on_cran() cli <- D1Client("SANDBOX") expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn-sandbox.test.dataone.org/cn") #cli <- D1Client("DEV") #expect_false(is.null(cli)) #expect_match(class(cli), "D1Client") #expect_match(cli@cn@baseURL, "https://cn-dev.test.dataone.org/cn") }) test_that("D1Client methods", { skip_on_cran() # Test listMemberNodes #cli <- D1Client("PROD") nodes <- listMemberNodes(d1cProd) expect_gt(length(nodes), 0) expect_identical(class(nodes), "list") # The remainder of this test uses development machines. 
skip_on_cran() # Test getEndPoint() cli <- D1Client("STAGING") cnUrl <- getEndpoint(cli) expect_match(cnUrl, "https://cn-stage.test.dataone.org/cn") # Test getMNodeId() cli <- D1Client("STAGING", "urn:node:mnTestKNB") expect_match(getMNodeId(cli), "urn:node:mnTestKNB") # Test setMNodeId cli <- new("D1Client", env="STAGING") cli <- setMNodeId(cli, "urn:node:mnTestKNB") expect_match(cli@mn@identifier, "urn:node:mnTestKNB") }) test_that("D1Client getDataObject", { skip_on_cran() library(dataone) library(digest) #cli <- D1Client("PROD", "urn:node:KNB") expect_false(is.null(d1cKNB)) expect_match(class(d1cKNB), "D1Client") expect_match(d1cKNB@cn@baseURL, "https://cn.dataone.org/cn") am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cKNB@mn)) if(authValid) { # Skip if Mac OS and X.509 Certificate if(dataone:::getAuthMethod(am, d1cKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") } # Try retrieving a known object from the PROD environment pid <- "solson.5.1" obj <- getDataObject(d1cKNB, pid, checksumAlgorithm="SHA-256") cname <- class(obj)[1] expect_match(cname, "DataObject") expect_match(class(obj@sysmeta), "SystemMetadata") expect_match(getIdentifier(obj), pid) expect_match(getFormatId(obj), "text/csv") data <- getData(obj) sha256 <- digest(data, algo="sha256", serialize=FALSE, file=FALSE) expect_match(sha256, obj@sysmeta@checksum) }) test_that("D1Client uploadDataObject with raw data works", { skip_on_cran() library(dataone) library(datapack) # Create a DataObject with a raw R object and upload to DataONE data <- charToRaw("1,2,3\n4,5,6\n") #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") expect_false(is.null(d1cTest)) # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") # Create DataObject for the science data do <- new("DataObject", format="text/csv", dataobj=data, mnNodeId=getMNodeId(d1cTest)) expect_match(do@sysmeta@identifier, "urn:uuid") newId <- uploadDataObject(d1cTest, do, replicate=FALSE, preferredNodes=NA, public=TRUE) expect_true(!is.null(newId)) } else { skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataObject with filename works", { skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") expect_false(is.null(d1cTest)) # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") # Create DataObject for the science data do <- new("DataObject", format="text/csv", mnNodeId=getMNodeId(d1cTest), filename=csvfile) expect_match(do@sysmeta@identifier, "urn:uuid") newId <- uploadDataObject(d1cTest, do, replicate=FALSE, preferredNodes=NA , public=TRUE) expect_true(!is.null(newId)) } else { 
skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataPackage works", { skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") #d1c <- D1Client("SANDBOX2", "urn:node:mnDemo2") #d1c <- D1Client("DEV2", "urn:node:mnDevUCSB2") expect_false(is.null(d1cTest)) #preferredNodes <- c("urn:node:mnDemo9") preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create DataObject for the science data sciObj <- new("DataObject", format="text/csv", mnNodeId=getMNodeId(d1cTest), filename=csvfile) # It's possible to set access rules for DataObject now, or for all DataObjects when they are uploaded to DataONE via uploadDataPackage expect_match(sciObj@sysmeta@identifier, "urn:uuid") sciObj <- setPublicAccess(sciObj) accessRules <- data.frame(subject=c("uid=smith,ou=Account,dc=example,dc=com", "uid=slaughter,o=unaffiliated,dc=example,dc=org"), permission=c("write", "changePermission")) sciObj <- addAccessRule(sciObj, accessRules) dp <- addMember(dp, sciObj) expect_true(is.element(sciObj@sysmeta@identifier, getIdentifiers(dp))) # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) expect_match(metadataObj@sysmeta@identifier, "urn:uuid") dp <- addMember(dp, metadataObj) expect_true(is.element(metadataObj@sysmeta@identifier, getIdentifiers(dp))) # Associate the metadata object with the science object it describes dp <- insertRelationship(dp, subjectID=getIdentifier(metadataObj), objectIDs=getIdentifier(sciObj)) # Upload the data package to DataONE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, preferredNodes=preferredNodes, public=TRUE, accessRules=accessRules) expect_true(!is.null(resourceMapId)) } else { skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataPackage works for a minimal DataPackage", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. 
skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") expect_false(is.null(d1cTest)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) expect_match(metadataObj@sysmeta@identifier, "urn:uuid") dp <- addMember(dp, metadataObj) expect_true(is.element(metadataObj@sysmeta@identifier, getIdentifiers(dp))) # Upload the data package to DataONE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, preferredNodes=preferredNodes, public=TRUE) expect_true(!is.null(resourceMapId)) } else { skip("This test requires valid authentication.") } }) test_that("D1Client updateDataPackage works", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. skip_on_cran() library(dataone) library(datapack) library(xml2) library(digest) # Create a csv file for the science object #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") #d1c <- D1Client("STAGING2", "urn:node:mnTestKNB") #d1c <- D1Client("DEV2", "urn:node:mnDevUCSB1") expect_false(is.null(d1cTestKNB)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTestKNB@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTestKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile) metadataId <- getIdentifier(metadataObj) # Associate the metadata object with each data object using the 'insertRelationships' method. # Since a relationship type (the predicate argument) is not specified, the default relationship # of 'cito:documents' is used, to indicate the the metadata object documents each data object. # See "http://purl.org/spar/cito", for further information about the "Citation Type Ontology". dp <- addMember(dp, metadataObj) sourceData <- system.file("extdata/sample.csv", package="dataone") sourceObj <- new("DataObject", format="text/csv", filename=sourceData) dp <- addMember(dp, sourceObj, metadataObj) resolveURL <- sprintf("%s/%s/object", d1cTestKNB@mn@baseURL, d1cTestKNB@mn@APIversion) # Update the distribution URL in the metadata with the identifier that has been assigned to # this DataObject. 
This provides a direct link between the detailed information for this package # member and DataONE, which will assist DataONE in accessing and displaying this detailed information. xpathToURL <- "//dataTable/physical/distribution[../objectName/text()=\"OwlNightj.csv\"]/online/url" newURL <- sprintf("%s/%s", resolveURL, getIdentifier(sourceObj)) dp <- updateMetadata(dp, metadataId, xpath=xpathToURL, newURL) metadataId <- selectMember(dp, name="sysmeta@formatId", value="eml://ecoinformatics.org/eml-2.1.1") metadataObj <- getMember(dp, metadataId) progFile <- system.file("extdata/filterSpecies.R", package="dataone") progObj <- new("DataObject", format="application/R", filename=progFile, mediaType="text/x-rsrc") dp <- addMember(dp, progObj, metadataObj) xpathToURL <- "//otherEntity/physical/distribution[../objectName/text()=\"filterObs.R\"]/online/url" newURL <- sprintf("%s/%s", resolveURL, getIdentifier(progObj)) dp <- updateMetadata(dp, metadataId, xpath=xpathToURL, newURL) metadataId <- selectMember(dp, name="sysmeta@formatId", value="eml://ecoinformatics.org/eml-2.1.1") metadataObj <- getMember(dp, metadataId) outputData <- system.file("extdata/filteredSpecies.csv", package="dataone") outputObj <- new("DataObject", format="text/csv", filename=outputData) dp <- addMember(dp, outputObj, metadataObj) xpathToURL <- "//dataTable/physical/distribution[../objectName/text()=\"Strix-occidentalis-obs.csv\"]/online/url" newURL <- sprintf("%s/%s", resolveURL, getIdentifier(outputObj)) dp <- updateMetadata(dp, metadataId, xpath=xpathToURL, newURL) # Upload the data package to DataONE newPkg <- uploadDataPackage(d1cTestKNB, dp, public=TRUE, quiet=TRUE, as="DataPackage") pkgId <- newPkg@resmapId expect_true(!is.na(pkgId)) # Sleep for 90 secondsl to let indexing finish for the package. Because we are imposing a wait on this # package, this test is not suitable for use in CRAN. } else { skip("This test requires valid authentication.") } }) test_that("D1Client updateDataPackage with new package using previously uploaded objects works", { # Test the typical workflow of creating a DataONE package by first uploading all data objects for the package, # then creating a package from the already uploaded objects. 
skip_on_cran() library(dataone) library(datapack) library(xml2) library(digest) expect_false(is.null(d1cTestKNB)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTestKNB@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTestKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") # First upload objects to DataONE that will be collected into a package sourceData <- system.file("extdata/OwlNightj.csv", package="dataone") sourceObj <- new("DataObject", format="text/csv", filename=sourceData) sourceObj <- addAccessRule(sourceObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") sourceId <- uploadDataObject(d1cTestKNB, sourceObj, public=T, quiet=T) expect_true(!is.na(sourceId)) progFile <- system.file("extdata/filterObs.R", package="dataone") progObj <- new("DataObject", format="application/R", filename=progFile, mediaType="text/x-rsrc") progObj <- addAccessRule(progObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") progId <- uploadDataObject(d1cTestKNB, progObj, public=T, quiet=T) expect_true(!is.na(progId)) outputData <- system.file("extdata/Strix-occidentalis-obs.csv", package="dataone") outputObj <- new("DataObject", format="text/csv", filename=outputData) outputObj <- addAccessRule(outputObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") outputId <- uploadDataObject(d1cTestKNB, outputObj, public=T, quiet=T) expect_true(!is.na(outputId)) # Create a new package, and download each member (lazyLoaded) that was just uploaded, then add them # to the package and upload. This workflow does not require that package members are downloaded with # lazyLoad, this is done here just for efficiency. If a package member is downloaded without lazyLoad, # it will not be-reuploaded when the package is uploaded, unless it has been updated (i.e. updated contents, # or sysmeta). 
pkg <- new("DataPackage") # Create metadata object that describes the package emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile) metadataObj <- addAccessRule(metadataObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") pkg <- addMember(pkg, metadataObj) metadataId <- getIdentifier(metadataObj) newSourceObj <- getDataObject(d1cTestKNB, sourceId, lazyLoad=T, quiet=T) pkg <- addMember(pkg, newSourceObj, metadataObj) newProgObj <- getDataObject(d1cTestKNB, progId, lazyLoad=T, quiet=T) pkg <- addMember(pkg, newProgObj, metadataObj) newOutputObj <- getDataObject(d1cTestKNB, outputId, lazyLoad=T, quiet=T) pkg <- addMember(pkg, newOutputObj, metadataObj) resourceMapId <- uploadDataPackage(d1cTestKNB, pkg, public=TRUE, quiet=T) expect_false(is.na(resourceMapId)) # Now test that we can download the newly created package and add an existing object # Now add a new package member that was omitted from the original package auxFile <- system.file("extdata/WeatherInf.txt", package="dataone") auxObj <- new("DataObject", format="text/plain", file=auxFile) auxObj <- addAccessRule(auxObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") auxId <- uploadDataObject(d1cTestKNB, auxObj, public=T, quiet=T) expect_true(!is.na(auxId)) # Have to sleep just a bit, as indexing can take awhile to complete # Keep trying for ten seconds for the package to be indexed done <- FALSE trys <- 0 while(!done) { if(trys > 10) break Sys.sleep(1) queryParams <- sprintf('q=id:"%s"', resourceMapId) result <- query(d1cTestKNB@mn, queryParams, as="list") # Now download the package that was just created, and ensure that the checksums are all the # requested type. if(length(result) == 0) { trys <- trys + 1 next } else { done <- TRUE } newAuxObj <- getDataObject(d1cTestKNB, auxId, lazyLoad=T, quiet=T) editPkg <- getDataPackage(d1cTestKNB, identifier=resourceMapId, lazyLoad=TRUE, quiet=TRUE) } expect_true(done) editPkg <- addMember(editPkg, newAuxObj, metadataObj) newResmapId <- uploadDataPackage(d1cTestKNB, editPkg, public=TRUE, quiet=T) expect_false(is.na(newResmapId)) expect_false(resourceMapId == newResmapId) } else { skip("This test requires valid authentication.") } }) test_that("D1Client getDataPackage with checksumAlgorithm specified works", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. 
skip_on_cran() library(dataone) library(datapack) library(xml2) library(digest) # Create a csv file for the science object expect_false(is.null(d1cTestKNB)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTestKNB@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTestKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") sha256 <- "SHA-256" md5 <- "MD5" checksumAlgorithm <- sha256 dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile, checksum=checksumAlgorithm) metadataId <- getIdentifier(metadataObj) dp <- addMember(dp, metadataObj) sourceData <- system.file("extdata/sample.csv", package="dataone") sourceObj <- new("DataObject", format="text/csv", filename=sourceData, checksum=checksumAlgorithm) dp <- addMember(dp, sourceObj, metadataObj) progFile <- system.file("extdata/filterSpecies.R", package="dataone") progObj <- new("DataObject", format="application/R", filename=progFile, mediaType="text/x-rsrc", checksum=checksumAlgorithm) dp <- addMember(dp, progObj, metadataObj) outputData <- system.file("extdata/filteredSpecies.csv", package="dataone") outputObj <- new("DataObject", format="text/csv", filename=outputData, checksum=checksumAlgorithm) dp <- addMember(dp, outputObj, metadataObj) # Upload the data package to DataONE pkgId <- uploadDataPackage(d1cTestKNB, dp, public=TRUE, quiet=TRUE) expect_true(!is.na(pkgId)) # Have to sleep just a bit, as indexing can take awhile to complete # Keep trying for ten seconds for the package to be indexed done <- FALSE trys <- 0 while(!done) { if(trys > 10) break Sys.sleep(1) queryParams <- sprintf('q=id:"%s"', pkgId) result <- query(d1cTestKNB@mn, queryParams, as="list") # Now download the package that was just created, and ensure that the checksums are all the # requested type. if(length(result) == 0) { trys <- trys + 1 next } else { done <- TRUE } pkg <- getDataPackage(d1cTestKNB, identifier=pkgId, lazyLoad=TRUE, limit="0MB", quiet=TRUE, checksumAlgorithm=sha256) algorithms <- getValue(pkg, name="sysmeta@checksumAlgorithm") expect_true(all(algorithms == sha256)) # Download the package again, requesting a different checksum type, and ensure that the checksums are all the # new type. 
pkg <- getDataPackage(d1cTestKNB, identifier=pkgId, lazyLoad=TRUE, limit="0MB", quiet=TRUE, checksumAlgorithm=md5) algorithms <- getValue(pkg, name="sysmeta@checksumAlgorithm") expect_true(all(algorithms==md5)) } expect_true(done) } else { skip("This test requires valid authentication.") } }) test_that("D1Client listMemberNodes() works", { skip_on_cran() library(dataone) #d1c <- D1Client("PROD") nodelist <- listMemberNodes(d1cProd) expect_true(length(nodelist) > 0) expect_match(class(nodelist[[1]]), "Node") expect_match(nodelist[[1]]@identifier, "urn:node:") expect_match(nodelist[[1]]@type, "cn|mn") expect_match(nodelist[[1]]@state, "up") expect_match(nodelist[[length(nodelist)]]@identifier, "urn:node:") expect_match(nodelist[[length(nodelist)]]@baseURL, "http") expect_match(nodelist[[length(nodelist)]]@subject, "urn:node:") expect_match(nodelist[[length(nodelist)]]@type, "cn|mn") }) test_that("D1Client updateDataPackage works for a metadata only DataPackage", { skip_on_cran() # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. # This is a long running test, so it should be run manually, which means # running "test_file("tests/testthat/packageUpdate.R"), as this file is not # run by default by testthat due to the name not including 'test*' library(dataone) library(datapack) library(xml2) library(digest) # Create a csv file for the science object expect_false(is.null(d1cTest)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile) metadataId <- getIdentifier(metadataObj) # Associate the metadata object with each data object using the 'insertRelationships' method. # Since a relationship type (the predicate argument) is not specified, the default relationship # of 'cito:documents' is used, to indicate the the metadata object documents each data object. # See "http://purl.org/spar/cito", for further information about the "Citation Type Ontology". dp <- addMember(dp, metadataObj) pkgId <- uploadDataPackage(d1cTest, dp, public=TRUE, quiet=TRUE) expect_true(!is.na(pkgId)) done <- FALSE trys <- 0 while(!done) { if(trys > 10) break Sys.sleep(1) queryParams <- sprintf('q=id:"%s"', pkgId) result <- query(d1cTest@mn, queryParams, as="list") # Now download the package that was just created, and ensure that the checksums are all the # requested type. if(length(result) == 0) { trys <- trys + 1 next } else { done <- TRUE } # Test the download by specifying the metadata id of the package. The 'getDataPackage()' function # should be able determine the package id based on the metadata id. 
testPkg <- getDataPackage(d1cTest, metadataId, quiet=T) expect_equal(pkgId, testPkg@resmapId) } expect_true(done) } else { skip("This test requires valid authentication.") } }) test_that("D1Client downloadObject", { skip_on_cran() library(dataone) #cli <- D1Client("PROD", "urn:node:KNB") expect_false(is.null(d1cKNB)) expect_match(class(d1cKNB), "D1Client") expect_match(d1cKNB@cn@baseURL, "https://cn.dataone.org/cn") am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cKNB@mn)) if(authValid) { # Skip if Mac OS and X.509 Certificate if(dataone:::getAuthMethod(am, d1cKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") } # Try downloading a known object from the PROD environment pid <- "solson.5.1" path <- tempdir() file <- downloadObject(d1cKNB, pid, path) expect_match(class(file), "path") expect_true(file.exists(file)) unlink(file) }) test_that("D1Client uploadDataPackage public argument works", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) expect_false(is.null(d1cTest)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) expect_match(metadataObj@sysmeta@identifier, "urn:uuid") # give metadata object an access policy without public read metadataObj <- addAccessRule(metadataObj, "CN=arctic-data-admins,DC=dataone,DC=org", "read") metadataObj <- addAccessRule(metadataObj, "CN=arctic-data-admins,DC=dataone,DC=org", "write") metadataObj <- addAccessRule(metadataObj, "CN=arctic-data-admins,DC=dataone,DC=org", "changePermission") dp <- addMember(dp, metadataObj) expect_true(is.element(metadataObj@sysmeta@identifier, getIdentifiers(dp))) # Upload the data package to DataONE with public set to TRUE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, preferredNodes=preferredNodes, public=TRUE) expect_true(!is.null(resourceMapId)) # check that all members of the package have public read sys_rm <- getSystemMetadata(d1cTest@mn, resourceMapId) sys_mo <- getSystemMetadata(d1cTest@mn, metadataObj@sysmeta@identifier) expect_true("public" %in% sys_rm@accessPolicy$subject) expect_true("public" %in% sys_mo@accessPolicy$subject) } else { skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataPackage doesn't change the rightsHolder", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. 
skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) # set rightsHolder on metadata to a test ORCID metadataObj@sysmeta@rightsHolder <- "http://orcid.org/0000-0000-0000-0000" dp <- addMember(dp, metadataObj) # set rightsHolder on resource map to a test ORCID dp@sysmeta@rightsHolder <- "http://orcid.org/0000-0000-0000-0000" # Upload the data package to DataONE with public set to TRUE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, public=TRUE) # check that all members of the package have public read sys_rm <- getSystemMetadata(d1cTest@mn, resourceMapId) sys_mo <- getSystemMetadata(d1cTest@mn, metadataObj@sysmeta@identifier) expect_equal("http://orcid.org/0000-0000-0000-0000", sys_rm@rightsHolder) expect_equal("http://orcid.org/0000-0000-0000-0000", sys_rm@rightsHolder) } else { skip("This test requires valid authentication.") } })
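The tests above wait for newly uploaded objects to appear in the member node's Solr index by polling query() inside a while loop. That retry logic could be factored into a small helper; the sketch below is illustrative only, and waitForIndexing is a hypothetical name rather than part of the dataone API.

# Hypothetical helper (not part of dataone): poll the node's Solr index until an
# identifier becomes searchable, mirroring the retry loops used in the tests above.
waitForIndexing <- function(mn, id, maxTries = 10, sleepSecs = 1) {
  for (i in seq_len(maxTries)) {
    Sys.sleep(sleepSecs)
    result <- query(mn, sprintf('q=id:"%s"', id), as = "list")
    if (length(result) > 0) return(TRUE)  # identifier is indexed
  }
  FALSE  # gave up after maxTries attempts
}
# Usage in a test (assuming d1cTestKNB and resourceMapId as in the tests above):
# expect_true(waitForIndexing(d1cTestKNB@mn, resourceMapId))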
/tests/testthat/test.D1Client.R
permissive
DataONEorg/rdataone
R
false
false
32,509
r
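A condensed sketch of the workflow test.D1Client.R exercises for assembling a package from objects that are already on the member node: lazy-load each previously uploaded object, attach it to a new DataPackage beneath a metadata object, and upload only the new resource map. The client d1c and the identifiers sourceId and progId stand in for values created earlier in the tests; this is an illustration of the calls used above, not a drop-in test.

library(dataone)
library(datapack)
pkg <- new("DataPackage")
emlFile <- system.file("extdata/strix-pacific-northwest.xml", package = "dataone")
metadataObj <- new("DataObject", format = "eml://ecoinformatics.org/eml-2.1.1", filename = emlFile)
pkg <- addMember(pkg, metadataObj)
# lazyLoad = TRUE skips downloading the data bytes; unchanged members are not
# re-uploaded when the package is uploaded, only the new resource map is created.
for (id in c(sourceId, progId)) {
  member <- getDataObject(d1c, id, lazyLoad = TRUE, quiet = TRUE)
  pkg <- addMember(pkg, member, metadataObj)
}
resourceMapId <- uploadDataPackage(d1c, pkg, public = TRUE, quiet = TRUE)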
test_that("dataone library loads", { expect_true(require(dataone)) }) test_that("D1Client constructors", { skip_on_cran() library(dataone) #cli <- new("D1Client") expect_false(is.null(d1cProd)) expect_match(class(d1cProd), "D1Client") expect_match(d1cProd@cn@baseURL, "https://cn.dataone.org/cn") #cli <- new("D1Client", env="PROD", mNodeid="urn:node:KNB") expect_false(is.null(d1cKNB)) expect_match(class(d1cKNB), "D1Client") expect_match(d1cKNB@cn@baseURL, "https://cn.dataone.org/cn") expect_match(d1cKNB@mn@baseURL, "https://knb.ecoinformatics.org/knb/d1/mn") # Skip the remainder of the tests because these test environments are # often down due to upgrades, reconfiguring, testing new features. skip_on_cran() cli <- new("D1Client", cn=cnStaging, mn=getMNode(cnStaging, "urn:node:mnTestKNB")) expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn-stage.test.dataone.org/cn") expect_match(cli@mn@baseURL, "https://dev.nceas.ucsb.edu/knb/d1/mn") cli <- D1Client() expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn.dataone.org/cn") cli <- D1Client("STAGING") expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn-stage.test.dataone.org/cn") # Skip the hightly unstable environments when testing on cran skip_on_cran() cli <- D1Client("SANDBOX") expect_false(is.null(cli)) expect_match(class(cli), "D1Client") expect_match(cli@cn@baseURL, "https://cn-sandbox.test.dataone.org/cn") #cli <- D1Client("DEV") #expect_false(is.null(cli)) #expect_match(class(cli), "D1Client") #expect_match(cli@cn@baseURL, "https://cn-dev.test.dataone.org/cn") }) test_that("D1Client methods", { skip_on_cran() # Test listMemberNodes #cli <- D1Client("PROD") nodes <- listMemberNodes(d1cProd) expect_gt(length(nodes), 0) expect_identical(class(nodes), "list") # The remainder of this test uses development machines. 
skip_on_cran() # Test getEndPoint() cli <- D1Client("STAGING") cnUrl <- getEndpoint(cli) expect_match(cnUrl, "https://cn-stage.test.dataone.org/cn") # Test getMNodeId() cli <- D1Client("STAGING", "urn:node:mnTestKNB") expect_match(getMNodeId(cli), "urn:node:mnTestKNB") # Test setMNodeId cli <- new("D1Client", env="STAGING") cli <- setMNodeId(cli, "urn:node:mnTestKNB") expect_match(cli@mn@identifier, "urn:node:mnTestKNB") }) test_that("D1Client getDataObject", { skip_on_cran() library(dataone) library(digest) #cli <- D1Client("PROD", "urn:node:KNB") expect_false(is.null(d1cKNB)) expect_match(class(d1cKNB), "D1Client") expect_match(d1cKNB@cn@baseURL, "https://cn.dataone.org/cn") am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cKNB@mn)) if(authValid) { # Skip if Mac OS and X.509 Certificate if(dataone:::getAuthMethod(am, d1cKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") } # Try retrieving a known object from the PROD environment pid <- "solson.5.1" obj <- getDataObject(d1cKNB, pid, checksumAlgorithm="SHA-256") cname <- class(obj)[1] expect_match(cname, "DataObject") expect_match(class(obj@sysmeta), "SystemMetadata") expect_match(getIdentifier(obj), pid) expect_match(getFormatId(obj), "text/csv") data <- getData(obj) sha256 <- digest(data, algo="sha256", serialize=FALSE, file=FALSE) expect_match(sha256, obj@sysmeta@checksum) }) test_that("D1Client uploadDataObject with raw data works", { skip_on_cran() library(dataone) library(datapack) # Create a DataObject with a raw R object and upload to DataONE data <- charToRaw("1,2,3\n4,5,6\n") #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") expect_false(is.null(d1cTest)) # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") # Create DataObject for the science data do <- new("DataObject", format="text/csv", dataobj=data, mnNodeId=getMNodeId(d1cTest)) expect_match(do@sysmeta@identifier, "urn:uuid") newId <- uploadDataObject(d1cTest, do, replicate=FALSE, preferredNodes=NA, public=TRUE) expect_true(!is.null(newId)) } else { skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataObject with filename works", { skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") expect_false(is.null(d1cTest)) # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") # Create DataObject for the science data do <- new("DataObject", format="text/csv", mnNodeId=getMNodeId(d1cTest), filename=csvfile) expect_match(do@sysmeta@identifier, "urn:uuid") newId <- uploadDataObject(d1cTest, do, replicate=FALSE, preferredNodes=NA , public=TRUE) expect_true(!is.null(newId)) } else { 
skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataPackage works", { skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") #d1c <- D1Client("SANDBOX2", "urn:node:mnDemo2") #d1c <- D1Client("DEV2", "urn:node:mnDevUCSB2") expect_false(is.null(d1cTest)) #preferredNodes <- c("urn:node:mnDemo9") preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create DataObject for the science data sciObj <- new("DataObject", format="text/csv", mnNodeId=getMNodeId(d1cTest), filename=csvfile) # It's possible to set access rules for DataObject now, or for all DataObjects when they are uploaded to DataONE via uploadDataPackage expect_match(sciObj@sysmeta@identifier, "urn:uuid") sciObj <- setPublicAccess(sciObj) accessRules <- data.frame(subject=c("uid=smith,ou=Account,dc=example,dc=com", "uid=slaughter,o=unaffiliated,dc=example,dc=org"), permission=c("write", "changePermission")) sciObj <- addAccessRule(sciObj, accessRules) dp <- addMember(dp, sciObj) expect_true(is.element(sciObj@sysmeta@identifier, getIdentifiers(dp))) # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) expect_match(metadataObj@sysmeta@identifier, "urn:uuid") dp <- addMember(dp, metadataObj) expect_true(is.element(metadataObj@sysmeta@identifier, getIdentifiers(dp))) # Associate the metadata object with the science object it describes dp <- insertRelationship(dp, subjectID=getIdentifier(metadataObj), objectIDs=getIdentifier(sciObj)) # Upload the data package to DataONE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, preferredNodes=preferredNodes, public=TRUE, accessRules=accessRules) expect_true(!is.null(resourceMapId)) } else { skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataPackage works for a minimal DataPackage", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. 
skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") expect_false(is.null(d1cTest)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) expect_match(metadataObj@sysmeta@identifier, "urn:uuid") dp <- addMember(dp, metadataObj) expect_true(is.element(metadataObj@sysmeta@identifier, getIdentifiers(dp))) # Upload the data package to DataONE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, preferredNodes=preferredNodes, public=TRUE) expect_true(!is.null(resourceMapId)) } else { skip("This test requires valid authentication.") } }) test_that("D1Client updateDataPackage works", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. skip_on_cran() library(dataone) library(datapack) library(xml2) library(digest) # Create a csv file for the science object #d1c <- D1Client("STAGING", "urn:node:mnStageUCSB2") #d1c <- D1Client("STAGING2", "urn:node:mnTestKNB") #d1c <- D1Client("DEV2", "urn:node:mnDevUCSB1") expect_false(is.null(d1cTestKNB)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTestKNB@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTestKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile) metadataId <- getIdentifier(metadataObj) # Associate the metadata object with each data object using the 'insertRelationships' method. # Since a relationship type (the predicate argument) is not specified, the default relationship # of 'cito:documents' is used, to indicate the the metadata object documents each data object. # See "http://purl.org/spar/cito", for further information about the "Citation Type Ontology". dp <- addMember(dp, metadataObj) sourceData <- system.file("extdata/sample.csv", package="dataone") sourceObj <- new("DataObject", format="text/csv", filename=sourceData) dp <- addMember(dp, sourceObj, metadataObj) resolveURL <- sprintf("%s/%s/object", d1cTestKNB@mn@baseURL, d1cTestKNB@mn@APIversion) # Update the distribution URL in the metadata with the identifier that has been assigned to # this DataObject. 
This provides a direct link between the detailed information for this package # member and DataONE, which will assist DataONE in accessing and displaying this detailed information. xpathToURL <- "//dataTable/physical/distribution[../objectName/text()=\"OwlNightj.csv\"]/online/url" newURL <- sprintf("%s/%s", resolveURL, getIdentifier(sourceObj)) dp <- updateMetadata(dp, metadataId, xpath=xpathToURL, newURL) metadataId <- selectMember(dp, name="sysmeta@formatId", value="eml://ecoinformatics.org/eml-2.1.1") metadataObj <- getMember(dp, metadataId) progFile <- system.file("extdata/filterSpecies.R", package="dataone") progObj <- new("DataObject", format="application/R", filename=progFile, mediaType="text/x-rsrc") dp <- addMember(dp, progObj, metadataObj) xpathToURL <- "//otherEntity/physical/distribution[../objectName/text()=\"filterObs.R\"]/online/url" newURL <- sprintf("%s/%s", resolveURL, getIdentifier(progObj)) dp <- updateMetadata(dp, metadataId, xpath=xpathToURL, newURL) metadataId <- selectMember(dp, name="sysmeta@formatId", value="eml://ecoinformatics.org/eml-2.1.1") metadataObj <- getMember(dp, metadataId) outputData <- system.file("extdata/filteredSpecies.csv", package="dataone") outputObj <- new("DataObject", format="text/csv", filename=outputData) dp <- addMember(dp, outputObj, metadataObj) xpathToURL <- "//dataTable/physical/distribution[../objectName/text()=\"Strix-occidentalis-obs.csv\"]/online/url" newURL <- sprintf("%s/%s", resolveURL, getIdentifier(outputObj)) dp <- updateMetadata(dp, metadataId, xpath=xpathToURL, newURL) # Upload the data package to DataONE newPkg <- uploadDataPackage(d1cTestKNB, dp, public=TRUE, quiet=TRUE, as="DataPackage") pkgId <- newPkg@resmapId expect_true(!is.na(pkgId)) # Sleep for 90 secondsl to let indexing finish for the package. Because we are imposing a wait on this # package, this test is not suitable for use in CRAN. } else { skip("This test requires valid authentication.") } }) test_that("D1Client updateDataPackage with new package using previously uploaded objects works", { # Test the typical workflow of creating a DataONE package by first uploading all data objects for the package, # then creating a package from the already uploaded objects. 
skip_on_cran() library(dataone) library(datapack) library(xml2) library(digest) expect_false(is.null(d1cTestKNB)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTestKNB@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTestKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") # First upload objects to DataONE that will be collected into a package sourceData <- system.file("extdata/OwlNightj.csv", package="dataone") sourceObj <- new("DataObject", format="text/csv", filename=sourceData) sourceObj <- addAccessRule(sourceObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") sourceId <- uploadDataObject(d1cTestKNB, sourceObj, public=T, quiet=T) expect_true(!is.na(sourceId)) progFile <- system.file("extdata/filterObs.R", package="dataone") progObj <- new("DataObject", format="application/R", filename=progFile, mediaType="text/x-rsrc") progObj <- addAccessRule(progObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") progId <- uploadDataObject(d1cTestKNB, progObj, public=T, quiet=T) expect_true(!is.na(progId)) outputData <- system.file("extdata/Strix-occidentalis-obs.csv", package="dataone") outputObj <- new("DataObject", format="text/csv", filename=outputData) outputObj <- addAccessRule(outputObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") outputId <- uploadDataObject(d1cTestKNB, outputObj, public=T, quiet=T) expect_true(!is.na(outputId)) # Create a new package, and download each member (lazyLoaded) that was just uploaded, then add them # to the package and upload. This workflow does not require that package members are downloaded with # lazyLoad, this is done here just for efficiency. If a package member is downloaded without lazyLoad, # it will not be-reuploaded when the package is uploaded, unless it has been updated (i.e. updated contents, # or sysmeta). 
pkg <- new("DataPackage") # Create metadata object that describes the package emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile) metadataObj <- addAccessRule(metadataObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") pkg <- addMember(pkg, metadataObj) metadataId <- getIdentifier(metadataObj) newSourceObj <- getDataObject(d1cTestKNB, sourceId, lazyLoad=T, quiet=T) pkg <- addMember(pkg, newSourceObj, metadataObj) newProgObj <- getDataObject(d1cTestKNB, progId, lazyLoad=T, quiet=T) pkg <- addMember(pkg, newProgObj, metadataObj) newOutputObj <- getDataObject(d1cTestKNB, outputId, lazyLoad=T, quiet=T) pkg <- addMember(pkg, newOutputObj, metadataObj) resourceMapId <- uploadDataPackage(d1cTestKNB, pkg, public=TRUE, quiet=T) expect_false(is.na(resourceMapId)) # Now test that we can download the newly created package and add an existing object # Now add a new package member that was omitted from the original package auxFile <- system.file("extdata/WeatherInf.txt", package="dataone") auxObj <- new("DataObject", format="text/plain", file=auxFile) auxObj <- addAccessRule(auxObj, "http://orcid.org/0000-0002-2192-403X", "changePermission") auxId <- uploadDataObject(d1cTestKNB, auxObj, public=T, quiet=T) expect_true(!is.na(auxId)) # Have to sleep just a bit, as indexing can take awhile to complete # Keep trying for ten seconds for the package to be indexed done <- FALSE trys <- 0 while(!done) { if(trys > 10) break Sys.sleep(1) queryParams <- sprintf('q=id:"%s"', resourceMapId) result <- query(d1cTestKNB@mn, queryParams, as="list") # Now download the package that was just created, and ensure that the checksums are all the # requested type. if(length(result) == 0) { trys <- trys + 1 next } else { done <- TRUE } newAuxObj <- getDataObject(d1cTestKNB, auxId, lazyLoad=T, quiet=T) editPkg <- getDataPackage(d1cTestKNB, identifier=resourceMapId, lazyLoad=TRUE, quiet=TRUE) } expect_true(done) editPkg <- addMember(editPkg, newAuxObj, metadataObj) newResmapId <- uploadDataPackage(d1cTestKNB, editPkg, public=TRUE, quiet=T) expect_false(is.na(newResmapId)) expect_false(resourceMapId == newResmapId) } else { skip("This test requires valid authentication.") } }) test_that("D1Client getDataPackage with checksumAlgorithm specified works", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. 
skip_on_cran() library(dataone) library(datapack) library(xml2) library(digest) # Create a csv file for the science object expect_false(is.null(d1cTestKNB)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTestKNB@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTestKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") sha256 <- "SHA-256" md5 <- "MD5" checksumAlgorithm <- sha256 dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile, checksum=checksumAlgorithm) metadataId <- getIdentifier(metadataObj) dp <- addMember(dp, metadataObj) sourceData <- system.file("extdata/sample.csv", package="dataone") sourceObj <- new("DataObject", format="text/csv", filename=sourceData, checksum=checksumAlgorithm) dp <- addMember(dp, sourceObj, metadataObj) progFile <- system.file("extdata/filterSpecies.R", package="dataone") progObj <- new("DataObject", format="application/R", filename=progFile, mediaType="text/x-rsrc", checksum=checksumAlgorithm) dp <- addMember(dp, progObj, metadataObj) outputData <- system.file("extdata/filteredSpecies.csv", package="dataone") outputObj <- new("DataObject", format="text/csv", filename=outputData, checksum=checksumAlgorithm) dp <- addMember(dp, outputObj, metadataObj) # Upload the data package to DataONE pkgId <- uploadDataPackage(d1cTestKNB, dp, public=TRUE, quiet=TRUE) expect_true(!is.na(pkgId)) # Have to sleep just a bit, as indexing can take awhile to complete # Keep trying for ten seconds for the package to be indexed done <- FALSE trys <- 0 while(!done) { if(trys > 10) break Sys.sleep(1) queryParams <- sprintf('q=id:"%s"', pkgId) result <- query(d1cTestKNB@mn, queryParams, as="list") # Now download the package that was just created, and ensure that the checksums are all the # requested type. if(length(result) == 0) { trys <- trys + 1 next } else { done <- TRUE } pkg <- getDataPackage(d1cTestKNB, identifier=pkgId, lazyLoad=TRUE, limit="0MB", quiet=TRUE, checksumAlgorithm=sha256) algorithms <- getValue(pkg, name="sysmeta@checksumAlgorithm") expect_true(all(algorithms == sha256)) # Download the package again, requesting a different checksum type, and ensure that the checksums are all the # new type. 
pkg <- getDataPackage(d1cTestKNB, identifier=pkgId, lazyLoad=TRUE, limit="0MB", quiet=TRUE, checksumAlgorithm=md5) algorithms <- getValue(pkg, name="sysmeta@checksumAlgorithm") expect_true(all(algorithms==md5)) } expect_true(done) } else { skip("This test requires valid authentication.") } }) test_that("D1Client listMemberNodes() works", { skip_on_cran() library(dataone) #d1c <- D1Client("PROD") nodelist <- listMemberNodes(d1cProd) expect_true(length(nodelist) > 0) expect_match(class(nodelist[[1]]), "Node") expect_match(nodelist[[1]]@identifier, "urn:node:") expect_match(nodelist[[1]]@type, "cn|mn") expect_match(nodelist[[1]]@state, "up") expect_match(nodelist[[length(nodelist)]]@identifier, "urn:node:") expect_match(nodelist[[length(nodelist)]]@baseURL, "http") expect_match(nodelist[[length(nodelist)]]@subject, "urn:node:") expect_match(nodelist[[length(nodelist)]]@type, "cn|mn") }) test_that("D1Client updateDataPackage works for a metadata only DataPackage", { skip_on_cran() # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. # This is a long running test, so it should be run manually, which means # running "test_file("tests/testthat/packageUpdate.R"), as this file is not # run by default by testthat due to the name not including 'test*' library(dataone) library(datapack) library(xml2) library(digest) # Create a csv file for the science object expect_false(is.null(d1cTest)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/strix-pacific-northwest.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", filename=emlFile) metadataId <- getIdentifier(metadataObj) # Associate the metadata object with each data object using the 'insertRelationships' method. # Since a relationship type (the predicate argument) is not specified, the default relationship # of 'cito:documents' is used, to indicate the the metadata object documents each data object. # See "http://purl.org/spar/cito", for further information about the "Citation Type Ontology". dp <- addMember(dp, metadataObj) pkgId <- uploadDataPackage(d1cTest, dp, public=TRUE, quiet=TRUE) expect_true(!is.na(pkgId)) done <- FALSE trys <- 0 while(!done) { if(trys > 10) break Sys.sleep(1) queryParams <- sprintf('q=id:"%s"', pkgId) result <- query(d1cTest@mn, queryParams, as="list") # Now download the package that was just created, and ensure that the checksums are all the # requested type. if(length(result) == 0) { trys <- trys + 1 next } else { done <- TRUE } # Test the download by specifying the metadata id of the package. The 'getDataPackage()' function # should be able determine the package id based on the metadata id. 
testPkg <- getDataPackage(d1cTest, metadataId, quiet=T) expect_equal(pkgId, testPkg@resmapId) } expect_true(done) } else { skip("This test requires valid authentication.") } }) test_that("D1Client downloadObject", { skip_on_cran() library(dataone) #cli <- D1Client("PROD", "urn:node:KNB") expect_false(is.null(d1cKNB)) expect_match(class(d1cKNB), "D1Client") expect_match(d1cKNB@cn@baseURL, "https://cn.dataone.org/cn") am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cKNB@mn)) if(authValid) { # Skip if Mac OS and X.509 Certificate if(dataone:::getAuthMethod(am, d1cKNB@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") } # Try downloading a known object from the PROD environment pid <- "solson.5.1" path <- tempdir() file <- downloadObject(d1cKNB, pid, path) expect_match(class(file), "path") expect_true(file.exists(file)) unlink(file) }) test_that("D1Client uploadDataPackage public argument works", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object testdf <- data.frame(x=1:10,y=11:20) csvfile <- tempfile(pattern = "file", tmpdir = tempdir(), fileext = ".csv") write.csv(testdf, csvfile, row.names=FALSE) expect_false(is.null(d1cTest)) preferredNodes <- NA # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) expect_match(metadataObj@sysmeta@identifier, "urn:uuid") # give metadata object an access policy without public read metadataObj <- addAccessRule(metadataObj, "CN=arctic-data-admins,DC=dataone,DC=org", "read") metadataObj <- addAccessRule(metadataObj, "CN=arctic-data-admins,DC=dataone,DC=org", "write") metadataObj <- addAccessRule(metadataObj, "CN=arctic-data-admins,DC=dataone,DC=org", "changePermission") dp <- addMember(dp, metadataObj) expect_true(is.element(metadataObj@sysmeta@identifier, getIdentifiers(dp))) # Upload the data package to DataONE with public set to TRUE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, preferredNodes=preferredNodes, public=TRUE) expect_true(!is.null(resourceMapId)) # check that all members of the package have public read sys_rm <- getSystemMetadata(d1cTest@mn, resourceMapId) sys_mo <- getSystemMetadata(d1cTest@mn, metadataObj@sysmeta@identifier) expect_true("public" %in% sys_rm@accessPolicy$subject) expect_true("public" %in% sys_mo@accessPolicy$subject) } else { skip("This test requires valid authentication.") } }) test_that("D1Client uploadDataPackage doesn't change the rightsHolder", { # Test that a DataPackage with only one member (metadata in this case) and not # user defined relationships is created and uploaded correctly. 
skip_on_cran() library(dataone) library(datapack) # Create a csv file for the science object # Set 'subject' to authentication subject, if available, so we will have permission to change this object am <- AuthenticationManager() suppressMessages(authValid <- dataone:::isAuthValid(am, d1cTest@mn)) if (authValid) { if(dataone:::getAuthMethod(am, d1cTest@mn) == "cert" && grepl("apple-darwin", sessionInfo()$platform)) skip("Skip authentication w/cert on Mac OS X") dp <- new("DataPackage") # Create metadata object that describes science data emlFile <- system.file("extdata/sample-eml.xml", package="dataone") metadataObj <- new("DataObject", format="eml://ecoinformatics.org/eml-2.1.1", mnNodeId=getMNodeId(d1cTest), filename=emlFile) # set rightsHolder on metadata to a test ORCID metadataObj@sysmeta@rightsHolder <- "http://orcid.org/0000-0000-0000-0000" dp <- addMember(dp, metadataObj) # set rightsHolder on resource map to a test ORCID dp@sysmeta@rightsHolder <- "http://orcid.org/0000-0000-0000-0000" # Upload the data package to DataONE with public set to TRUE resourceMapId <- uploadDataPackage(d1cTest, dp, replicate=TRUE, numberReplicas=1, public=TRUE) # check that all members of the package have public read sys_rm <- getSystemMetadata(d1cTest@mn, resourceMapId) sys_mo <- getSystemMetadata(d1cTest@mn, metadataObj@sysmeta@identifier) expect_equal("http://orcid.org/0000-0000-0000-0000", sys_rm@rightsHolder) expect_equal("http://orcid.org/0000-0000-0000-0000", sys_rm@rightsHolder) } else { skip("This test requires valid authentication.") } })
context("test-fitgllvm") test_that("basic data fitting works", { data(microbialdata) X <- microbialdata$Xenv[1:30,] y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] f0<-gllvm(y, family = poisson(), seed = 999) f1<-gllvm(y, family = "negative.binomial", seed = 999) f2<-gllvm(y, X, formula = ~pH + Phosp, family = "negative.binomial", seed = 999) expect_true(round(mean(f0$params$beta0), digits = 1)-2<0.01) expect_true(round(mean(f1$params$beta0), digits = 1)-2<0.01) expect_true(round(mean(f2$params$Xcoef[,1]), digits = 1)-0.1<0.01) }) test_that("fourth corner models works", { data(microbialdata) X <- microbialdata$Xenv[1:30,2:3] y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] TR <- matrix(rnorm(15)); colnames(TR) <- "t1" ff0<-gllvm(y, X, TR=TR, family = "negative.binomial", seed = 999) ff1<-gllvm(y, X, TR=TR, formula = ~pH + pH:t1, family = "negative.binomial", seed = 999) expect_true(is.finite(ff0$logL)) expect_true(is.finite(ff1$logL)) }) test_that("row effects works", { data(microbialdata) y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] fr0<-gllvm(y, family = "negative.binomial", seed = 999, row.eff = "fixed", num.lv = 1) fr1<-gllvm(y, family = "negative.binomial", seed = 999, row.eff = "random", num.lv = 0) result<-c(0.34, 0.29) names(result)<-c("AB3", "sigma") expect_true(round(fr0$params$row.params[2], digits = 2)- result[1]<0.1) expect_true(round(fr1$params$sigma, digits = 2)- result[2]<0.1) }) test_that("binomial works", { data(microbialdata) y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] y01<-(y>0)*1 fb0<-gllvm(y01, family = binomial(link = "logit"), seed = 999, method = "LA", num.lv = 1) fb2<-gllvm(y01, family = binomial(link = "probit"), seed = 999) expect_true(is.finite(fb0$logL)) expect_true(is.finite(fb2$logL)) }) test_that("ZIP works", { data(microbialdata) y <- microbialdata$Y[1:10, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[301:306]] fz0<-gllvm(y, family = "ZIP", seed = 999, method = "LA") expect_equal( length(fz0$params$beta0), 6 ) expect_true( is.finite(fz0$logL)) })
/tests/testthat/test-fitgllvm.R
no_license
Raykova/gllvm
R
false
false
2,283
r
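In the row-effect test above, the fitted object is read from different slots depending on how the row effects are specified. A minimal sketch of that distinction, using the same microbialdata subset; the comments describe the slots as they are used in the test, not the full gllvm documentation.

library(gllvm)
data(microbialdata)
y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]]
fr_fixed  <- gllvm(y, family = "negative.binomial", seed = 999, row.eff = "fixed",  num.lv = 1)
fr_random <- gllvm(y, family = "negative.binomial", seed = 999, row.eff = "random", num.lv = 0)
fr_fixed$params$row.params   # site-level (row) coefficients when row.eff = "fixed"
fr_random$params$sigma       # standard deviation of the random row effect when row.eff = "random"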
context("test-fitgllvm") test_that("basic data fitting works", { data(microbialdata) X <- microbialdata$Xenv[1:30,] y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] f0<-gllvm(y, family = poisson(), seed = 999) f1<-gllvm(y, family = "negative.binomial", seed = 999) f2<-gllvm(y, X, formula = ~pH + Phosp, family = "negative.binomial", seed = 999) expect_true(round(mean(f0$params$beta0), digits = 1)-2<0.01) expect_true(round(mean(f1$params$beta0), digits = 1)-2<0.01) expect_true(round(mean(f2$params$Xcoef[,1]), digits = 1)-0.1<0.01) }) test_that("fourth corner models works", { data(microbialdata) X <- microbialdata$Xenv[1:30,2:3] y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] TR <- matrix(rnorm(15)); colnames(TR) <- "t1" ff0<-gllvm(y, X, TR=TR, family = "negative.binomial", seed = 999) ff1<-gllvm(y, X, TR=TR, formula = ~pH + pH:t1, family = "negative.binomial", seed = 999) expect_true(is.finite(ff0$logL)) expect_true(is.finite(ff1$logL)) }) test_that("row effects works", { data(microbialdata) y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] fr0<-gllvm(y, family = "negative.binomial", seed = 999, row.eff = "fixed", num.lv = 1) fr1<-gllvm(y, family = "negative.binomial", seed = 999, row.eff = "random", num.lv = 0) result<-c(0.34, 0.29) names(result)<-c("AB3", "sigma") expect_true(round(fr0$params$row.params[2], digits = 2)- result[1]<0.1) expect_true(round(fr1$params$sigma, digits = 2)- result[2]<0.1) }) test_that("binomial works", { data(microbialdata) y <- microbialdata$Y[1:30, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[21:35]] y01<-(y>0)*1 fb0<-gllvm(y01, family = binomial(link = "logit"), seed = 999, method = "LA", num.lv = 1) fb2<-gllvm(y01, family = binomial(link = "probit"), seed = 999) expect_true(is.finite(fb0$logL)) expect_true(is.finite(fb2$logL)) }) test_that("ZIP works", { data(microbialdata) y <- microbialdata$Y[1:10, order(colMeans(microbialdata$Y > 0), decreasing = TRUE)[301:306]] fz0<-gllvm(y, family = "ZIP", seed = 999, method = "LA") expect_equal( length(fz0$params$beta0), 6 ) expect_true( is.finite(fz0$logL)) })
## code for the pie chart
library("plotrix")   # load plotrix (loaded by the original script; base pie() below does not need it)
x <- c(10, 5, 100, 4)   # population data (in lakhs) for each city
lbl <- c("Raipur", "Bilaspur", "Delhi", "Goa")
perc <- round(100 * x / sum(x), 1)   # percentage share for each city
png(file = "2Dpiepercentage.png")   # output file name
pie(x, labels = perc, main = "city pie chart based on their population", col = rainbow(length(x)))   # plot the chart with percentage labels
legend("topright", lbl, cex = 0.8, fill = rainbow(length(x)))
dev.off()   # save the file
/d object.R
no_license
AMITPKR/R_chart-graphics
R
false
false
500
r
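The script above loads plotrix but only calls base pie(). If a 3D chart was intended, plotrix provides pie3D(); the sketch below reuses the same data under that assumption and appends a percent sign to the labels. Treat it as a sketch rather than a tested replacement.

library(plotrix)
x <- c(10, 5, 100, 4)                      # population (in lakhs) per city
lbl <- c("Raipur", "Bilaspur", "Delhi", "Goa")
perc <- round(100 * x / sum(x), 1)         # percentage share per city
png(file = "3Dpiepercentage.png")
pie3D(x, labels = paste0(perc, "%"), main = "City population share", col = rainbow(length(x)))
legend("topright", lbl, cex = 0.8, fill = rainbow(length(x)))
dev.off()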
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/ResampleDesc.R \name{makeResampleDesc} \alias{ResampleDesc} \alias{makeResampleDesc} \title{Create a description object for a resampling strategy.} \usage{ makeResampleDesc(method, predict = "test", ..., stratify = FALSE, stratify.cols = NULL) } \arguments{ \item{method}{[\code{character(1)}]\cr \dQuote{CV} for cross-validation, \dQuote{LOO} for leave-one-out, \dQuote{RepCV} for repeated cross-validation, \dQuote{Bootstrap} for out-of-bag bootstrap, \dQuote{Subsample} for subsampling, \dQuote{Holdout} for holdout.} \item{predict}{[\code{character(1)}]\cr What to predict during resampling: \dQuote{train}, \dQuote{test} or \dQuote{both} sets. Default is \dQuote{test}.} \item{...}{[any]\cr Further parameters for strategies.\cr \describe{ \item{iters [\code{integer(1)}]}{Number of iterations, for \dQuote{CV}, \dQuote{Subsample} and \dQuote{Boostrap}} \item{split [\code{numeric(1)}]}{Proportion of training cases for \dQuote{Holdout} and \dQuote{Subsample} between 0 and 1. Default is 2/3.} \item{reps [integer(1)]}{Repeats for \dQuote{RepCV}. Here \code{iters = folds * reps}. Default is 10.} \item{folds [integer(1)]}{Folds in the repeated CV for \code{RepCV}. Here \code{iters = folds * reps}. Default is 10.} }} \item{stratify}{[\code{logical(1)}]\cr Should stratification be done for the target variable? For classification tasks, this means that the resampling strategy is applied to all classes individually and the resulting index sets are joined to make sure that the proportion of observations in each training set is as in the original data set. Useful for imbalanced class sizes. For survival tasks stratification is done on the events, resulting training sets with comparable censoring rates.} \item{stratify.cols}{[\code{character}]\cr Stratify on specific columns referenced by name. All columns have to be factors. Note that you have to ensure yourself that stratification is possible, i.e. that each strata contains enough observations. This argument and \code{stratify} are mutually exclusive.} } \value{ [\code{\link{ResampleDesc}}]. } \description{ A description of a resampling algorithm contains all necessary information to create a \code{\link{ResampleInstance}}, when given the size of the data set. } \details{ Some notes on some special strategies: \describe{ \item{Repeated cross-validation}{Use \dQuote{RepCV}. Then you have to set the aggregation function for your preferred performance measure to \dQuote{testgroup.mean} via \code{\link{setAggregation}}.} \item{B632 bootstrap}{Use \dQuote{Bootstrap} for bootstrap and set predict to \dQuote{both}. Then you have to set the aggregation function for your preferred performance measure to \dQuote{b632} via \code{\link{setAggregation}}.} \item{B632+ bootstrap}{Use \dQuote{Bootstrap} for bootstrap and set predict to \dQuote{both}. Then you have to set the aggregation function for your preferred performance measure to \dQuote{b632plus} via \code{\link{setAggregation}}.} \item{Fixed Holdout set}{Use \code{\link{makeFixedHoldoutInstance}}.} } Object slots: \describe{ \item{id [\code{character(1)}]}{Name of resampling strategy.} \item{iters [\code{integer(1)}]}{Number of iterations. Note that this is always the complete number of generated train/test sets, so for a 10-times repeated 5fold cross-validation it would be 50.} \item{predict [\code{character(1)}]}{See argument.} \item{stratify [\code{logical(1)}]}{See argument.} \item{All parameters passed in ... 
under the respective argument name}{See arguments.} } } \examples{ # Bootstrapping makeResampleDesc("Bootstrap", iters = 10) makeResampleDesc("Bootstrap", iters = 10, predict = "both") # Subsampling makeResampleDesc("Subsample", iters = 10, split = 3/4) makeResampleDesc("Subsample", iters = 10) # Holdout a.k.a. test sample estimation makeResampleDesc("Holdout") } \seealso{ Other resample: \code{\link{ResampleInstance}}, \code{\link{makeResampleInstance}}; \code{\link{ResamplePrediction}}; \code{\link{ResampleResult}}; \code{\link{bootstrapB632}}, \code{\link{bootstrapB632plus}}, \code{\link{bootstrapOOB}}, \code{\link{crossval}}, \code{\link{holdout}}, \code{\link{repcv}}, \code{\link{resample}}, \code{\link{subsample}} }
/man/makeResampleDesc.Rd
no_license
dickoa/mlr
R
false
false
4,319
rd
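The details section above stresses that repeated CV and the B632 bootstrap variants only give the documented estimators when paired with the matching aggregation function via setAggregation(). A short sketch of that pairing, assuming mlr is attached; mmce is just one example of a performance measure.

library(mlr)
# Repeated cross-validation: aggregate within each repetition, then average over repetitions
rdesc.repcv <- makeResampleDesc("RepCV", folds = 10, reps = 5)
mmce.repcv <- setAggregation(mmce, testgroup.mean)
# B632 bootstrap: predictions on both the training and the test sets are required
rdesc.b632 <- makeResampleDesc("Bootstrap", iters = 10, predict = "both")
mmce.b632 <- setAggregation(mmce, b632)
# These would then be passed to resample(), e.g. resample(learner, task, rdesc.repcv, measures = mmce.repcv)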
n_biostrat=62
biostrat=1:62
# for biostrat data (taxa FADs/LADs), the biostrat variable gives the numbers of the columns with taxa
n_pmag=1
pmag=63
# pmag is a list of the column(s) with paleomagnetic signals, or really any binary data; NA values are not counted
n_dates=3
dates=matrix(c(64,0,65,0,1000,65,0,66,0,1000,66,0,67,0,1000),nrow=3,byrow=TRUE)
# each row of the dates matrix is a set of data to be entered into the passing penalty
# the first entry on each row is the column of the lower variable
# the second entry on a row is the data type: 0 - singular date, 1 - FAD, 2 - LAD
# the third and fourth entries on each row are the column and type of the second variable
# the fifth value on each row is the weight
n_ashes=2
ashes=matrix(c(68,100,69,100),nrow=2,byrow=TRUE)
n_continuous=2
continuous=matrix(c(70,5,71,5),nrow=2,byrow=TRUE)
penalty_spec=list(n_biostrat=n_biostrat,biostrat=biostrat,n_pmag=n_pmag,pmag=pmag,n_dates=n_dates,dates=dates,n_ashes=n_ashes,ashes=ashes,n_continuous=n_continuous,continuous=continuous)
# penalty_spec is the list format for the penalty structure in the form of HA, meant to handle multiple penalties
# the above example is set up to handle the Riley system with this penalty structure
/attachment_2 (2) (1)/penalty_spec_template.R
no_license
97joseph/R_to_C-Code-Conversion
R
false
false
1,220
r
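The comments above describe the five fields packed into each row of the dates matrix. Purely to make that layout explicit, the same matrix could be built with labelled columns, as sketched below; the column names are illustrative and are not required by the code that consumes penalty_spec.

dates <- rbind(
  c(col1 = 64, type1 = 0, col2 = 65, type2 = 0, weight = 1000),
  c(col1 = 65, type1 = 0, col2 = 66, type2 = 0, weight = 1000),
  c(col1 = 66, type1 = 0, col2 = 67, type2 = 0, weight = 1000)
)
# type codes: 0 = singular date, 1 = FAD, 2 = LAD; the last field is the weight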
######################### TIM subtree <- function(object,C) UseMethod("subtree") prune <- function(object,v=5,sd.mult=0.5,plot=TRUE) UseMethod("prune") get.w <- function(object,C) UseMethod("get.w") get.t <- function(object,C) UseMethod("get.t") thresh <- function(object,data,C,postmed=TRUE) UseMethod("thresh") treethresh <- function(data,beta,criterion="score",control=list(),rho=sys.frame(sys.parent())) { use.beta <- FALSE use.data <- NULL if (!missing(beta) && !is.null(beta)) { use.beta <- TRUE use.data <- as.array(beta) } else { if (!missing(data) && !is.null(data)) { use.data <- as.array(data) } } if (is.null(use.data)) stop("You either need to specify the original data sequence or the betas.") default.control <- list(max.iter=30, max.depth=10, minimum.width=3, minimum.size=5^length(dim(use.data)), tolerance.grad=1e-8, tolerance=1e-6, absolute.improvement=-Inf, relative.improvement=-Inf, absolute.criterion=0, beta.max=1e5, a=0.5, crit.signif=0.90, lr.signif=0.5, first.step=0.2) if (!is.list(control)) stop("The control argument must be a list.") for (name in names(default.control)) { if (!(name %in% names(control))) control[[name]] <- default.control[[name]] } list.diff <- setdiff(names(control),names(default.control)) if (length(list.diff)>0) { warning.text <- "The following arguments in the control list were ignored:" for (elt in list.diff) warning.text <- paste(warning.text," \"",elt,"\"",sep="") warning(warning.text) } if (!is.function(criterion)) { possible.criteria <- c("score","likelihood","heuristic") criterion <- possible.criteria[pmatch(criterion,possible.criteria)] } data.dim <- dim(use.data) result<-.External("fit_tree",data=as.numeric(use.data),dims=as.integer(length(data.dim)),size=as.integer(data.dim),use.beta=as.logical(use.beta),criterion=criterion,control=control,rho=rho,PACKAGE="treethresh") names(result) <- c("membership","splits","beta") result$membership <- array(result$membership,dim=data.dim) result$beta <- array(result$beta,dim=data.dim) dimnames(result$splits)=list(NULL,c("id","parent.id","dim","pos","left.child.id","right.child.id","crit","w","t","loglikelihood","alpha","C")) if (!all(is.na(result$splits[,"alpha"]))) result$splits[,"C"] <- result$splits[,"alpha"]/max(result$splits[,"alpha"],na.rm=TRUE) else result$splits[,"C"] <- NA result$splits[,"t"] <- tfromw(result$splits[,"w"]) result$control <- control result$criterion <- criterion if (!missing(data)) result$data <- data class(result) <- "treethresh" result } subtree.treethresh <- function(object,C) { if (missing(C) || is.null(C)) return(object) if (all(is.na(object$splits[,"C"]))) return(object) delete.me <- rep(FALSE,nrow(object$splits)) for (i in nrow(object$splits):2) { if (object$splits[i,"C"]<C) { delete.me[i] <- TRUE parent.id <- abs(object$splits[i,"parent.id"]) object$splits[parent.id==object$splits[,"id"],3:7] <- NA object$membership[object$membership==object$splits[i,"id"]] <- parent.id } } object$splits <- object$splits[!delete.me,,drop=FALSE] object } get.w.treethresh <- function(object,C) { if (!missing(C)) if (!is.null(C)) object <- subtree.treethresh(object,C) map <- numeric(max(object$splits[,"id"])) map[object$splits[,"id"]] <- 1:nrow(object$splits) array(object$splits[map[as.vector(object$membership)],"w"],dim=dim(object$membership)) } get.t.treethresh <- function(object,C) { if (!missing(C)) if (!is.null(C)) object <- subtree.treethresh(object,C) map <- numeric(max(object$splits[,"id"])) map[object$splits[,"id"]] <- 1:nrow(object$splits) 
array(dim=dim(object$membership),object$splits[map[as.vector(object$membership)],"t"]) } # Compute best C in cross-validation prune.treethresh <- function(object,v=5,sd.mult=0.5,plot=TRUE) { sizes <- function(object) { C <- object$splits[-1,"C"] C.values <- sort(unique(C)) c(apply(outer(C,C.values,function(a,b) as.numeric(a>=b)),2,sum)/2+1,1) } if (is.na(v)) return(object) if (all(is.na(object$splits[,"C"]))) return(object) # Split the dataset into v portions permut <- sample(length(object$data)) sample.size <- length(object$data)/v C.values <- sort(as.numeric(na.omit(c(unique(object$splits[,"C"]),2)))) results <- matrix(nrow=v,ncol=length(C.values)) to <- 0 # Fitting v cv models for (j in 1:v) { from <- to + 1 to <- round(sample.size*j) test.index <- permut[from:to] a.blanked <- object$data[test.index] if (is.function(object$criterion) || (object$criterion=="heuristic")) { a.data <- object$data a.data[test.index] <- NA a.tree <- treethresh(data=a.data,control=object$control,criterion=object$criterion) } else { a.beta <- object$beta a.beta[test.index] <- NA a.tree <- treethresh(beta=a.beta,control=object$control,criterion=object$criterion) } for (k in 1:length(C.values)) { a.tree <- subtree.treethresh(a.tree,C=C.values[k]) w <- get.w.treethresh(a.tree,C=NULL)[test.index] beta <- pmin(1e5,object$beta[test.index]) results[j,k] <- sum(log(1+w*beta)) } } results <- results - results[,ncol(results)] results.mean <- apply(results,2,mean) results.sd <- apply(results,2,sd) lower.bound <- (results.mean-sd.mult*results.sd)[which.max(results.mean)] if (plot) { results.range <- c(min(results.mean-results.sd),max(results.mean+results.sd)) ns <- 1:length(results.mean) plot(ns,results.mean,xaxt="n",ylab="Loglikelihood from CV (+const)",type="o",xlab="C",ylim=results.range) segments(ns, results.mean - results.sd, ns, results.mean + results.sd) axis(1, at = ns, labels = c(format(C.values[-length(C.values)],digits=3),">1")) mtext("number of regions", side = 3, line = 3) axis(3, at = ns, labels = as.character(sizes(object))) abline(h=lower.bound,lty=3) } subtree.treethresh(object,C.values[max(which(results.mean>=lower.bound))]) } thresh.treethresh <- function(object,data,C,postmed=TRUE) { if (missing(data) && !is.null(object$data)) data <- object$data data.dim <- dim(data) if (postmed) { w <- get.w.treethresh(object,C) if (length(data)!=length(w)) stop("Data does not have the right length.") data <- postmed.laplace(data,w) } else { t <- get.t.treethresh(object,C) if (length(data)!=length(t)) stop("Data does not have the right length.") data[abs(data)<t] <- 0 } dim(data) <- data.dim data } ######################### TIM_WAVE # Input for data or betas is a list wtthresh <- function(data,beta,weights,control=list()) { use.beta <- FALSE src.data <- NULL if (!missing(beta) && !is.null(beta)) { use.beta <- TRUE src.data <- beta data <- NULL } else { if (!missing(data) && !is.null(data)) { src.data <- data } } if (missing(weights)) weights <- rep(1,length(src.data)) if (length(weights)!=length(src.data)) stop("There must be a weight for every array in the data list.") if (is.null(src.data)) stop("You either need to specify the original data sequence or the betas.") use.data <- list() data.dim <- length(dim(src.data[[1]])) for (i in 1:length(src.data)) { use.data[[i]] <- as.array(src.data[[i]]) cur.dim <- dim(use.data[[i]]) if (length(unique(cur.dim))!=1) stop("Input data needs to be a list of squared arrays.") if (data.dim!=length(cur.dim)) stop("Input data must have identical dimensions.") } default.control <- 
list(max.iter=30, max.depth=10, minimum.width=4, min.minimum.width=1, minimum.size=8^data.dim, min.minimum.size=4^data.dim,tolerance.grad=1e-8, tolerance=1e-6, absolute.improvement=-Inf, relative.improvement=-Inf, absolute.criterion=0, beta.max=1e5, a=0.5, rescale.quantile=0.5, lr.signif=0.5, first.step=0.2, min.width.scale.factor=1, min.size.scale.factor=1) if (!is.list(control)) stop("The control argument must be a list.") for (name in names(default.control)) { if (!(name %in% names(control))) control[[name]] <- default.control[[name]] } list.diff <- setdiff(names(control),names(default.control)) if (length(list.diff)>0) { warning.text <- "The following arguments in the control list were ignored:" for (elt in list.diff) warning.text <- paste(warning.text," \"",elt,"\"",sep="") warning(warning.text) } result<-.External("fit_tree_wave",data=use.data,dims=data.dim,use.beta=as.logical(use.beta),weights=as.double(weights),control=control, PACKAGE="treethresh") names(result) <- c("splits","details","w","t","membership","beta") dimnames(result$splits)=list(NULL,c("id","parent.id","dim","pos","left.child.id","right.child.id","crit","loglikelihood","alpha","C")) if (!all(is.na(result$splits[,"alpha"]))) { result$splits[,"C"] <- result$splits[,"alpha"]/max(result$splits[,"alpha"],na.rm=TRUE) } else { result$splits[,"C"] <- NA } result$t <- matrix(tfromw(result$w),nrow=nrow(result$w)) result$data <- data result$weights <- weights result$control <- control if (is.null(result$beta)) { result$beta <- beta } else { for (i in 1:length(result$beta)) result$beta[[i]] <- array(result$beta[[i]],dim=dim(result$data[[i]])) } for (i in 1:length(result$membership)) result$membership[[i]] <- array(result$membership[[i]],dim=dim(result$beta[[i]])) class(result) <- "wtthresh" result } subtree.wtthresh <- function(object,C=NULL) { if (missing(C) || is.null(C)) return(object) if (all(is.na(object$splits[,"C"]))) return(object) if (is.null(object$splits)) stop("Not the right type of object") nrow <- nrow(object$splits) old.types <- as.integer(object$splits[,"id"]) result <- .C("prune_tree",splits=as.double(object$splits),nrow=as.integer(nrow),kill=integer(nrow),leaf=as.integer(is.na(object$splits[,"dim"])),membership=as.integer(old.types),C=as.double(C),NAOK=TRUE) # ,package="treethresh") result$leaf <- as.logical(result$leaf) result$kill <- as.logical(result$kill) object$splits[result$leaf,3:7] <- NA object$details[result$leaf] <- NA object$splits <- object$splits[!result$kill,,drop=FALSE] object$details <- object$details[!result$kill,,drop=FALSE] object$w <- object$w[!result$kill,,drop=FALSE] object$t <- object$t[!result$kill,,drop=FALSE] for (i in 1:length(object$membership)) { len <- length(object$membership[[i]]) object$membership[[i]] <- array(.C("update_membership",old.membership=as.integer(object$membership[[i]]),new.membership=integer(len),n=as.integer(len),old.types=as.integer(old.types),new.types=as.integer(result$membership),n.types=as.integer(length(old.types)),NAOK=TRUE)$new.membership,dim=dim(object$membership[[i]])) } object } get.w.wtthresh <- function(object,C) { if (!missing(C)) if (!is.null(C)) object <- subtree.wtthresh(object,C) map <- numeric(max(object$splits[,"id"])) map[object$splits[,"id"]] <- 1:nrow(object$splits) result <- list() for (k in 1:length(object$beta)) { result[[k]] <- array(object$w[map[as.vector(object$membership[[k]])],k],dim=dim(object$membership[[k]])) } result } get.t.wtthresh <- function(object,C) { if (!missing(C)) if (!is.null(C)) object <- subtree.wtthresh(object,C) 
map <- numeric(max(object$splits[,"id"])) map[object$splits[,"id"]] <- 1:nrow(object$splits) result <- list() for (k in 1:length(object$beta)) { result[[k]] <- array(object$t[map[as.vector(object$membership[[k]])],k],dim=dim(object$membership[[k]])) } result } # Compute best C in cross-validation prune.wtthresh <- function(object,v=5,sd.mult=0.5,plot=TRUE) { reep <- function(max,cur,dim) { reep <- 1:max if (dim>1) { for (pos in 2:dim) if (pos<=cur) reep <- rep(reep,each=max) else reep <- rep(reep,max) } reep } create.coords <- function(index,size,dim,factor) { coords <- matrix(nrow=length(index)*factor^dim,ncol=dim) index <- index-1 multiple <- 1 pos <- rep(1,length(index)*factor^dim) for (i in 1:dim) { cur.coords <- index%%size cur.coords <- factor*rep(cur.coords,each=factor^dim)+reep(factor,i,dim) coords[,dim+1-i] <- cur.coords index <- index%/%size pos <- pos+(cur.coords-1)*multiple multiple <- multiple*size*factor } list(coords=coords,pos=pos) } get.sizes <- function(object) { C <- object$splits[-1,"C"] C.values <- sort(unique(C)) c(apply(outer(C,C.values,function(a,b) as.numeric(a>=b)),2,sum)/2+1,1) } if (is.na(v)) return(object) if (all(is.na(object$splits[,"C"]))) return(object) sizes <- sapply(object$beta,length) data.dim <- length(dim(object$beta[[1]])) # Split the dataset into v portions permut <- sample(min(sizes)) sample.size <- min(sizes)/v C.values <- sort(as.numeric(na.omit(c(unique(object$splits[,"C"]),2)))) results <- matrix(0,nrow=v,ncol=length(C.values)) to <- 0 for (j in 1:v) { from <- to + 1 to <- round(sample.size*j) test.index <- permut[from:to] a.beta <- object$beta for (k in 1:length(a.beta)) { a.beta[[k]][create.coords(test.index,min(sizes)^(1/data.dim),data.dim,(sizes[k]/min(sizes))^(1/data.dim))$pos] <- NA } a.tree <- wtthresh(beta=a.beta,control=object$control,weights=object$weights) for (l in 1:length(C.values)) { a.tree <- subtree.wtthresh(a.tree,C=C.values[l]) w <- get.w.wtthresh(a.tree) for (k in 1:length(a.beta)) { test.pos <- create.coords(test.index,min(sizes)^(1/data.dim),data.dim,(sizes[k]/min(sizes))^(1/data.dim))$pos beta <- pmin(1e5,object$beta[[k]][test.pos]) results[j,l] <- results[j,l] + sum(log(1+w[[k]][test.pos]*beta)) } } } results <- results - results[,ncol(results)] results.mean <- apply(results,2,mean) results.sd <- apply(results,2,sd) lower.bound <- (results.mean-sd.mult*results.sd)[which.max(results.mean)] if (plot) { results.range <- c(min(results.mean-results.sd),max(results.mean+results.sd)) ns <- 1:length(results.mean) plot(ns,results.mean,xaxt="n",ylab="Loglikelihood from CV (+const)",type="o",xlab="C",ylim=results.range) segments(ns, results.mean - results.sd, ns, results.mean + results.sd) axis(1, at = ns, labels = c(format(C.values[-length(C.values)],digits=3),">1")) mtext("number of regions", side = 3, line = 3) axis(3, at = ns, labels = as.character(get.sizes(object))) abline(h=lower.bound,lty=3) } subtree.wtthresh(object,C.values[max(which(results.mean>=lower.bound))]) } thresh.wtthresh <- function(object,data,C,postmed=TRUE) { if (missing(data) && !is.null(object$data)) data <- object$data if (postmed) { w <- get.w.wtthresh(object,C) if (length(data)!=length(w)) stop("Data does not have the right length.") for (i in 1:length(data)) data[[i]] <- postmed.laplace(data[[i]],w[[i]]) } else { t <- get.t.wtthresh(object,C) if (length(data)!=length(t)) stop("Data does not have the right length.") for (i in 1:length(data)) data[[i]][abs(data[[i]])<t[[i]]] <- 0 } data } ########################### UTILITY FUNCTIONS 
extract.coefficients.imwd <- function(object,start.level=5) { if (start.level>=object$nlevels) return(list()) types <- c("CD","DD","DC") result <- list() for (level in start.level:object$nlevels) for (type in types) { nm <- lt.to.name(level=level-1,type) result[[nm]] <- array(object[[nm]],dim=rep(2^(level-1),2)) } result } estimate.sdev.imwd <- function(object,dev=mad) { types <- c("CD","DD","DC") mads <- c() for (type in types) { nm <- lt.to.name(level=object$nlevels-1,type) elt <- object[[nm]] our.mads <- dev(elt) mads <- c(mads,as.vector(our.mads)) } median(mads) } insert.coefficients.imwd <- function(object,update) { for (name in names(update)) object[[name]] <- update[[name]] object } extract.coefficients.wd <- function(object,start.level=5) { if (start.level>=object$nlevels) return(list()) result <- list() for (level in start.level:object$nlevels) { foo <- accessD(object,level=level-1) nm <- as.character(level) result[[nm]] <- array(foo,dim=length(foo)) } result } estimate.sdev.wd <- function(object,dev=mad) { elt <- accessD(object,level=object$nlevels-1) dev(elt) } insert.coefficients.wd <- function(object,update) { for (name in names(update)) object <- putD(object,level=as.numeric(name)-1,update[[name]]) object } extract.coefficients <- function(object,start.level=5) UseMethod("extract.coefficients") estimate.sdev <- function(object,dev=mad) UseMethod("estimate.sdev") estimate.sdev.numeric <- function(object, block=8, dev=mad) { if (is.na(block)) return(dev(object)) block <- min(c(block,length(object))) n.blocks <- floor(length(object)/block) our.mads <- numeric(n.blocks) for (i in 1:n.blocks) { dta <- object[block*(i-1)+1:block] our.mads[i] <- dev(dta) } median(our.mads) } insert.coefficients <- function(object, update) UseMethod("insert.coefficients") wavelet.treethresh <- function(object,sdev=NA,dev=mad,start.level=5,levelwise=FALSE,v=5,sd.mult=0.5,postmed=TRUE,...) { # If no sdev is provided estimate it if (is.na(sdev)) { sdev <- estimate.sdev(object,dev=dev) } # Extract the coefficients coefs <- extract.coefficients(object,start.level=start.level) # Rescale the coefficients for (nm in names(coefs)) coefs[[nm]] <- coefs[[nm]] / sdev if (!levelwise) { # JOINT THRESHOLDING # Compute the thresholding tree coefs.tree <- wtthresh(coefs,...) # Prune the tree coefs.pruned.tree <- prune.wtthresh(coefs.tree,v=v,sd.mult=sd.mult,plot=FALSE) # Threshold according to the pruned tree coefs.threshed <- thresh(coefs.pruned.tree,postmed=postmed) } else { # LEVELWISE THRESHOLDING coefs.threshed <- list() for (nm in names(coefs)) { coefs.tree <- treethresh(coefs[[nm]],...) coefs.pruned.tree <- prune(coefs.tree,v=v,sd.mult=sd.mult,plot=FALSE) coefs.threshed[[nm]] <- thresh(coefs.pruned.tree,postmed=postmed) } } # Undo the resclaing for (nm in names(coefs)) coefs.threshed[[nm]] <- coefs.threshed[[nm]] * sdev # Update coefficients insert.coefficients(object,coefs.threshed) }
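# Added usage sketch (not part of the package source above): a minimal example of
# denoising a 1-D signal with wavelet.treethresh(). It assumes the compiled
# 'treethresh' package and the 'wavethresh' package (which supplies wd() and wr())
# are installed; the test signal itself is made up for illustration.
library(wavethresh)
library(treethresh)
x <- seq(0, 1, length.out = 1024)
noisy <- sin(8 * pi * x) + rnorm(1024, sd = 0.3)
noisy.wd <- wd(noisy)                        # discrete wavelet transform
denoised.wd <- wavelet.treethresh(noisy.wd)  # estimate sdev, grow/prune the tree, threshold
denoised <- wr(denoised.wd)                  # invert the transform
plot(x, noisy, type = "l", col = "grey")
lines(x, denoised, col = "red")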
/treethresh/R/treethresh.R
no_license
ingted/R-Examples
R
false
false
18,516
r
\name{EWBurials}
\alias{EWBurials}
\docType{data}
\title{
Ernest Witte Cemetery, Austin County, Texas, U.S.A.
}
\description{
Sex, age, burial group, location, and burial orientation and direction facing from the Ernest Witte site, a Late Archaic cemetery in Texas (Hall 1981).
}
\usage{data(EWBurials)}
\format{
  A data frame with 49 observations on the following 7 variables.
  \describe{
    \item{\code{Group}}{Cemetery group, a factor with levels \code{1}, \code{2}}
    \item{\code{North}}{North grid location of the burial in meters (excavation grid system)}
    \item{\code{West}}{East grid location of the burial in meters (excavation grid system)}
    \item{\code{Age}}{Age category, a factor with levels \code{Fetus}, \code{Infant}, \code{Child}, \code{Adolescent}, \code{Young Adult}, \code{Adult}, \code{Middle Adult}, \code{Old Adult}}
    \item{\code{Sex}}{a factor with levels \code{Female}, \code{Male}}
    \item{\code{Direction}}{circular data in degrees indicating the direction of the individual measured from the head along the vertebral column}
    \item{\code{Looking}}{circular data in degrees indicating the direction the individual is facing}
    \item{\code{Goods}}{Presence or absence of grave goods}
  }
}
\details{
The Ernest Witte site in Austin County, Texas contains four burial groups from different time periods. Group 1 includes 60 interments that occurred between about 2000 and 1200 BCE. Group 2 is the largest with 148 interments; the burials in this group were interred between about CE 200 and 500. Groups 3 and 4 include only 10 and 13 interments and date to CE 500 to 1500; they are not included in this data set, which was taken from Appendix II (Hall 1981). Two of the variables, \code{Direction} and \code{Looking}, are circular data and require package \code{circular}. Hall (2010) provides a summary of the site and its significance.
}
\source{
Hall, G. D. 1981. Allen's Creek: A Study in the Cultural Prehistory of the Lower Brazos River Valley. \emph{The University of Texas at Austin. Texas Archeological Survey. Research Report No.} 61.
}
\references{
Carlson, David L. 2017. \emph{Quantitative Methods in Archaeology Using R}. Cambridge University Press, pp 350-357.

Hall, G. D. 2010. Ernest Witte site. \emph{Handbook of Texas Online} \url{https://www.tshaonline.org/handbook/entries/ernest-witte-site}. Texas State Historical Association.
}
\examples{
data(EWBurials)
xtabs(~Age+Sex+Group, EWBurials)
if (requireNamespace("circular", quietly = TRUE)) {
  plot(EWBurials$Direction)
} else {
  cat("This example requires package circular.\n")
}
}
\keyword{datasets}
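# Added sketch (not part of the help file above): one way the circular variables
# could be handled, assuming the 'archdata' and 'circular' packages are installed.
# Column names follow the \format section above.
library(archdata)
data(EWBurials)
if (requireNamespace("circular", quietly = TRUE)) {
  dir <- circular::circular(EWBurials$Direction, units = "degrees",
                            template = "geographics")
  circular::rose.diag(dir, bins = 16)  # rose diagram of burial orientations
}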
/man/EWBurials.Rd
no_license
cran/archdata
R
false
false
2,635
rd
source("Bootstrapping (1).r") source("EnergyOptim.r") load("DCG.RData") load("CoupGeo.RData") ### iter=500 Energy.coarse=numeric(iter) Energy.fine=numeric(iter) for (l in 1:iter){ sub1=Bootbinary(reptl[1:11,1:6])$Matrix sub2=Bootbinary(reptl[1:11,7:44])$Matrix sub3=Bootbinary(reptl[12:20,1:6])$Matrix sub4=Bootbinary(reptl[12:20,7:44])$Matrix Binarytotal=rbind(cbind(sub1,sub2),cbind(sub3,sub4)) Binarytotal=cbind(Binarytotal,reptl[,45:47]) Energy.coarse[l]=GetBipEnergy(Binarytotal) } for (l in 1:iter){ sub11=Bootbinary(reptl[1:2,1:6])$Matrix sub12=Bootbinary(reptl[3:4,1:6])$Matrix sub13=Bootbinary(reptl[5:11,1:6])$Matrix sub14=Bootbinary(reptl[12:20,1:6])$Matrix sub21=Bootbinary(reptl[1:2,7:44])$Matrix sub22=Bootbinary(reptl[3:4,7:44])$Matrix sub23=Bootbinary(reptl[5:11,7:44])$Matrix sub24=Bootbinary(reptl[12:20,7:44])$Matrix Binarytotal=cbind(rbind(sub11,sub12,sub13,sub14),rbind(sub21,sub22,sub23,sub24)) Binarytotal=cbind(Binarytotal,reptl[,45:47]) Energy.fine[l]=GetBipEnergy(Binarytotal) } ######### save(Energy.coarse,Energy.fine,file="reptl.RData") library(lattice) dat.reptl<- data.frame(dens = c(Energy.coarse,Energy.fine,E.reptl.per), lines = rep(c("median","fine","coarse"), c(500,500,10000))) densityplot(~dens,data=dat.reptl,groups = lines,plot.points = FALSE, ref = TRUE, xlab="Energy", auto.key=list(lines=TRUE),main="reptile") ############################### iter=100 Energy.overall=numeric(iter) for (l in 1:iter){ Binary.no=Bootbinary(reptl)$Matrix Energy.noblock[l]=GetBipEnergy(Binary.no) } ####### plot(density(Energy.avif.fine)) #### n.random=10000 reptl.permute=array(0,c(nrow(reptl),ncol(reptl),n.random)) for (i in 1:n.random){ row.permute=sample(nrow(reptl),replace=FALSE) col.permute=sample(ncol(reptl),replace=FALSE) reptl.permute[,,i]=reptl[row.permute,] reptl.permute[,,i]=reptl.permute[,col.permute,i] } E.reptl.per=sapply(1:n.random, function(i) GetBipEnergy(reptl.permute[,,i])) ######## n.random=10000 avif.permute=array(0,c(nrow(avif),ncol(avif),n.random)) for (i in 1:n.random){ row.permute=sample(nrow(avif),replace=FALSE) col.permute=sample(ncol(avif),replace=FALSE) avif.permute[,,i]=avif[row.permute,] avif.permute[,,i]=avif.permute[,col.permute,i] } E.avif.per=sapply(1:n.random, function(i) GetBipEnergy(avif.permute[,,i])) E.avif.per1=E.avif.per ######## dat.avif<- data.frame(dens = c(Energy.avif.fine,E.avif.per1), lines = rep(c("fine","coarse"), c(100,10000))) densityplot(~dens,data=dat.avif,groups = lines,plot.points = FALSE, ref = TRUE, xlab="Energy", auto.key=list(lines=TRUE),main="Avifauna")
/code/reptl.r
no_license
guanjiahui/nested_bipartite_network
R
false
false
2,648
r
source("Bootstrapping (1).r") source("EnergyOptim.r") load("DCG.RData") load("CoupGeo.RData") ### iter=500 Energy.coarse=numeric(iter) Energy.fine=numeric(iter) for (l in 1:iter){ sub1=Bootbinary(reptl[1:11,1:6])$Matrix sub2=Bootbinary(reptl[1:11,7:44])$Matrix sub3=Bootbinary(reptl[12:20,1:6])$Matrix sub4=Bootbinary(reptl[12:20,7:44])$Matrix Binarytotal=rbind(cbind(sub1,sub2),cbind(sub3,sub4)) Binarytotal=cbind(Binarytotal,reptl[,45:47]) Energy.coarse[l]=GetBipEnergy(Binarytotal) } for (l in 1:iter){ sub11=Bootbinary(reptl[1:2,1:6])$Matrix sub12=Bootbinary(reptl[3:4,1:6])$Matrix sub13=Bootbinary(reptl[5:11,1:6])$Matrix sub14=Bootbinary(reptl[12:20,1:6])$Matrix sub21=Bootbinary(reptl[1:2,7:44])$Matrix sub22=Bootbinary(reptl[3:4,7:44])$Matrix sub23=Bootbinary(reptl[5:11,7:44])$Matrix sub24=Bootbinary(reptl[12:20,7:44])$Matrix Binarytotal=cbind(rbind(sub11,sub12,sub13,sub14),rbind(sub21,sub22,sub23,sub24)) Binarytotal=cbind(Binarytotal,reptl[,45:47]) Energy.fine[l]=GetBipEnergy(Binarytotal) } ######### save(Energy.coarse,Energy.fine,file="reptl.RData") library(lattice) dat.reptl<- data.frame(dens = c(Energy.coarse,Energy.fine,E.reptl.per), lines = rep(c("median","fine","coarse"), c(500,500,10000))) densityplot(~dens,data=dat.reptl,groups = lines,plot.points = FALSE, ref = TRUE, xlab="Energy", auto.key=list(lines=TRUE),main="reptile") ############################### iter=100 Energy.overall=numeric(iter) for (l in 1:iter){ Binary.no=Bootbinary(reptl)$Matrix Energy.noblock[l]=GetBipEnergy(Binary.no) } ####### plot(density(Energy.avif.fine)) #### n.random=10000 reptl.permute=array(0,c(nrow(reptl),ncol(reptl),n.random)) for (i in 1:n.random){ row.permute=sample(nrow(reptl),replace=FALSE) col.permute=sample(ncol(reptl),replace=FALSE) reptl.permute[,,i]=reptl[row.permute,] reptl.permute[,,i]=reptl.permute[,col.permute,i] } E.reptl.per=sapply(1:n.random, function(i) GetBipEnergy(reptl.permute[,,i])) ######## n.random=10000 avif.permute=array(0,c(nrow(avif),ncol(avif),n.random)) for (i in 1:n.random){ row.permute=sample(nrow(avif),replace=FALSE) col.permute=sample(ncol(avif),replace=FALSE) avif.permute[,,i]=avif[row.permute,] avif.permute[,,i]=avif.permute[,col.permute,i] } E.avif.per=sapply(1:n.random, function(i) GetBipEnergy(avif.permute[,,i])) E.avif.per1=E.avif.per ######## dat.avif<- data.frame(dens = c(Energy.avif.fine,E.avif.per1), lines = rep(c("fine","coarse"), c(100,10000))) densityplot(~dens,data=dat.avif,groups = lines,plot.points = FALSE, ref = TRUE, xlab="Energy", auto.key=list(lines=TRUE),main="Avifauna")
# dplyr is needed for the pipe, group_by() and summarise_all() used below
library(dplyr)

# Read the test and training sets, feature names and activity labels
X_test <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/X_test.txt", quote="\"", comment.char="")
y_test <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/y_test.txt", quote="\"", comment.char="")
subject_test <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/test/subject_test.txt", quote="\"", comment.char="")
X_train <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/X_train.txt", quote="\"", comment.char="")
y_train <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/y_train.txt", quote="\"", comment.char="")
subject_train <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/train/subject_train.txt", quote="\"", comment.char="")
features <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/features.txt", quote="\"", comment.char="")
activity_labels <- read.table("./getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset/activity_labels.txt", quote="\"", comment.char="")

# Label the feature columns and merge the test and training sets
colnames(X_test) <- features$V2
colnames(X_train) <- features$V2
Test_dataset <- cbind(subject_test, y_test, X_test)
Train_dataset <- cbind(subject_train, y_train, X_train)
Merged_df <- rbind(Test_dataset, Train_dataset)

# Keep only the mean() and std() features (dropping meanFreq()) and attach activity labels
mean_std_colnames <- grep('-mean()|-std()',features$V2)
freqmean_colnames <- grep('-meanFreq()',features$V2)
mean_std_colnames <- mean_std_colnames[!(mean_std_colnames %in% freqmean_colnames)]
mean_std_colnames <- mean_std_colnames+2
mean_std_dataset <- Merged_df[,c(1,2,mean_std_colnames)]
colnames(mean_std_dataset)[1] <- c('Subject')
colnames(mean_std_dataset)[2] <- c('V1')
mean_std_dataset <- merge(activity_labels, mean_std_dataset, by.y = 'V1')
colnames(mean_std_dataset)[2] <- c('Activity')
mean_std_dataset <- mean_std_dataset[,c(-1)]

# Average each mean() feature by activity and subject
mean_colnames <- grep('-mean()',features$V2)
mean_colnames <- mean_colnames[!(mean_colnames %in% freqmean_colnames)]
mean_colnames <- mean_colnames+2
mean_dataset <- Merged_df[,c(1,2,mean_colnames)]
colnames(mean_dataset)[1] <- c('Subject')
mean_table <- mean_dataset %>% group_by(V1.1, Subject) %>% summarise_all('mean')
colnames(mean_table)[1] <- c('V1')
mean_table <- merge(activity_labels, mean_table, by.y = 'V1')
colnames(mean_table)[2] <- c('Activity')
mean_table <- mean_table[,c(-1)]

# Write out the two tidy data sets
write.table(mean_std_dataset, file = "./means_and_std.txt", row.names = FALSE)
write.table(mean_table, file = "./means_table.txt", row.names = FALSE)
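# Added check (not in the original script): read the tidy summary back with headers
# to confirm its shape -- one row per Activity/Subject pair, one column per selected
# mean feature. The file name matches the write.table() call above.
means_table_check <- read.table("./means_table.txt", header = TRUE)
str(means_table_check)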
/run_analysis.R
no_license
mbestry/tidy_dataset_assignment
R
false
false
2,450
r
##### 根据导师在3月初给的资料做划分区间进行分析 ### ##### 分析波动性 ##### 分析横向的波动性 ##### 预处理 ######## library(readxl) library(tidyr) library(dplyr) library(urca) library(lmtest) library(xlsx) library(dyn) root_path <- getwd() data_path <- "/Users/ethan/Documents/Ethan/CoreFiles/CodesFile/MoneyMismatch/data" setwd(root_path) source(paste(root_path, "/program_function.R", sep = "")) output_data.data_name <- "汇率" input_data.data_source_name <- "BIS" output_data.project_name <- "横向比较_自动生成的" # # for example : "金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx" ##### 中国的 ####### # 输入的数据的数据表名称 input_sheet_name <- "中国日度数据" # # 提取需要的区间段1 # data_time_keyword <- # as.character( # c( # '1994-01-03', # '2005-07-21', # # '2010-06-21', # # '2015-08-11', # '2018-12-31' # ) # ) # 提取需要的区间段2 data_time_keyword <- as.character( c( '1994-01-03', '2018-12-31' ) ) # 输出的数据的数据表名称 output_sheet_name <- "中国" ###### 重复代码段 ###### input_data.sheet_name = input_sheet_name output_data.sheet_name = output_sheet_name data_used.time.keyword = data_time_keyword # 导入要计算货币错配程度的表格数据 data_original <- readxl::read_xlsx( path = "/Users/ethan/Documents/Ethan/CoreFiles/CodesFile/MoneyMismatch/data/要处理的金砖四国汇率日度数据_BIS.xlsx", sheet = input_data.sheet_name, col_types = c("date", "numeric") ) # 去掉无用数据 data_used <- subset(data_original, data != 'NaN') data_used <- subset(data_used, data != '') # 调整日期的格式 data_used$time <- as.character.Date(data_used$time) # 对样本生成新的数据集 data_used.timeRange.index <- array(dim = length(data_used.time.keyword)) for (i in 1:length(data_used.time.keyword)) { data_used.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) } # 计算样本数据的一阶差分,生成新的数据集。 data_used.gradient <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.gradient$data <- diff(data_used$data, lag = 1, differences = 1) data_used.gradient.timeRange.index <- array(dim = length(data_used.time.keyword)) data_used.gradient.timeRange.index[1] <- which(as.character.Date(data_used$time) == data_used.time.keyword[1]) for (i in 2:length(data_used.time.keyword)) { data_used.gradient.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) - 1 } data_used.gradient.time.keyword <- data_used.gradient$time[data_used.gradient.timeRange.index] # 计算样本数据的变化率,生成新的数据集。 data_used.rate_of_change <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.rate_of_change$data <- data_used.gradient$data / data_used$data[-length(data_used$time)] data_used.rate_of_change.timeRange.index <- data_used.gradient.timeRange.index data_used.rate_of_change.time.keyword <- data_used.rate_of_change$time[data_used.rate_of_change.timeRange.index] # 计算每一区间段的指标: # 样本数据的均值 data_used.mean <- array(dim = length(data_used.time.keyword) - 1) # 样本数据的标准差 data_used.variance <- array(dim = length(data_used.time.keyword) - 1) # data_used.summary <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的均值 data_used.gradient.mean <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的标准差 data_used.gradient.variance <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的均值 data_used.rate_of_change.mean <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的标准差 data_used.rate_of_change.variance <- array(dim = length(data_used.time.keyword) - 1) # for (i in 1:length(data_used.time.keyword) - 1) { # i # data_used.mean[i] <- # mean(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # data_used.variance[i] 
<- # sd(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 # data_used.gradient.mean[i] <- # mean(data_used.gradient$data[data_used.gradient.timeRange.index[i]+1:data_used.gradient.timeRange.index[i+1]-1]) # data_used.gradient.variance[i] <- # sd(data_used.gradient$data[data_used.gradient.timeRange.index[i] + # 1:data_used.gradient.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.mean[i] <- # mean(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.variance[i] <- # sd(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # } j = 1 for (i in 1:length(data_used.time.keyword) - 1) { data_used.mean[j] <- mean(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.variance[j] <- sd(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 data_used.gradient.mean[j] <- mean(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.gradient.variance[j] <- sd(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.mean[j] <- mean(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.variance[j] <- sd(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) j = j + 1 } dataframe_used <- data.frame( data_used.time.keyword[-length(data_used.time.keyword)], as.character.Date(c(data_used$time[data_used.timeRange.index[-c(1,length(data_used.time.keyword))]-1],data_used.time.keyword[length(data_used.time.keyword)])), as.numeric(data_used.mean), as.numeric(data_used.variance), as.numeric(data_used.gradient.mean), as.numeric(data_used.gradient.variance), as.numeric(data_used.rate_of_change.mean), as.numeric(data_used.rate_of_change.variance) ) names(dataframe_used) <- c('时点开始', '时点结束', '样本均值', '样本标准差', '差分均值', '差分标准差', '日变化率均值', '日变化率标准差') # 写出数据到指定表格的指定位置 output_data.data_type <- "数据指标结果" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( dataframe_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日度数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2(data_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE) output_data.data_type <- "差分数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.gradient, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日变化率数据" output_data.file_name <- 
paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.rate_of_change, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) # # ##### 印度的 ####### # 输入的数据的数据表名称 input_sheet_name <- "印度日度数据" # # 提取需要的区间段1 # data_time_keyword <- # as.character( # c( # '1992-03-02', # '2009-02-02', # '2018-12-31' # )) # 提取需要的区间段2 data_time_keyword <- as.character( c( '1994-01-03', '2018-12-31' )) # 输出的数据的数据表名称 output_sheet_name <- "印度" ###### 重复代码段 ###### input_data.sheet_name = input_sheet_name output_data.sheet_name = output_sheet_name data_used.time.keyword = data_time_keyword # 导入要计算货币错配程度的表格数据 data_original <- readxl::read_xlsx( path = "/Users/ethan/Documents/Ethan/CoreFiles/CodesFile/MoneyMismatch/data/要处理的金砖四国汇率日度数据_BIS.xlsx", sheet = input_data.sheet_name, col_types = c("date", "numeric") ) # 去掉无用数据 data_used <- subset(data_original, data != 'NaN') data_used <- subset(data_used, data != '') # 调整日期的格式 data_used$time <- as.character.Date(data_used$time) # 对样本生成新的数据集 data_used.timeRange.index <- array(dim = length(data_used.time.keyword)) for (i in 1:length(data_used.time.keyword)) { data_used.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) } # 计算样本数据的一阶差分,生成新的数据集。 data_used.gradient <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.gradient$data <- diff(data_used$data, lag = 1, differences = 1) data_used.gradient.timeRange.index <- array(dim = length(data_used.time.keyword)) data_used.gradient.timeRange.index[1] <- which(as.character.Date(data_used$time) == data_used.time.keyword[1]) for (i in 2:length(data_used.time.keyword)) { data_used.gradient.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) - 1 } data_used.gradient.time.keyword <- data_used.gradient$time[data_used.gradient.timeRange.index] # 计算样本数据的变化率,生成新的数据集。 data_used.rate_of_change <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.rate_of_change$data <- data_used.gradient$data / data_used$data[-length(data_used$time)] data_used.rate_of_change.timeRange.index <- data_used.gradient.timeRange.index data_used.rate_of_change.time.keyword <- data_used.rate_of_change$time[data_used.rate_of_change.timeRange.index] # 计算每一区间段的指标: # 样本数据的均值 data_used.mean <- array(dim = length(data_used.time.keyword) - 1) # 样本数据的标准差 data_used.variance <- array(dim = length(data_used.time.keyword) - 1) # data_used.summary <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的均值 data_used.gradient.mean <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的标准差 data_used.gradient.variance <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的均值 data_used.rate_of_change.mean <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的标准差 data_used.rate_of_change.variance <- array(dim = length(data_used.time.keyword) - 1) # for (i in 1:length(data_used.time.keyword) - 1) { # i # data_used.mean[i] <- # mean(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # data_used.variance[i] <- # sd(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 # data_used.gradient.mean[i] <- # mean(data_used.gradient$data[data_used.gradient.timeRange.index[i]+1:data_used.gradient.timeRange.index[i+1]-1]) # data_used.gradient.variance[i] <- # 
sd(data_used.gradient$data[data_used.gradient.timeRange.index[i] + # 1:data_used.gradient.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.mean[i] <- # mean(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.variance[i] <- # sd(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # } j = 1 for (i in 1:length(data_used.time.keyword) - 1) { data_used.mean[j] <- mean(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.variance[j] <- sd(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 data_used.gradient.mean[j] <- mean(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.gradient.variance[j] <- sd(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.mean[j] <- mean(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.variance[j] <- sd(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) j = j + 1 } dataframe_used <- data.frame( data_used.time.keyword[-length(data_used.time.keyword)], as.character.Date(c(data_used$time[data_used.timeRange.index[-c(1,length(data_used.time.keyword))]-1],data_used.time.keyword[length(data_used.time.keyword)])), as.numeric(data_used.mean), as.numeric(data_used.variance), as.numeric(data_used.gradient.mean), as.numeric(data_used.gradient.variance), as.numeric(data_used.rate_of_change.mean), as.numeric(data_used.rate_of_change.variance) ) names(dataframe_used) <- c('时点开始', '时点结束', '样本均值', '样本标准差', '差分均值', '差分标准差', '日变化率均值', '日变化率标准差') # 写出数据到指定表格的指定位置 output_data.data_type <- "数据指标结果" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( dataframe_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日度数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2(data_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE) output_data.data_type <- "差分数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.gradient, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日变化率数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.rate_of_change, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) ##### 俄罗斯的 ####### # 输入的数据的数据表名称 
input_sheet_name <- "俄罗斯日度数据" # # 提取需要的区间段1 # data_time_keyword <- # as.character(c( # '1995-07-06', # '1998-08-17', # '2014-11-10', # '2018-12-31' # ) # ) # 提取需要的区间段2 data_time_keyword <- as.character(c( '1994-01-03', '2018-12-31' ) ) # 输出的数据的数据表名称 output_sheet_name <- "俄罗斯" ###### 重复代码段 ###### input_data.sheet_name = input_sheet_name output_data.sheet_name = output_sheet_name data_used.time.keyword = data_time_keyword # 导入要计算货币错配程度的表格数据 data_original <- readxl::read_xlsx( path = "/Users/ethan/Documents/Ethan/CoreFiles/CodesFile/MoneyMismatch/data/要处理的金砖四国汇率日度数据_BIS.xlsx", sheet = input_data.sheet_name, col_types = c("date", "numeric") ) # 去掉无用数据 data_used <- subset(data_original, data != 'NaN') data_used <- subset(data_used, data != '') # 调整日期的格式 data_used$time <- as.character.Date(data_used$time) # 对样本生成新的数据集 data_used.timeRange.index <- array(dim = length(data_used.time.keyword)) for (i in 1:length(data_used.time.keyword)) { data_used.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) } # 计算样本数据的一阶差分,生成新的数据集。 data_used.gradient <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.gradient$data <- diff(data_used$data, lag = 1, differences = 1) data_used.gradient.timeRange.index <- array(dim = length(data_used.time.keyword)) data_used.gradient.timeRange.index[1] <- which(as.character.Date(data_used$time) == data_used.time.keyword[1]) for (i in 2:length(data_used.time.keyword)) { data_used.gradient.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) - 1 } data_used.gradient.time.keyword <- data_used.gradient$time[data_used.gradient.timeRange.index] # 计算样本数据的变化率,生成新的数据集。 data_used.rate_of_change <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.rate_of_change$data <- data_used.gradient$data / data_used$data[-length(data_used$time)] data_used.rate_of_change.timeRange.index <- data_used.gradient.timeRange.index data_used.rate_of_change.time.keyword <- data_used.rate_of_change$time[data_used.rate_of_change.timeRange.index] # 计算每一区间段的指标: # 样本数据的均值 data_used.mean <- array(dim = length(data_used.time.keyword) - 1) # 样本数据的标准差 data_used.variance <- array(dim = length(data_used.time.keyword) - 1) # data_used.summary <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的均值 data_used.gradient.mean <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的标准差 data_used.gradient.variance <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的均值 data_used.rate_of_change.mean <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的标准差 data_used.rate_of_change.variance <- array(dim = length(data_used.time.keyword) - 1) # for (i in 1:length(data_used.time.keyword) - 1) { # i # data_used.mean[i] <- # mean(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # data_used.variance[i] <- # sd(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 # data_used.gradient.mean[i] <- # mean(data_used.gradient$data[data_used.gradient.timeRange.index[i]+1:data_used.gradient.timeRange.index[i+1]-1]) # data_used.gradient.variance[i] <- # sd(data_used.gradient$data[data_used.gradient.timeRange.index[i] + # 1:data_used.gradient.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.mean[i] <- # mean(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.variance[i] <- # 
sd(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # } j = 1 for (i in 1:length(data_used.time.keyword) - 1) { data_used.mean[j] <- mean(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.variance[j] <- sd(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 data_used.gradient.mean[j] <- mean(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.gradient.variance[j] <- sd(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.mean[j] <- mean(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.variance[j] <- sd(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) j = j + 1 } dataframe_used <- data.frame( data_used.time.keyword[-length(data_used.time.keyword)], as.character.Date(c(data_used$time[data_used.timeRange.index[-c(1,length(data_used.time.keyword))]-1],data_used.time.keyword[length(data_used.time.keyword)])), as.numeric(data_used.mean), as.numeric(data_used.variance), as.numeric(data_used.gradient.mean), as.numeric(data_used.gradient.variance), as.numeric(data_used.rate_of_change.mean), as.numeric(data_used.rate_of_change.variance) ) names(dataframe_used) <- c('时点开始', '时点结束', '样本均值', '样本标准差', '差分均值', '差分标准差', '日变化率均值', '日变化率标准差') # 写出数据到指定表格的指定位置 output_data.data_type <- "数据指标结果" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( dataframe_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日度数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2(data_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE) output_data.data_type <- "差分数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.gradient, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日变化率数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.rate_of_change, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) ##### 巴西的 ####### # 输入的数据的数据表名称 input_sheet_name <- "巴西日度数据" # # 提取需要的区间段1 # data_time_keyword <- # as.character(c( # '1994-07-01', # '1999-02-01', # '2018-12-31' # )) # 提取需要的区间段2 data_time_keyword <- as.character(c( '1994-01-03', '2018-12-31' )) # 输出的数据的数据表名称 output_sheet_name <- "巴西" ###### 重复代码段 ###### input_data.sheet_name = input_sheet_name output_data.sheet_name = 
output_sheet_name data_used.time.keyword = data_time_keyword # 导入要计算货币错配程度的表格数据 data_original <- readxl::read_xlsx( path = "/Users/ethan/Documents/Ethan/CoreFiles/CodesFile/MoneyMismatch/data/要处理的金砖四国汇率日度数据_BIS.xlsx", sheet = input_data.sheet_name, col_types = c("date", "numeric") ) # 去掉无用数据 data_used <- subset(data_original, data != 'NaN') data_used <- subset(data_used, data != '') # 调整日期的格式 data_used$time <- as.character.Date(data_used$time) # 对样本生成新的数据集 data_used.timeRange.index <- array(dim = length(data_used.time.keyword)) for (i in 1:length(data_used.time.keyword)) { data_used.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) } # 计算样本数据的一阶差分,生成新的数据集。 data_used.gradient <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.gradient$data <- diff(data_used$data, lag = 1, differences = 1) data_used.gradient.timeRange.index <- array(dim = length(data_used.time.keyword)) data_used.gradient.timeRange.index[1] <- which(as.character.Date(data_used$time) == data_used.time.keyword[1]) for (i in 2:length(data_used.time.keyword)) { data_used.gradient.timeRange.index[i] <- which(as.character.Date(data_used$time) == data_used.time.keyword[i]) - 1 } data_used.gradient.time.keyword <- data_used.gradient$time[data_used.gradient.timeRange.index] # 计算样本数据的变化率,生成新的数据集。 data_used.rate_of_change <- data.frame(time = as.character.Date(data_used$time[-1]), data = 1) data_used.rate_of_change$data <- data_used.gradient$data / data_used$data[-length(data_used$time)] data_used.rate_of_change.timeRange.index <- data_used.gradient.timeRange.index data_used.rate_of_change.time.keyword <- data_used.rate_of_change$time[data_used.rate_of_change.timeRange.index] # 计算每一区间段的指标: # 样本数据的均值 data_used.mean <- array(dim = length(data_used.time.keyword) - 1) # 样本数据的标准差 data_used.variance <- array(dim = length(data_used.time.keyword) - 1) # data_used.summary <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的均值 data_used.gradient.mean <- array(dim = length(data_used.time.keyword) - 1) # 差分样本数据的标准差 data_used.gradient.variance <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的均值 data_used.rate_of_change.mean <- array(dim = length(data_used.time.keyword) - 1) # 变化率样本数据的标准差 data_used.rate_of_change.variance <- array(dim = length(data_used.time.keyword) - 1) # for (i in 1:length(data_used.time.keyword) - 1) { # i # data_used.mean[i] <- # mean(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # data_used.variance[i] <- # sd(data_used$data[data_used.timeRange.index[i]:data_used.timeRange.index[i+1]-1]) # # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 # data_used.gradient.mean[i] <- # mean(data_used.gradient$data[data_used.gradient.timeRange.index[i]+1:data_used.gradient.timeRange.index[i+1]-1]) # data_used.gradient.variance[i] <- # sd(data_used.gradient$data[data_used.gradient.timeRange.index[i] + # 1:data_used.gradient.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.mean[i] <- # mean(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # data_used.rate_of_change.variance[i] <- # sd(data_used.rate_of_change$data[data_used.rate_of_change.timeRange.index[i] + # 1:data_used.rate_of_change.timeRange.index[i + 1] - 1]) # } j = 1 for (i in 1:length(data_used.time.keyword) - 1) { data_used.mean[j] <- mean(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.variance[j] <- 
sd(data_used$data[(data_used.timeRange.index[j]):(data_used.timeRange.index[j + 1] - 1)],na.rm = FALSE) # 对于各段差分数据等类型的数据的描述性统计的计算,每段的第一个数据值不计入计算。 data_used.gradient.mean[j] <- mean(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.gradient.variance[j] <- sd(data_used.gradient$data[(data_used.gradient.timeRange.index[j] + 1):(data_used.gradient.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.mean[j] <- mean(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) data_used.rate_of_change.variance[j] <- sd(data_used.rate_of_change$data[(data_used.rate_of_change.timeRange.index[j] + 1):(data_used.rate_of_change.timeRange.index[j + 1] - 1)],na.rm = FALSE) j = j + 1 } dataframe_used <- data.frame( data_used.time.keyword[-length(data_used.time.keyword)], as.character.Date(c(data_used$time[data_used.timeRange.index[-c(1,length(data_used.time.keyword))]-1],data_used.time.keyword[length(data_used.time.keyword)])), as.numeric(data_used.mean), as.numeric(data_used.variance), as.numeric(data_used.gradient.mean), as.numeric(data_used.gradient.variance), as.numeric(data_used.rate_of_change.mean), as.numeric(data_used.rate_of_change.variance) ) names(dataframe_used) <- c('时点开始', '时点结束', '样本均值', '样本标准差', '差分均值', '差分标准差', '日变化率均值', '日变化率标准差') # 写出数据到指定表格的指定位置 output_data.data_type <- "数据指标结果" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( dataframe_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日度数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2(data_used, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE) output_data.data_type <- "差分数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.gradient, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE ) output_data.data_type <- "日变化率数据" output_data.file_name <- paste("金砖四国",output_data.data_name,output_data.data_type,"_",input_data.data_source_name,"_",output_data.project_name,".xlsx",sep = '') xlsx::write.xlsx2( data_used.rate_of_change, file = paste(data_path,output_data.file_name,sep='/'), sheetName = output_data.sheet_name, append = TRUE )
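The per-country block above is repeated verbatim for each of the four countries, changing only the sheet name and breakpoint dates, and its segment loop iterates over `1:length(data_used.time.keyword) - 1`, which R evaluates as `(1:length(x)) - 1`. A minimal sketch of how the repeated section could be collected into one helper is given below; it assumes the same two-column (time, data) sheet layout, and the helper name `summarise_fx_segments` plus its exact segment boundaries are illustrative rather than part of the original script.
# Hypothetical refactor sketch (requires readxl); not the author's original code.
summarise_fx_segments <- function(path, sheet, breakpoints) {
  # Read one daily series: a date column `time` and a numeric column `data`.
  raw <- readxl::read_xlsx(path, sheet = sheet, col_types = c("date", "numeric"))
  used <- raw[!is.na(raw$data), ]                     # drop missing/NaN observations
  used$time <- as.character(as.Date(used$time))

  diff_data <- diff(used$data)                        # first differences
  roc_data  <- diff_data / used$data[-nrow(used)]     # daily rate of change

  idx <- match(breakpoints, used$time)                # row positions of the segment boundaries
  n_seg <- length(breakpoints) - 1
  out <- data.frame(start = breakpoints[-length(breakpoints)],
                    end = breakpoints[-1],
                    mean = NA_real_, sd = NA_real_,
                    diff_mean = NA_real_, diff_sd = NA_real_,
                    roc_mean = NA_real_, roc_sd = NA_real_)
  for (j in seq_len(n_seg)) {                         # seq_len() avoids the (1:n) - 1 pitfall
    rng      <- idx[j]:(idx[j + 1] - 1)               # levels: include the segment start
    rng_diff <- idx[j]:(idx[j + 1] - 2)               # diffs/rates: skip the segment's first point
    out$mean[j]      <- mean(used$data[rng])
    out$sd[j]        <- sd(used$data[rng])
    out$diff_mean[j] <- mean(diff_data[rng_diff])
    out$diff_sd[j]   <- sd(diff_data[rng_diff])
    out$roc_mean[j]  <- mean(roc_data[rng_diff])
    out$roc_sd[j]    <- sd(roc_data[rng_diff])
  }
  out
}
Each country then reduces to one call, for example summarise_fx_segments(path, "中国日度数据", c('1994-01-03', '2018-12-31')), and the resulting data frame can be written out with xlsx::write.xlsx2() as in the script above.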
/codes/R programs/MoneyMismatch/volatility_compare_201903.R
no_license
EthanSystem/MoneyMismatch
R
false
false
34,400
r
# This is the R_PROFILE.R file # This file is executed before the code in the 'R' subdirectory # This file should not be used.
/R/R_PROFILE.R
no_license
jestill/gencart
R
false
false
127
r
#' @title Plot Isoform Expression Data
#' @description Visualize isoform expression data for exploratory data analysis.
#'
#' @details
#' The \code{isoPlot} function is designed to make visualization of isoform expression data simple and easy for R novices and bioinformaticians alike.
#' The function is an S3 generic that accepts various R and Bioconductor data sets as input and extracts the expression, factor and annotation data from them according to type.
#' The factors allow for splitting expression data from one or more genes into groups and for plot types with data point overlays. Points can be colored by factor levels as well.
#' If the input data is a Bioconductor data set such as an \code{\link[Biobase]{ExpressionSet}} and the \code{gene} option is used, \code{isoPlot} will attempt to look up the isoforms associated with the gene in the annotation data (e.g. \code{\link[Biobase]{fData}}) according to the data input type and look for the gene symbol column indicated by the \code{symbol} option (defaults to 'GeneSymbol').
#' If no matches are found, the row names of the expression data are checked for matches as well.
#' If character values are given for factor input, \code{isoPlot} will attempt to look up associated phenotype data (e.g. \code{\link[Biobase]{pData}}).
#' One can also pass raw data vectors/data frames and/or factors to \code{isoPlot} to bypass this feature, which is critical for data sets and data formats where integrated phenotype and feature data is not available.
#' \code{isoPlot} uses the \code{NicePlots} graphics library, and any \code{NicePlots} option and/or theme can be used in conjunction with the options detailed below.
#' The \code{plotType} options supported correspond to \code{NicePlots} functions and include box plots (\code{\link[NicePlots]{niceBox}}), dot plots (\code{\link[NicePlots]{niceDots}}), violin plots (\code{\link[NicePlots]{niceVio}}), bar plots (\code{\link[NicePlots]{niceBar}}) as well as both one/two dimensional kernel density plots (\code{\link[NicePlots]{niceDensity}}).
#' Supported data input types include: \code{\link[Biobase]{ExpressionSet}}, \code{\link[EDASeq]{SeqExpressionSet-class}}, \code{\link[limma]{EList-class}}, \code{\link[DESeq2]{DESeqTransform}}, as well as standard R data types such as \code{\link[base]{vector}}, \code{\link[base]{matrix}}, \code{\link[base]{data.frame}}, and \code{\link[tibble]{tibble}}.
#' \code{isoPlot} silently returns a list of class \code{npData} that contains summarized findings, p-values (if indicated), extracted plotting data, and plotting options.
#' All npData objects can be replotted using the \code{\link[graphics]{plot}} function, \code{isoPlot} or any of the \code{NicePlots} functions.
#' Options passed to any of these, including \code{plotType}, will override the options stored in the \code{npData} object.
#'
#' @param x R data object; Most typically this is an \code{ExpressionSet}, but there is support for other data types as well.
#' @param isoforms character; An isoform ID or a vector of isoform IDs to plot.
#' @param gene character; Gene or vector of gene names. This is an optional setting that will return all of the isoforms associated with the gene.
#' @param appris logical or character; If set to TRUE, will return only isoforms with an Appris annotation. If set to a character string, will restrict isoforms to those with the character value matching a substring of the Appris tag. The Appris column is determined by the first column name containing 'Appris' (case insensitive).
#' @param transcriptType character; Returns only those isoforms where the transcript type column has a substring that matches the character value supplied, such as 'protein' in 'protein_coding'. The transcript type column is determined by the \code{ttype} option.
#' @param asPercentage logical; If set to \code{\link{TRUE}}, the isoform expression is given as a percentage of total gene expression (defaults to \code{\link{FALSE}}).
#' @param group factor or name of factor to be extracted from \code{x} (e.g. \code{\link[Biobase]{pData}}). Used as the primary grouping factor.
#' @param subgroup factor or name of factor to be extracted from \code{x} (e.g. \code{\link[Biobase]{pData}}). Used to subgroup data unless multiple genes are selected, in which case \code{subgroup} is ignored.
#' @param highlight factor or name of factor to be extracted from \code{x} (e.g. \code{\link[Biobase]{pData}}). Used to color data points by factor levels. Only valid for graphs with point overlays.
#' @param facet factor or name of factor to be extracted from \code{x} (e.g. \code{\link[Biobase]{pData}}). Splits the data into multiple smaller graphs.
#' @param stack factor or name of factor to be extracted from \code{x} (e.g. \code{\link[Biobase]{pData}}). Used for stacked bar plots where both the individual and aggregate values are important. Valid only for bar plots.
#' @param plotType character; Can be set to "box", "violin", "dot", "bar", "density" or "surface" for box plots, violin plots, dot plots, bar plots, and one- or two-dimensional kernel density plots, respectively.
#' @param main character; The main plot title. Defaults to \code{TRUE} for automated title generation.
#' @param symbol character; Column name of gene symbols in the feature data of \code{x} (\code{fData}).
#' @param legend logical or character; Draws a figure legend. A character value sets the legend title, which defaults to "Legend" when set to \code{\link{TRUE}}. Set to \code{\link{FALSE}} to disable.
#' @param na.rm logical; Removes \code{\link{NA}} values prior to plotting.
#' @param shiny logical; Use \code{\link[shiny]{shiny}} interfaces if available.
#' @param groupByGene logical; If more than one gene is listed and \code{groupByGene} is \code{TRUE}, the data are grouped by gene rather than by the primary grouping factor.
#' @param useNormCounts logical; By default \code{isoPlot} will try to use normCounts instead of counts in \code{SeqExpressionSets}. Set to FALSE to use raw counts instead, though this will generate a warning about using non-normalized data.
#' @param ttype character; Column name of the optional transcript type column in the annotation. The default value is 'transcript_type'.
#' @param ... Any parameter recognized by \code{NicePlots} functions.
#'
#' @return a list of class \code{npData}. This contains data necessary to regenerate the plot as well as summary statistics.
#' #' @examples #' ToDo<-1 #' #' @importFrom purrr map #' @importFrom dplyr select #' @importFrom tidyr gather #' @importFrom magrittr %>% #' @importFrom Biobase exprs pData fData #' @export #' @seealso \code{\link{genePlot}}, \code{\link{showIsoforms}}, \code{\link[NicePlots]{niceBox}}, \code{\link[NicePlots]{niceVio}}, \code{\link[NicePlots]{niceBar}}, \code{\link[NicePlots]{niceDots}}, \code{\link[NicePlots]{niceDensity}} isoPlot <- function(x, isoforms=NULL, gene=NULL, plotType=c("box","dot","bar","violin","density","surface"), asPercentage=FALSE, symbol="GeneSymbol",legend=NULL, main=TRUE, na.rm=TRUE, group=NULL, subgroup=NULL, highlight=NULL, facet=NULL, stack=NULL, shiny=FALSE, groupByGene=FALSE, useNormCounts=TRUE, appris=FALSE, transcriptType=FALSE, ttype="transcript_type",...) {UseMethod("isoPlot",x)} #' @importFrom purrr map #' @importFrom tidyr gather #' @importFrom dplyr select #' @importFrom magrittr %>% #' @importFrom NicePlots niceBox niceVio niceBar niceDensity #' @importFrom Biobase exprs pData fData #' @export isoPlot.default <- function(x, isoforms=NULL, gene=NULL, plotType=c("bar","dot","box","violin","density","surface"), asPercentage=FALSE, symbol="GeneSymbol", legend=NULL, main=TRUE, na.rm=TRUE, group=NULL, subgroup=NULL, highlight=NULL, facet=NULL, stack=TRUE, shiny=FALSE, groupByGene=FALSE, useNormCounts=TRUE, appris=FALSE, transcriptType=FALSE, ttype="transcript_type", ...) { npOptions<-list(...) #First lets handle the case that someone set something to FALSE or NA instead of just leaving it as NULL if(sum(isoforms==FALSE)==1 | sum(is.na(isoforms))==1) {isoforms<-NULL} if(sum(gene==FALSE)==1 | sum(is.na(gene))==1) {gene<-NULL} if((length(group)==1 & sum(group==FALSE)==1) | sum(is.na(group))==length(group)) {group<-NULL} if((length(subgroup)==1 & sum(subgroup==FALSE)==1) | sum(is.na(subgroup))==length(subgroup)) {subgroup<-NULL} if((length(stack)==1 & sum(stack==FALSE)==1) | sum(is.na(stack))==length(stack)) {stack<-NULL} if((length(highlight)==1 & sum(highlight==FALSE)==1) | sum(is.na(highlight))==length(highlight)) {highlight<-NULL} #Now lets get rid of incompatible options if(!is.null(highlight) & plotType[1]=="bar") { hightlight<-NULL } if(!is.null(stack) & plotType[1]!="bar") { stack<-NULL } #Looking up which isoforms meet the selected criteria isos<-NULL if(is.null(isoforms) & is.null(gene)){ isos<-rownames(x) } else { isos<-showIsoforms(x,isoforms=isoforms,genes = gene, annotation = FALSE, appris = appris,transcriptType = transcriptType, symbol = symbol,ttype = ttype) } #Setting default title of nothing was given if(main==TRUE) { if(length(gene)>1) { main<-paste0(c(paste0(gene,collapse=", "),"Isoform Expression"),collapse=" ") } else if (!is.null(isoforms)) { main<-paste0(c(paste0(isoforms,collapse=", "),"Expression"),collapse=" ") } else if (!is.null(gene)) { main<-paste0(gene, " Expression") } else { "Isoform Expression" } } #Setting the legend to turn on automatically if(is.null(legend)){ legend<-FALSE if(!is.null(subgroup) | !is.null(stack)| !is.null(highlight)) { legend<-"Legend" } } #Stack can be set to true if the goal is stacking isoforms. #This is handled separately from the getIsoData #This section of code just hides this use case from getIsoData. 
isoStack<-FALSE if(!is.null(stack)) { if(stack[1]==TRUE) { isoStack<-TRUE stack<-NULL } } #Quick test to see if searching by gene symbol is available SymbolFound<-FALSE if(symbol %in% colnames(showIsoforms(x, isoforms=isos, annotation = T))) { SymbolFound<-TRUE } #Collecting the expresion and factor data data<-getIsoData(d=x, isoforms=isos, plotType=plotType, symbol=symbol,group=group, subgroup=subgroup,highlight=highlight,facet=facet, stack=stack, useNormCounts=useNormCounts) #Convert isoforms as a percentage of gene expression. if(asPercentage==TRUE & SymbolFound==TRUE) { myGenes<-showIsoforms(x,isoforms=isos,symbol=symbol,annotation=symbol) uniGenes<-unique(myGenes) gexprs<-vector(mode = "list", length = length(uniGenes)) names(gexprs)<-uniGenes for(cgene in uniGenes) { cisos<-showIsoforms(x,genes=cgene,annotation = FALSE) cDat<-getIsoData(d=x, isoforms=cisos, plotType=plotType, symbol=symbol,group=group, subgroup=subgroup,highlight=highlight,facet=facet, stack=stack, useNormCounts=useNormCounts) if(length(cisos)==1) { gexprs[[cgene]]<-cDat$x } else { gexprs[[cgene]]<-rowSums(cDat$x) } gexprs[[cgene]][which(gexprs[[cgene]]==0)]<-1 #Avoiding divide by zero errors. Iso will be zero anyway. } if (length(isos)==1) { data$x<-data$x/gexprs[[1]]*100 } else { for (i in 1:dim(data$x)[2]) { data$x[,i]<-data$x[,i]/gexprs[[myGenes[i]]]*100 } } npOptions<-append(npOptions,list("axisText"=c("","%"))) } if(isoStack==TRUE){ stack<-TRUE } if(is.null(stack)){ stack<-FALSE } else { #If stack is TRUE we are assuming the user means to stack isoforms if(stack[1]==TRUE) { stackData<-NA if(is.vector(data$x)) { #There there is only one isoform then there is nothing to stack stack<-FALSE } else { stackData<-data.frame(data$x,data$by) %>% gather(key="isoforms",value="exprs",colnames(data$x)) data$x<-stackData[,"exprs"] data$by<-stackData[,seq_len(dim(stackData)[2]-1)] } } stack<-TRUE } #if group or subgroup are left empty and gene symbol annotation is available #and if there are isoforms from more than one gene present, we will add a gene symbol factor level automatically if(SymbolFound==TRUE){ iso2gene<-showIsoforms(x, isoforms = isos, symbol=symbol, annotation = symbol) if(length(unique(iso2gene))>1 & sum(c(is.null(group),is.null(subgroup)))>=1 & grepl("bar",plotType,ignore.case = TRUE) & isoStack==TRUE) { if("isoforms" %in% colnames(data$by) ){ geneFact<-iso2gene[data$by$isoforms] if(sum(data$by$group =="data" | is.na(data$by$group)) == length(data$by$group)){ data$by$group <- factor(geneFact) } else { data$by <- data.frame(geneFact,data$by) } subgroup<-TRUE } } else if (length(unique(iso2gene))>1 & sum(c(is.null(group),is.null(subgroup)))==2) { geneData<-data.frame(data$x,data$by) %>% gather(key="isoforms",value="exprs",colnames(data$x)) %>% select("isoforms", colnames(data$by),"exprs") data$x<-geneData[,"exprs"] geneFact<-iso2gene[geneData$isoforms] if(sum(data$by$group =="data", na.rm=TRUE) == length(data$by$group)-sum(is.na(data$by$group))){ geneData$group <- factor(geneFact) data$by<-geneData[,seq_len(dim(geneData)[2]-1)] } else { data$by <- data.frame(geneFact,geneData[,seq_len(dim(geneData)[2]-1)]) } subgroup<-TRUE } } #Now we convert the options to boolean TRUE/FALSE for compatibility with NicePlots if(is.null(subgroup)){ subgroup<-FALSE } else { subgroup<-TRUE } if(is.null(highlight)){ highlight<-FALSE } else { highlight<-TRUE } if(!is.vector(data$x) & (!is.null(group) | !is.null(subgroup))) { subgroup<-TRUE } if(is.null(group) & subgroup==TRUE) { subgroup<-FALSE } if(plotType[1]=="density" & 
!is.null(group)) { subgroup<-TRUE } #Formatting options and adding new data npOptions<-append(list(x=data$x,by=data$by,pointHighlights=highlight,flipFacts=groupByGene, subgroup=subgroup, facet=facet,stack=stack, na.rm=na.rm,main=main, legend=legend),npOptions) if(groupByGene==TRUE & data$NullNames==TRUE) { if(is.factor(data$by)) { npOptions<-append(npOptions,list(subgroupLabels=rep("",length(levels(data$by))))) } else { npOptions<-append(npOptions,list(subgroupLabels=rep("",length(levels(data$by[,1]))))) } } #Calling NicePlots dataOut<-1 if(grepl("box", plotType[1], ignore.case = TRUE)){ dataOut<-do.call("niceBox",npOptions) } else if (grepl("dot", plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceDots",npOptions) } else if (grepl("vio", plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceVio",npOptions) } else if (grepl("bar", plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceBar",npOptions) } else if (grepl("den",plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceDensity",npOptions) } else if (grepl("sur", plotType[1], ignore.case = TRUE)) { npOptions<- append(list(plotType="surface"),npOptions) dataOut<-do.call("niceDensity",npOptions) } else { stop("invalid plot type") } invisible(dataOut) } #' @importFrom purrr map #' @importFrom NicePlots niceBox niceVio niceBar niceDensity #' @importFrom Biobase exprs pData fData #' @export isoPlot.npData<-function(x, isoforms=NULL, gene=NULL, plotType=NULL, ...) { clOptions<-list(...) for(opt in names(clOptions)) { if(is.null(x$options[opt])){ append(x$options,list(opt=clOptions[[opt]])) }else{ x$options[[opt]]<-clOptions[[opt]] } } if(!is.null(x$options[["groupByGene"]])){ if(x$options[["groupByGene"]]==TRUE) { x$options[["flipFacts"]]<-FALSE } else { x$options[["flipFacts"]]<-TRUE } } dataOut<-1 if(grepl("box", plotType[1], ignore.case = TRUE)){ dataOut<-do.call("niceBox",npOptions) } else if (grepl("dot", plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceDots",npOptions) } else if (grepl("vio", plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceVio",npOptions) } else if (grepl("bar", plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceBar",npOptions) } else if (grepl("den",plotType[1], ignore.case = TRUE)) { dataOut<-do.call("niceDensity",npOptions) } else if (grepl("sur", plotType[1], ignore.case = TRUE)) { npOptions<- append(list(plotType="surface"),npOptions) dataOut<-do.call("niceDensity",npOptions) } else { stop("invalid plot type") } invisible(dataOut) }
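Because the @examples section above is still a placeholder (ToDo<-1), a hedged usage sketch follows. The toy ExpressionSet, its isoform IDs, and the 'treatment' phenotype column are invented for illustration; the sketch assumes the package and its NicePlots dependency are installed, and only the documented arguments (gene, group, plotType, symbol) are used.
# Illustrative only: build a small ExpressionSet with two isoforms of one gene
# and draw a box plot of their expression grouped by a phenotype factor.
library(Biobase)

set.seed(1)
expr <- matrix(rpois(40, lambda = 20), nrow = 2,
               dimnames = list(c("ISO.001", "ISO.002"), paste0("S", 1:20)))
pdat <- data.frame(treatment = rep(c("ctrl", "drug"), each = 10),
                   row.names = colnames(expr))
fdat <- data.frame(GeneSymbol = rep("GENE1", 2),
                   transcript_type = rep("protein_coding", 2),
                   row.names = rownames(expr))
eset <- ExpressionSet(assayData = expr,
                      phenoData = AnnotatedDataFrame(pdat),
                      featureData = AnnotatedDataFrame(fdat))

# All isoforms annotated to GENE1, split by treatment; asPercentage = TRUE would
# instead express each isoform as a share of total GENE1 counts.
isoPlot(eset, gene = "GENE1", group = "treatment", plotType = "box")
As documented in the details above, the invisibly returned npData object can be re-drawn with plot() or passed back to isoPlot with a different plotType.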
/R/isoPlot.R
no_license
crisjs/bvt
R
false
false
16,466
r
#load libraries library(quantreg) library(glmnet) library(magrittr) library(purrr) library(msaenet) #load data #data.half <- readRDS() #full.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/fulldata_091620.RData") #half.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/500_data_10052020.RData") testing10.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/testing10_data_091720.RData") #single.data <- testing10.data[[1]] #debug.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/debug_data_091720.RData") X <- testing10.data[[3]]$X Y <- testing10.data[[3]]$Y p <- testing10.data[[3]]$conditions$p n <- testing10.data[[3]]$conditions$n lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100)) nu.try <- exp(seq(log(0.01) , log(10) , length.out = 100)) msaelnet5.model <- msaenet(x = X , y = Y , family = "gaussian" , init = "ridge" , alphas = 0.5 , tune = "cv" , nfolds = 5L , rule = "lambda.min" , nsteps = 10L , tune.nsteps = "max" , scale = nu.try[1]) msaelnet75.model <- msaenet(x = X , y = Y , family = "gaussian" , init = "ridge" , alphas = 0.75 , tune = "cv" , nfolds = 5L , rule = "lambda.min" , nsteps = 10L , tune.nsteps = "max" , scale = nu.try[1]) msaelnet9.model <- msaenet(x = X , y = Y , family = "gaussian" , init = "ridge" , alphas = 0.9 , tune = "cv" , nfolds = 5L , rule = "lambda.min" , nsteps = 10L , tune.nsteps = "max" , scale = nu.try[1]) beta.post <- coef(msaelnet5.model) #coefficients Y.fit <- X%*%beta.post #store number of nonzero coefs st.lad <- sum(beta.post != 0) # number nonzero #generate MSE and sd(MSE) for model mse.OS <- sum((Y - Y.fit) ^ 2) / (n - st.lad - 1) sd.mse.OS <- sd((Y - Y.fit) ^ 2 / (n - st.lad - 1)) #save lambdas #adaptive lasso function with two-way CV for selecting both lambda and nu/gamma msaelnet5.sim.fnct <- function(data) { #create simulation tracker tracker <- as.vector(unlist(data$conditions)) #print tracker of status cat("n = " , tracker[1] , " , p = " , tracker[2] , " , eta.x = " , tracker[3] , " , eta.y = " , tracker[4] , " , g = " , tracker[5] , " , h = " , tracker[6] , ";\n") #load X, Y, and p X <- data$X Y <- data$Y p <- data$conditions$p #seed.ridge <- data$seeds[ , "seed.14"] #set.seed(seed.ridge) #ridge coefs for weighting lambda.try <- exp(seq(log(0.01) , log(1400) , length.out = 100)) ##grid of nu/gamma values to try nu.try <- exp(seq(log(0.01) , log(10) , length.out = 100)) #seed.pre.nu <- data$seeds[ , "seed.15"] #set.seed(seed.pre.nu) #seed.nu <- sample(rnorm(n = 1000000000) , size = length(nu.try) , replace = FALSE) ##initialize list of best msaelnet5 results from each nu/gamma msaelnet5.nu.cv <- list() for(i in 1:length(nu.try)) { #seed <- seed.nu[i] #set.seed(seed) #single adaptive lasso run with ridge weighting and nu = 1 msaelnet5.model <- msaenet(x = X , y = Y , family = "gaussian" , init = "ridge" , alphas = 0.5 , tune = "cv" , nfolds = 5L , rule = "lambda.min" , nsteps = 10L , tune.nsteps = "max" , scale = nu.try[i]) lambda.msaelnet5.opt <- msaelnet5.model[["best.lambdas"]][[11]] best.msaelnet5.coefs <- coef(msaelnet5.model) #coefficients msaelnet5.nu.cv[[i]] <- list(model = list(full.model = msaelnet5.model , lambda = lambda.msaelnet5.opt , coefs = best.msaelnet5.coefs) , metrics_and_info = list(#model.seed.ridge = seed.ridge , #model.seed.prenu = seed.pre.nu , #model.seed.nu = seed , weights = msaelnet5.model[["adapen.list"]][[10]] , nu = nu.try[i] , lambda = 
lambda.msaelnet5.opt , coefs = best.msaelnet5.coefs , mpe = msaelnet5.model[["step.criterion"]][[11]] , #mpe.sd = msaelnet5.model$cvsd[which(msaelnet5.model$lambda == lambda.msaelnet5.opt)] , fpr = length(which(best.msaelnet5.coefs[c(5:p)] != 0)) / length(best.msaelnet5.coefs[c(5:p)]) , fnr = length(which(best.msaelnet5.coefs[c(1:4)] == 0)) / length(best.msaelnet5.coefs[1:4]))) } #find minimizing nu/gamma msaelnet5.nu.cv.mpe <- numeric() #msaelnet5.seeds.ridge <- numeric() #msaelnet5.seeds.prenu <- numeric() #msaelnet5.seeds.nu <- numeric() for(i in 1:length(msaelnet5.nu.cv)) { msaelnet5.nu.cv.mpe[i] <- msaelnet5.nu.cv[[i]]$metrics_and_info$mpe #msaelnet5.seeds.ridge[i] <- msaelnet5.nu.cv[[i]]$metrics_and_info$model.seed.ridge #msaelnet5.seeds.prenu[i] <- msaelnet5.nu.cv[[i]]$metrics_and_info$model.seed.prenu #msaelnet5.seeds.nu[i] <- msaelnet5.nu.cv[[i]]$metrics_and_info$model.seed.nu } #return(msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]) #store BEST msaelnet5 result plus all seeds ###below is used to check that seeds are regenerated properly and not uniform return(list(mpes = msaelnet5.nu.cv.mpe , #seeds.ridge = msaelnet5.seeds.ridge , #seeds.prenu = msaelnet5.seeds.prenu , #seeds.nu = msaelnet5.seeds.nu , model = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]] , important = list(diagnostics = data.frame(cbind(data.seed = tracker[7])) , #model.seed.ridge = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$model.seed.ridge , #model.seed.prenu = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$model.seed.prenu , #model.seed.nu = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$model.seed.nu)) , coefs = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$coefs , weights = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$weights , info = data.frame(cbind(n = tracker[1] , p = tracker[2] , eta.x = tracker[3] , eta.y = tracker[4] , g = tracker[5] , h = tracker[6] , data.seed = tracker[7] , #model.seed.ridge = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$model.seed.ridge , #model.seed.prenu = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$model.seed.prenu , #model.seed.nu = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$model.seed.nu , alpha = 0.5 , lambda = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$lambda , nu = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$nu , mpe = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$mpe , mpe.sd = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$mpe.sd , fpr = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$fpr , fnr = msaelnet5.nu.cv[[which.min(msaelnet5.nu.cv.mpe)]]$metrics_and_info$fnr ) ) ) ) ) } single.test <- msaelnet5.sim.fnct(testing10.data[[1]]) #run across debug dataset msaelnet5.testing10 <- testing10.data %>% map(safely(msaelnet5.sim.fnct)) #saveRDS(msaelnet5.half , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Full_results/msaelnet5_500.RData")
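# --- Post-processing sketch (illustration only; relies on the objects created above) ---
# Each element of msaelnet5.testing10 is the output of purrr::safely(), i.e. a list with
# $result (NULL on failure) and $error (NULL on success). One way to separate the two and
# pull the per-dataset cross-validation criteria ($mpes) from the successful runs:
msaelnet5.results <- purrr::map(msaelnet5.testing10, "result")
msaelnet5.errors <- purrr::map(msaelnet5.testing10, "error")
failed.runs <- which(!purrr::map_lgl(msaelnet5.errors, is.null))
if(length(failed.runs) > 0) cat("Failed datasets:", failed.runs, "\n")
best.mpes <- purrr::map_dbl(purrr::compact(msaelnet5.results), ~ min(.x$mpes))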
/Model_Application/Testing/MSAdaELNet5_single_testing.R
no_license
multach87/Dissertation
R
false
false
10,055
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CST_RFSlope.R \name{CST_RFSlope} \alias{CST_RFSlope} \title{RainFARM spectral slopes from a CSTools object} \usage{ CST_RFSlope(data, kmin = 1, time_dim = NULL, ncores = NULL) } \arguments{ \item{data}{An object of the class 's2dv_cube', containing the spatial precipitation fields to downscale. The data object is expected to have an element named \code{$data} with at least two spatial dimensions named "lon" and "lat" and one or more dimensions over which to average these slopes, which can be specified by parameter \code{time_dim}.} \item{kmin}{First wavenumber for spectral slope (default \code{kmin=1}).} \item{time_dim}{String or character array with name(s) of dimension(s) (e.g. "ftime", "sdate", "member" ...) over which to compute spectral slopes. If a character array of dimension names is provided, the spectral slopes will be computed as an average over all elements belonging to those dimensions. If omitted one of c("ftime", "sdate", "time") is searched and the first one with more than one element is chosen.} \item{ncores}{Is an integer that indicates the number of cores for parallel computations using multiApply function. The default value is one.} } \value{ CST_RFSlope() returns spectral slopes using the RainFARM convention (the logarithmic slope of k*|A(k)|^2 where A(k) are the spectral amplitudes). The returned array has the same dimensions as the \code{exp} element of the input object, minus the dimensions specified by \code{lon_dim}, \code{lat_dim} and \code{time_dim}. } \description{ This function computes spatial spectral slopes from a CSTools object to be used for RainFARM stochastic precipitation downscaling method and accepts a CSTools object (of the class 's2dv_cube') as input. } \examples{ exp <- 1 : (2 * 3 * 4 * 8 * 8) dim(exp) <- c(dataset = 1, member = 2, sdate = 3, ftime = 4, lat = 8, lon = 8) lon <- seq(10, 13.5, 0.5) lat <- seq(40, 43.5, 0.5) coords <- list(lon = lon, lat = lat) data <- list(data = exp, coords = coords) class(data) <- 's2dv_cube' slopes <- CST_RFSlope(data) } \author{ Jost von Hardenberg - ISAC-CNR, \email{j.vonhardenberg@isac.cnr.it} }
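% --- Illustrative addition (not generated by roxygen2) ---
% Building on the example above, the documented \code{time_dim} argument also accepts a
% vector of dimension names, in which case the slopes are averaged over all of them, e.g.:
% slopes_avg <- CST_RFSlope(data, kmin = 1, time_dim = c("ftime", "member"))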
/man/CST_RFSlope.Rd
no_license
cran/CSTools
R
false
true
2,212
rd
\name{hmap.annotate} \alias{hmap.annotate} \title{ Add a row and column annotations to a plot-region based heatmap built with 'hmap' } \description{ Annotation of rows or columns in a 'hmap'-plot. By default, rectangles aligned with either rows or columns are plotted to the right-side or lower-side of the heatmap respectively. User-specified customizations may be given to change these annotations in positioning or type. } \usage{ hmap.annotate(h, rw, rw.n = length(unique(rw)), rw.col = rainbow(rw.n, start = 0.05, end = 0.5), rw.wid, rw.hei, rw.pch, rw.x = rep(min(h$rightlim), times = length(h$rowtext$xseq)), rw.y = h$rowtext$yseq, rw.shift = c(0.02, 0), cl, cl.n = length(unique(cl)), cl.col = rainbow(cl.n, start = 0.55, end = 1), cl.wid, cl.hei, cl.pch, cl.x = h$coltext$xseq, cl.y = rep(max(h$bottomlim), times = length(h$coltext$yseq)), cl.shift = c(0, -0.02), ...) } \arguments{ \item{h}{ The list of heatmap parameters returned invisibly by the original 'hmap'-call. } \item{rw}{ Annotation vector for rows 'r', each unique instance is given a different color (or pch) and plotted right-side of the corresponding heatmap rows } \item{rw.n}{ Number of unique colors (or pch) to give each annotated row } \item{rw.col}{ A vector for color values for unique instances in 'r' for annotating rows } \item{rw.wid}{ The widths for annotation boxes for each row 'r' } \item{rw.hei}{ The heights for annotation boxes for each row 'r' } \item{rw.pch}{ Alternatively, instead of widths and heights user may specify a symbol 'pch' to use for annotating each row } \item{rw.x}{ The x-coordinate locations for the row annotations, by default right side of heatmap itself } \item{rw.y}{ The y-coordinate locations for the row annotations, by default same vertical locations as for the heatmap rows } \item{rw.shift}{ Row annotation shift: a vector of 2 values, where first indicates the amount of x-axis shift desired and the second indicates the amount of y-axis shift } \item{cl}{ Annotation vector for columns 'r', each unique instance is given a different color (or pch) and plotted lower-side of the corresponding heatmap columns } \item{cl.n}{ Number of unique colors (or pch) to give each annotated column } \item{cl.col}{ A vector for color values for unique instances in 'c' for annotating columns } \item{cl.wid}{ The widths for annotation boxes for each column 'c' } \item{cl.hei}{ The heights for annotation boxes for each column 'c' } \item{cl.pch}{ Alternatively, instead of widths and heights user may specify a symbol 'pch' to use for annotating each column } \item{cl.x}{ The x-coordinate locations for the column annotations, by default same horizontal locations as for the heatmap columns } \item{cl.y}{ The y-coordinate locations for the column annotations, by default lower side of heatmap itself } \item{cl.shift}{ Column annotation shift: a vector of 2 values, where first indicates the amount of x-axis shift desired and the second indicates the amount of y-axis shift } \item{...}{ Additional parameters supplied either to 'rect' or 'points' function if user desired rectangles or 'pch'-based points respectively } } \author{ Teemu Daniel Laajala <teelaa@utu.fi> } \seealso{ \code{\link{heatmap}} \code{\link{hmap.key}} \code{\link{hmap}} } \examples{ # Generate some data set.seed(1) r1 <- replicate(30, rnorm(20)) lab <- sample(letters[1:2], 20, replace=TRUE) r1[lab==lab[1],] <- r1[lab==lab[1],] + 2 r2a <- replicate(10, rnorm(10)) r2b <- replicate(10, rnorm(10)) # Set up a new plot region, notice we have a 2-fold wider 
x-axis plot.new() plot.window(xlim=c(0,2), ylim=c(0,1)) # Plot an example plot along with the color key and annotations for our 'lab' vector h1 <- hmap(r1, add = TRUE) hmap.key(h1, x1=0.18) hmap.annotate(h1, rw = lab, rw.wid=c(0.82,0.90)) # Plot the rest to show how the coordinates are adjusted to place the heatmap(s) differently h2a <- hmap(r2a, add = TRUE, xlim=c(1.2, 1.8), leftlim=c(1.0, 1.2), rightlim=c(1.8,2.0), ylim=c(0.6, 1.0), bottomlim=c(0.5,0.6), Colv=NA) h2b <- hmap(r2b, add = TRUE, xlim=c(1.2, 1.8), leftlim=c(1.0, 1.2), rightlim=c(1.8,2.0), ylim=c(0.1, 0.5), bottomlim=c(0.0,0.1), Colv=NA) # Show the normal plot region axes axis(1, at=c(0.5,1.5), c("A", "B")) } \keyword{ hplot } \keyword{ aplot }
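% --- Illustrative addition ---
% As documented above, the rw.pch/cl.pch arguments switch the annotations from rectangles
% to point symbols; for instance, annotating the rows of h1 with filled circles instead:
% hmap.annotate(h1, rw = lab, rw.pch = 16)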
/man/hmap.annotate.Rd
no_license
cran/hamlet
R
false
false
4,573
rd
# Load data
library(data.table)  # fread(), setnames(), and the := operator are from data.table

cc = fread('entityTypeGrouping.csv')

entity.region = fread('data/EntitiesByRegion.csv')
entity.region[, asOfDate := as.Date(asOfDate)]

entity.ofc = fread('data/EntitiesByOFC.csv')
entity.ofc[, asOfDate := as.Date(asOfDate)]

link.node.ratio = fread('data/linkNodeRatio.csv')
link.node.ratio[, asOfDate := as.Date(asOfDate)]

assets = fread('data/Assets.csv')
assets[, yearqtr := as.Date(yearqtr)]
# measure in billions
assets[, BHCK2170 := BHCK2170/1e6]

HC10bn = fread('data/HC10bn.csv', select = 2:4)
setnames(HC10bn, 3, paste0(names(HC10bn)[3], ' (Thousands)'))

load('data/histories.RData')
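# --- Optional sanity checks (illustration only; uses only the columns parsed above) ---
# Uncomment to confirm the date columns parsed correctly and to inspect the loaded tables:
# range(entity.region$asOfDate)
# range(assets$yearqtr)
# str(HC10bn)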
/app/_loadData.R
no_license
nemochina2008/nic-structure
R
false
false
599
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/anova_psem.R \name{anovaTable} \alias{anovaTable} \title{Single anova} \usage{ anovaTable(object, anovafun = "Anova", digits = 3) } \description{ Single anova } \keyword{internal}
/man/anovaTable.Rd
no_license
jslefche/piecewiseSEM
R
false
true
258
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nlaRiparianVegetation.r \name{nlaRiparianVegetation} \alias{nlaRiparianVegetation} \title{Calculate NLA Riparian Zone and Vegetation Metrics} \usage{ nlaRiparianVegetation(bigTrees = NULL, bigTrees_dd = NULL, smallTrees = NULL, smallTrees_dd = NULL, canopyType = NULL, canopyType_dd = NULL, groundcoverBare = NULL, groundcoverBare_dd = NULL, groundcoverInundated = NULL, groundcoverInundated_dd = NULL, groundcoverNonwoody = NULL, groundcoverNonwoody_dd = NULL, groundcoverWoody = NULL, groundcoverWoody_dd = NULL, understoryNonwoody = NULL, understoryNonwoody_dd = NULL, understoryWoody = NULL, understoryWoody_dd = NULL, understoryType = NULL, understoryType_dd = NULL, drawdown = NULL, horizontalDistance_dd = NULL, createSyntheticCovers = TRUE, fillinDrawdown = TRUE) } \arguments{ \item{bigTrees}{A data frame containing cover class values for big trees (trunk >0.3 m dBH) in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{bigTrees_dd}{A data frame containing cover class values for big trees (trunk >0.3 m dBH) in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{smallTrees}{A data frame containing cover class values for small trees (trunk <0.3 m dBH) in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{smallTrees_dd}{A data frame containing cover class values for small trees (trunk <0.3 m dBH) in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{canopyType}{A data frame containing canopy type (> 5 m) values for in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE a character value of C (coniferous), D (deciduous), E (broadleaf evergreen), M (mixed), or N (None) the cover type. }} \item{canopyType_dd}{A data frame containing canopy type (> 5 m) values for in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE a character value of C (coniferous), D (deciduous), E (broadleaf evergreen), M (mixed), or N (None) the cover type. 
}} \item{groundcoverBare}{A data frame containing cover class values for barren, bare dirt, litter, duff, or building ground cover (< 0.5 m) types in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{groundcoverBare_dd}{A data frame containing cover class values for barren, bare dirt, litter, duff, or building ground cover (< 0.5 m) types in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{groundcoverInundated}{A data frame containing cover class values for standing water or inundated vegetation ground cover (< 0.5 m) types in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{groundcoverInundated_dd}{A data frame containing cover class values for standing water or inundated vegetation ground cover (< 0.5 m)types in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{groundcoverNonwoody}{A data frame containing cover class values for herbs, grasses, and forbs ground cover (< 0.5 m) types in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{groundcoverNonwoody_dd}{A data frame containing cover class values for herbs, grasses, and forbs ground cover (< 0.5 m) types in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{groundcoverWoody}{A data frame containing cover class values for woody shrubs and saplings ground cover (< 0.5 m) types in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{groundcoverWoody_dd}{A data frame containing cover class values for woody shrubs and saplings ground cover (< 0.5 m) types in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. 
\item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{understoryNonwoody}{A data frame containing cover class values for tall herbs, grasses, and forbs understory cover (0.5 to 5 m) types in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{understoryNonwoody_dd}{A data frame containing cover class values for tall herbs, grasses, and forbs understory cover (0.5 to 5 m) types in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{understoryWoody}{A data frame containing cover class values for woody shrubs and saplings understory cover (0.5 to 5 m) types in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{understoryWoody_dd}{A data frame containing cover class values for woody shrubs and saplings understory cover (0.5 to 5 m) types in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, from 0-4 containing the cover category. }} \item{understoryType}{A data frame containing understory type (0.5 to 5 m) values for in the riparian zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE a character value of C (coniferous), D (deciduous), E (broadleaf evergreen), M (mixed), or N (None) the cover type. }} \item{understoryType_dd}{A data frame containing understory type (0.5 to 5 m) values for in the drawdown zone, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE a character value of C (coniferous), D (deciduous), E (broadleaf evergreen), M (mixed), or N (None) the cover type. }} \item{drawdown}{A data frame indicating presence of drawdown at station, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. \item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, indicating drawdown exists at a site. }} \item{horizontalDistance_dd}{A data frame containing the horizontal distance to the high water mark where drawdown exists, with the columns: \itemize{ \item SITE an integer or character value identifying a single site visit. 
\item STATION a character value identifying the station within the SITE \item VALUE an integer value, or character value that is castable to an integer, indicating the horizontal distance to the high water mark when drawdown exists at a site. }} \item{createSyntheticCovers}{A logical value, which specifies whether to create synthetic cover values as proportions of drawdown and riparian cover. This argument should be set to FALSE when the data follows the 2007 NLA protocol or do not contain drawdown cover data. The default value is TRUE.} \item{fillinDrawdown}{A logical value, which specifies whether to use the DRAWDOWN parameter to fill in unrecorded cover and HORIZ_DIST_DD values. The default value is TRUE.} } \value{ Either a data frame when metric calculation is successful or a character string containing an error message when metric calculation is not successful. The data frame contains the following columns: \itemize{ \item SITE - unique site visit identifier \item METRIC - metric name \item VALUE - metric value } The output metrics include: RVFCCANBIG_DD, RVFCCANBIG_RIP, RVFCCANBIG_SYN, RVFCCANSMALL_DD, RVFCCANSMALL_RIP, RVFCCANSMALL_SYN, RVFCGNDBARE_DD, RVFCGNDBARE_RIP, RVFCGNDBARE_SYN, RVFCGNDINUNDATED_DD, RVFCGNDINUNDATED_RIP, RVFCGNDINUNDATED_SYN, RVFCGNDNONW_DD, RVFCGNDNONW_RIP, RVFCGNDNONW_SYN, RVFCGNDWOODY_DD, RVFCGNDWOODY_RIP, RVFCGNDWOODY_SYN, RVFCUNDNONW_DD, RVFCUNDNONW_RIP, RVFCUNDNONW_SYN, RVFCUNDWOODY_DD, RVFCUNDWOODY_RIP, RVFCUNDWOODY_SYN, RVFPCANBIG_DD, RVFPCANBIG_RIP, RVFPCANBIG_SYN, RVFPCANBROADLEAF_DD, RVFPCANBROADLEAF_RIP, RVFPCANCONIFEROUS_DD, RVFPCANCONIFEROUS_RIP, RVFPCANDECIDUOUS_DD, RVFPCANDECIDUOUS_RIP, RVFPCANMIXED_DD, RVFPCANMIXED_RIP, RVFPCANNONE_DD, RVFPCANNONE_RIP, RVFPCANSMALL_DD, RVFPCANSMALL_RIP, RVFPCANSMALL_SYN, RVFPGNDBARE_DD, RVFPGNDBARE_RIP, RVFPGNDBARE_SYN, RVFPGNDINUNDATED_DD, RVFPGNDINUNDATED_RIP, RVFPGNDINUNDATED_SYN, RVFPGNDNONW_DD, RVFPGNDNONW_RIP, RVFPGNDNONW_SYN, RVFPGNDWOODY_DD, RVFPGNDWOODY_RIP, RVFPGNDWOODY_SYN, RVFPUNDBROADLEAF_DD, RVFPUNDBROADLEAF_RIP, RVFPUNDCONIFEROUS_DD, RVFPUNDCONIFEROUS_RIP RVFPUNDDECIDUOUS_DD, RVFPUNDDECIDUOUS_RIP, RVFPUNDMIXED_DD, RVFPUNDMIXED_RIP, RVFPUNDNONE_DD, RVFPUNDNONE_RIP, RVFPUNDNONW_DD, RVFPUNDNONW_RIP, RVFPUNDNONW_SYN, RVFPUNDWOODY_DD, RVFPUNDWOODY_RIP, RVFPUNDWOODY_SYN, RVICANOPY_DD, RVICANOPY_RIP, RVICANOPY_SYN, RVICANUND_DD, RVICANUND_RIP, RVICANUND_SYN, RVIGROUND_DD, RVIGROUND_RIP, RVIGROUND_SYN, RVIHERBS_DD, RVIHERBS_RIP, RVIHERBS_SYN, RVITALLWOOD_DD, RVITALLWOOD_RIP, RVITALLWOOD_SYN, RVITOTALVEG_DD, RVITOTALVEG_RIP, RVITOTALVEG_SYN, RVIUNDERSTORY_DD, RVIUNDERSTORY_RIP, RVIUNDERSTORY_SYN, RVIWOODY_DD, RVIWOODY_RIP, RVIWOODY_SYN, RVNCANBIG_DD, RVNCANBIG_RIP, RVNCANBIG_SYN, RVNCANOPY_DD, RVNCANOPY_RIP, RVNCANSMALL_DD, RVNCANSMALL_RIP, RVNCANSMALL_SYN, RVNGNDBARE_DD, RVNGNDBARE_RIP, RVNGNDBARE_SYN, RVNGNDINUNDATED_DD, RVNGNDINUNDATED_RIP, RVNGNDINUNDATED_SYN, RVNGNDNONW_DD, RVNGNDNONW_RIP, RVNGNDNONW_SYN, RVNGNDWOODY_DD, RVNGNDWOODY_RIP, RVNGNDWOODY_SYN, RVNUNDERSTORY_DD, RVNUNDERSTORY_RIP, RVNUNDNONW_DD, RVNUNDNONW_RIP, RVNUNDNONW_SYN, RVNUNDWOODY_DD, RVNUNDWOODY_RIP, RVNUNDWOODY_SYN, RVVCANBIG_DD, RVVCANBIG_RIP, RVVCANBIG_SYN, RVVCANSMALL_DD, RVVCANSMALL_RIP, RVVCANSMALL_SYN, RVVGNDBARE_DD, RVVGNDBARE_RIP, RVVGNDBARE_SYN, RVVGNDINUNDATED_DD, RVVGNDINUNDATED_RIP, RVVGNDINUNDATED_SYN, RVVGNDNONW_DD, RVVGNDNONW_RIP, RVVGNDNONW_SYN, RVVGNDWOODY_DD, RVVGNDWOODY_RIP, RVVGNDWOODY_SYN, RVVUNDNONW_DD, RVVUNDNONW_RIP, RVVUNDNONW_SYN, RVVUNDWOODY_DD, RVVUNDWOODY_RIP, RVVUNDWOODY_SYN. 
Metric descriptions are provided in \emph{NLA_Physical_Habitat_Metric_Descriptions.pdf} in the package documentation. } \description{ This function calculates the riparian zone and vegetation portion of the physical habitat metrics for NLA data. The function requires data frames containing validated physical habitat data collected using the NLA protocol. } \examples{ head(nlaPhabEx) bigTrees <- subset(nlaPhabEx,PARAMETER=='C_BIGTREES',select=-PARAMETER) bigTrees_dd <- subset(nlaPhabEx,PARAMETER=='C_BIGTREES_DD',select=-PARAMETER) smallTrees <- subset(nlaPhabEx,PARAMETER=='C_SMALLTREES',select=-PARAMETER) smallTrees_dd <- subset(nlaPhabEx,PARAMETER=='C_SMALLTREES_DD',select=-PARAMETER) canopyType <- subset(nlaPhabEx,PARAMETER=='CANOPY',select=-PARAMETER) canopyType_dd <- subset(nlaPhabEx,PARAMETER=='CANOPY_DD',select=-PARAMETER) grdcvrBare <- subset(nlaPhabEx,PARAMETER=='GC_BARE',select=-PARAMETER) grdcvrBare_dd <- subset(nlaPhabEx,PARAMETER=='GC_BARE_DD',select=-PARAMETER) grdcvrInund <- subset(nlaPhabEx,PARAMETER=='GC_INUNDATED',select=-PARAMETER) grdcvrInund_dd <- subset(nlaPhabEx,PARAMETER=='GC_INUNDATED_DD',select=-PARAMETER) grdcvrNw <- subset(nlaPhabEx,PARAMETER=='GC_NONWOODY',select=-PARAMETER) grdcvrNw_dd <- subset(nlaPhabEx,PARAMETER=='GC_NONWOODY_DD',select=-PARAMETER) grdcvrW <- subset(nlaPhabEx,PARAMETER=='GC_WOODY',select=-PARAMETER) grdcvrW_dd <- subset(nlaPhabEx,PARAMETER=='GC_WOODY_DD',select=-PARAMETER) undNonW <- subset(nlaPhabEx,PARAMETER=='U_NONWOODY',select=-PARAMETER) undNonW_dd <- subset(nlaPhabEx,PARAMETER=='U_NONWOODY_DD',select=-PARAMETER) undW <- subset(nlaPhabEx,PARAMETER=='U_WOODY',select=-PARAMETER) undW_dd <- subset(nlaPhabEx,PARAMETER=='U_WOODY_DD',select=-PARAMETER) undType <- subset(nlaPhabEx,PARAMETER=='UNDERSTORY',select=-PARAMETER) undType_dd <- subset(nlaPhabEx,PARAMETER=='UNDERSTORY_DD',select=-PARAMETER) drawdown <- subset(nlaPhabEx,PARAMETER=='DRAWDOWN',select=-PARAMETER) horizontalDistance_dd <- subset(nlaPhabEx,PARAMETER=='HORIZ_DIST_DD',select=-PARAMETER) # Use defaults for fillinDrawdown and createSyntheticCovers exRipVeg <- nlaRiparianVegetation(bigTrees, bigTrees_dd, smallTrees, smallTrees_dd, canopyType, canopyType_dd, grdcvrBare, grdcvrBare_dd, grdcvrInund, grdcvrInund_dd, grdcvrNw, grdcvrNw_dd, grdcvrW, grdcvrW_dd, undNonW, undNonW_dd, undW, undW_dd, undType, undType_dd, drawdown, horizontalDistance_dd) head(exRipVeg) } \author{ Curt Seeliger \email{Seeliger.Curt@epa.gov}\cr Tom Kincaid \email{Kincaid.Tom@epa.gov} } \keyword{survey}
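All of the cover arguments described above share the same three-column SITE/STATION/VALUE layout. As a quick orientation only (this is not taken from the package examples; the site, stations, and cover values are made up), a minimal call might look like the following sketch, with all unused arguments left at their NULL defaults:

# Two stations at one hypothetical site; VALUE holds the 0-4 cover class.
bigTrees <- data.frame(SITE    = c(1000, 1000),
                       STATION = c("A", "B"),
                       VALUE   = c(2, 3))
bigTrees_dd <- data.frame(SITE    = c(1000, 1000),
                          STATION = c("A", "B"),
                          VALUE   = c(0, 1))
# With only the big-tree covers supplied, the remaining arguments stay NULL;
# drawdown handling is switched off here for simplicity.
rv <- nlaRiparianVegetation(bigTrees = bigTrees, bigTrees_dd = bigTrees_dd,
                            createSyntheticCovers = FALSE, fillinDrawdown = FALSE)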
/man/nlaRiparianVegetation.Rd
no_license
jasonelaw/aquamet
R
false
true
16,518
rd
# Analysis of the data frame on the US states.
library(tidyverse)
library(maps)
library(mapproj)
library(ggplot2)
source("data/helpers.R")  # helpers.R is expected to define percent_map()

states <- readRDS("data/counties.rds")
head(states)
colnames(states) <- c("name", "total.pop", "Human", "Elf", "Orc", "Wizard")
head(states)

var <- states$Human
color <- "steelblue"
legend.title <- "Percentage of Humans"
min <- 0
max <- 100
percent_map(var, color, legend.title, min, max)
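The script depends on percent_map() from data/helpers.R, which is not included in this file. As a rough, hypothetical stand-in only (the real helper may differ), a function with the same signature could shade county polygons from white to the chosen color by percentage:

# Hypothetical sketch of a percent_map()-style helper; assumes `var` is ordered
# to match the polygons returned by map("county").
percent_map_sketch <- function(var, color, legend.title, min = 0, max = 100) {
  shades <- colorRampPalette(c("white", color))(100)          # 100 shades, light to dark
  var <- pmin(pmax(var, min), max)                            # clamp to the legend range
  bins <- cut(var, seq(min, max, length.out = 101),
              include.lowest = TRUE, labels = FALSE)           # bin each percentage 1..100
  fills <- shades[bins]
  maps::map("county", fill = TRUE, col = fills, lty = 0,
            projection = "polyconic", mar = c(0, 0, 0, 0))
  legend("bottomleft", legend = c(max, min), fill = shades[c(100, 1)],
         title = legend.title, bty = "n")
}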
/ShinyBeginnings/09-StatesDataAnalysis.R
no_license
Angnar1997/ShinyPath
R
false
false
462
r
library(shiny) library(tidyverse) library(shinythemes) library(here) library(plotly) library(shinydashboard) load("DBdata[asmt][v4.491].RData") #Tidy format: tb.data --- Total biomass data #No data: ATBTUNAEATL, ATBTUNAWATL (both ADDED as all NAs!) tuna_biomass <- tb.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL)%>% rownames_to_column() %>% add_column(ATBTUNAEATL = NA) %>% add_column(ATBTUNAWATL = NA) %>% pivot_longer(!rowname, names_to = "species", values_to = "biomass") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: tc.data --- Total catch data #Data for all 14 species tuna_catch <- tc.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL)%>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "catch") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% #NO data before 1950 mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: er.data --- Exploitation rate data (usually an annual fraction harvested) #No data: ATBTUNAEATL, ATBTUNAWATL (both ADDED as all NAs!) 
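# --- Sketch (not part of the original app): the select()/pivot_longer()/case_when()
# --- pattern used for tb.data and tc.data above is repeated for six RAM Legacy tables
# --- in this file. Assuming every *.data object is a year-by-stock table sharing these
# --- stock codes, the repetition could be factored into one helper like this:
stock_labels <- c(
  ALBANATL    = "Albacore tuna Northern Atlantic",
  ALBASATL    = "Albacore tuna South Atlantic",
  BIGEYEATL   = "Bigeye tuna Atlantic Ocean",
  BMARLINATL  = "Blue marlin Atlantic Ocean",
  SAILEATL    = "Sailfish Eastern Atlantic",
  SAILWATL    = "Sailfish Western Atlantic",
  SKJEATL     = "Skipjack tuna Eastern Atlantic",
  SKJWATL     = "Skipjack tuna Western Atlantic",
  SWORDNATL   = "Swordfish Northern Atlantic",
  SWORDSATL   = "Swordfish South Atlantic",
  WMARLINATL  = "White marlin Atlantic Ocean",
  YFINATL     = "Yellowfin tuna Atlantic Ocean",
  ATBTUNAEATL = "Atlantic bluefin tuna Eastern Atlantic",
  ATBTUNAWATL = "Atlantic bluefin tuna Western Atlantic"
)
tidy_ram <- function(mat, value_name) {
  mat %>%
    select(any_of(names(stock_labels))) %>%          # keep only stocks present in this table
    rownames_to_column("year") %>%
    pivot_longer(-year, names_to = "species", values_to = value_name) %>%
    mutate(year = as.numeric(year),
           species = unname(stock_labels[species])) %>%
    filter(year >= 1950)                             # no data before 1950
}
# e.g. tuna_biomass could be built as tidy_ram(tb.data, "biomass"). Note the original
# code also adds all-NA columns for stocks missing from a table; this sketch omits that.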
#NOTE: harvest rate (U); may be either exploitation rate or fishing mortality tuna_er <- er.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL) %>% rownames_to_column() %>% add_column(ATBTUNAEATL = NA) %>% add_column(ATBTUNAWATL = NA) %>% pivot_longer(!rowname, names_to = "species", values_to = "explotation_rate") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: divbpref.data --- B/Bmsy pref data (B/Bmsy if available, otherwise B/Bmgt) #Data for all 14 species tuna_b_bmsy <- divbpref.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL) %>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "b_bmsy") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: divupref.data --- U/Umsy pref data (U/Umsy if available, otherwise U/Umgt) #Data for all 14 species tuna_u_umsy <- divupref.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL) %>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "u_umsy") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", 
species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: cdivmeanc.data --- Catch/(mean catch) data #Data for all 14 species tuna_catch_mean <- cdivmeanc.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL) %>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "c_c_mean") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Merge all time series tidy dataframes: tuna_merged <- tuna_catch %>% right_join(tuna_biomass, by=c("year","species")) %>% right_join(tuna_er, by=c("year","species")) %>% right_join(tuna_b_bmsy, by=c("year","species")) %>% right_join(tuna_u_umsy, by=c("year","species")) %>% right_join(tuna_catch_mean, by=c("year","species")) #NOTES: add text describing each time series #TAB 5 Plots: ##B/Bmsy plot: tuna_region_bmsy1 <- tuna_merged %>% group_by(year) p1 <- ggplot(tuna_region_bmsy1)+ geom_boxplot(aes(x= factor(year), y = b_bmsy), fill = "blue", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(aes(x= factor(year), y = b_bmsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1950","1960", "1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative biomass (B/Bmsy)")+ theme_minimal() p1 <- plotly_build(p1)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean B/Bmsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p1$x$data <- lapply(p1$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) #Umsy plot: tuna_region_2 <- tuna_merged %>% filter(year>=1970) %>% group_by(year) p2 <- ggplot(tuna_region_2)+ geom_boxplot(aes(x= factor(year), y = u_umsy), fill = "blue", width = 0.2, outlier.color = NA)+ stat_summary(aes(x= factor(year), y = u_umsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", 
"2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative fishing pressure (U/Umsy)")+ theme_minimal() p2 <- plotly_build(p2)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean U/Umsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p2$x$data <- lapply(p2$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) #Catch/catch mean plots: p3 <- ggplot()+ geom_boxplot(data = tuna_region_2, aes(x= factor(year), y = c_c_mean), fill = "blue", width = 0.2, outlier.color = NA)+ stat_summary(data = tuna_region_2, aes(x= factor(year), y = c_c_mean, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Catch/mean catch")+ theme_minimal() p3 <- plotly_build(p3)%>% layout(annotations = list(x = 0.26, y = -0.1, text = "Red line shows mean catch/mean catch.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p3$x$data <- lapply(p3$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) # User Interface ui <- fluidPage(theme = shinytheme("sandstone"), navbarPage(title=div(img(src="https://creazilla-store.fra1.digitaloceanspaces.com/silhouettes/67427/tuna-silhouette-4bb7ee-md.png", height = "05%", width = "05%"), "StockWatch"), tabPanel("Home", mainPanel(h2("Background"), p("Monitoring fish stocks is a crucial aspect of fisheries management. Stock assessments are extremely valuable for assessing biodiversity, setting catch limits, and implementing a multitude of other management strategies necessary to conserve marine species."), img(src = "https://images.squarespace-cdn.com/content/v1/511cdc7fe4b00307a2628ac6/1598456180510-8HP33TBNY8Z8B4FMFE64/ke17ZwdGBToddI8pDm48kEhk4PMdjneZU7fdR_q5soxZw-zPPgdn4jUwVcJE1ZvWQUxwkmyExglNqGp0IvTJZamWLI2zvYWH8K3-s_4yszcp2ryTI0HqTOaaUohrI8PIMdG2tpRl3f2mZAJsRkSvSCVIhKT8STsEVs-xYFgM6b8KMshLAGzx4R3EDFOm1kBS/1280_wmrMzTIk3962.jpg"), h2("About the App"), p("StockWatch is a tool for visualizing biomass, fishing pressure, and other components of stock assessments that are useful for tracking the health of target species. "), h3("Management Implications"), p("It is essential to not only evaluate the status of fish stocks, but also the degree to which they are managed. 
In the “Trends” tab, StockWatch allows users to compare biomass values with fishing pressure and catch parameters for Atlantic fish populations with differing management levels."), h3("Focus Species"), h4("Our tool focuses on 12 commercially viable species in the North Atlantic:"), h5("• Northern Atlantic Albacore tuna"), h5("• South Atlantic Albacore tuna"), h5("• Bigeye tuna"), h5("• Blue Marlin"), h5("• Eastern Atlantic Sailfish"), h5("• Western Atlantic Sailfish"), h5("• Eastern Atlantic Skipjack"), h5("• Western Atlantic Skipjack"), h5("• North Atlantic Swordfish"), h5("• South Atlantic Swordfish"), h5("• White marlin"), h5("• Yellowfin tuna"), img(src = "https://www.freeworldmaps.net/ocean/atlantic/atlantic-ocean-blank-map.jpg", height = "80%", width = "80%", align = "right") ) ), tabPanel("Species", sidebarLayout( sidebarPanel("Click a species to learn more:", radioButtons(inputId = "table1", label = h3("Species"), choices = c("Albacore tuna Northern Atlantic" = "1", "Albacore tuna South Atlantic" = "2", "Bigeye tuna Atlantic Ocean" = "3", "Blue marlin Atlantic Ocean" = "4", "Sailfish Eastern Atlantic" = "5", "Sailfish Western Atlantic" = "6", "Skipjack tuna Eastern Atlantic" = "7", "Skipjack tuna Western Atlantic" = "8", "Swordfish Northern Atlantic" = "9", "Swordfish South Atlantic" = "10", "White marlin Atlantic Ocean" = "11", "Yellowfin tuna Atlantic Ocean" = "12", "Atlantic bluefin tuna Eastern Atlantic" = "13", "Atlantic bluefin tuna Western Atlantic" = "14"), selected = "1" ), width = 3 ), mainPanel(h2(" Basic Species Information"), br(), br(), uiOutput("table"), "Data: Fisheries, NOAA. All Species Directory Page | NOAA Fisheries. https://www.fisheries.noaa.gov/species-directory. National.", "Photo: NOAA Fisheries" ) ) ), tabPanel("Time Series", titlePanel("Atlantic Ocean stocks time series"), sidebarLayout( sidebarPanel(selectInput(inputId = "select", label = h4("Select time series"), choices = list("Total Biomass" = "biomass", "Total Catch" = "catch", "Exploitation Rate" = "explotation_rate", "Relative biomass (B/Bmsy)" = "b_bmsy", "Relative fishing pressure (U/Umsy)" = "u_umsy", "Catch/mean catch" = "c_c_mean" ), selected = 1), width = 3 ), mainPanel("Time series", plotlyOutput(outputId = "tuna_plot"), hr(), textOutput("value"), "Data: RAM Legacy Stock Assessment Database. Atlantic Ocean Tunas Region | RAM Legacy. https://www.ramlegacy.org/explore-the-database/regions/atlantic-ocean/."
) ) ), tabPanel("Stock Status",titlePanel("Individual Atlantic Ocean stocks B/Bmsy, U/Umsy, and catch/mean catch"), sidebarLayout( sidebarPanel( radioButtons(inputId = "species", label = h4("Select Atlantic Ocean stock species"), choices = c("Albacore tuna Northern Atlantic", "Albacore tuna South Atlantic", "Bigeye tuna Atlantic Ocean", "Blue marlin Atlantic Ocean", "Sailfish Eastern Atlantic", "Sailfish Western Atlantic", "Skipjack tuna Eastern Atlantic", "Skipjack tuna Western Atlantic", "Swordfish Northern Atlantic", "Swordfish South Atlantic", "White marlin Atlantic Ocean", "Yellowfin tuna Atlantic Ocean", "Atlantic bluefin tuna Eastern Atlantic", "Atlantic bluefin tuna Western Atlantic"), selected = "Albacore tuna Northern Atlantic" ) ), mainPanel("Relative biomass, fishing pressure, and catch/mean catch by species", plotlyOutput(outputId = "b_u_catch_plot"), div(style="display: inline-block;vertical-align:top; width: 150px;",selectInput("date1", label = h6("Starting date:"), choices = 1950:2017, selected = 1950)), div(style="display: inline-block;vertical-align:top; width: 150px;", selectInput("date2", label = h6("Ending date:"), choices = 1950:2017, selected = 2017)), "Data: RAM Legacy Stock Assessment Database. Atlantic Ocean Tunas Region | RAM Legacy. https://www.ramlegacy.org/explore-the-database/regions/atlantic-ocean/" ) ) ), tabPanel("Trends", titlePanel("Atlantic Ocean stocks trends: B/Bmsy, U/Umsy, and catch/mean catch"), sidebarLayout( sidebarPanel(style = "position:fixed;width:inherit;", radioButtons(inputId ="campare", label = h4("Select"), choices = c("Atlantic Ocean stocks" = "Atlantic_stock", "Compare species" = "Compare_species"), selected = "Atlantic_stock"), conditionalPanel(condition = "input.campare == 'Compare_species'", radioButtons(inputId = "comp_sp", label = h5("Select species"), choices = c("Albacore tuna Northern Atlantic", "Albacore tuna South Atlantic", "Bigeye tuna Atlantic Ocean", "Blue marlin Atlantic Ocean", "Sailfish Eastern Atlantic", "Sailfish Western Atlantic", "Skipjack tuna Eastern Atlantic", "Skipjack tuna Western Atlantic", "Swordfish Northern Atlantic", "Swordfish South Atlantic", "White marlin Atlantic Ocean", "Yellowfin tuna Atlantic Ocean", "Atlantic bluefin tuna Eastern Atlantic", "Atlantic bluefin tuna Western Atlantic"), selected = "Albacore tuna Northern Atlantic" )), width = 3 ), mainPanel(h4("Relative biomass trends", align = "center"), plotlyOutput(outputId = "atl_oc_plot"), br(), br(), h4("Relative fishing pressure trends", align = "center"), plotlyOutput(outputId = "atl_oc_plot2"), br(), br(), h4("Catch/mean catch trends", align = "center"), plotlyOutput(outputId = "atl_oc_plot3"), br(), br(), br(), "Data: RAM Legacy Stock Assessment Database. Atlantic Ocean Tunas Region | RAM Legacy. 
https://www.ramlegacy.org/explore-the-database/regions/atlantic-ocean/" ) ) ) ) ) # Server function server <- function(input, output) { ##TAB SPECIES: output$table <- renderPrint({ if (input$table1 == "1") {img(src='north_albacore.png', height = '800px', align = "left")} else if (input$table1 == "2") {img(src='south_alb.png', height = '800px', align = "left")} else if (input$table1 == "3") {img(src='bigeye.png', height = '800px', align = "left")} else if (input$table1 == "4") {img(src='blue_marlin.png', height = '800px', align = "left")} else if (input$table1 == "5") {img(src='east_sail.png', height = '800px', align = "left")} else if (input$table1 == "6") {img(src='west_sail.png', height = '800px', align = "left")} else if (input$table1 == "7") {img(src='east_skipjack.png', height = '800px', align = "left")} else if (input$table1 == "8") {img(src='west_skipjack.png', height = '800px', align = "left")} else if (input$table1 == "9") {img(src='north_sword.png', height = '800px', align = "left")} else if (input$table1 == "10") {img(src='south_sword.png', height = '800px', align = "left")} else if (input$table1 == "11") {img(src='white_marlin.png', height = '800px', align = "left")} else if (input$table1 == "12") {img(src='yellowfin.png', height = '800px', align = "left")} else if (input$table1 == "13") {img(src='east_bluefin.png', height = '800px', align = "left")} else {img(src='w_bluefin.png', height = '800px', align = "left")} }) ##TAB TIME SERIES: tuna_react <- reactive({ tuna_merged %>% rename(unit = input$select) }) y_labs <- reactive({ case_when(input$select == "explotation_rate" ~ "Exploitation rate", input$select == "catch" ~ "Total catch (Mt)", input$select == "biomass" ~ "Total biomass (Mt)", input$select == "b_bmsy" ~ "Relative biomass (B/Bmsy)", input$select == "u_umsy" ~ "Relative fishing pressure (U/Umsy)", input$select == "c_c_mean" ~ "Catch/mean catch") }) output$value <- renderText({ case_when(input$select == "explotation_rate" ~ "Exploitation rate is the proportion of biomass that is removed from a fish stock or population.", input$select == "catch" ~ "Total catch is simply the total quantity of fish or fishery product removed from a single population or species.", input$select == "biomass" ~ "Total Biomass is the total volume or “stock” of a population. Here, it is measured as a weight, in metric tons (MT). Though it is perhaps one of the most useful parameters in estimating the health of a specific fishery, it should be noted that total biomass does not indicate a population’s age distribution. ", input$select == "b_bmsy" ~ "Relative biomass (B/Bmsy) is the ratio of observed biomass (total population volume) to the biomass at Maximum Sustainable Yield (MSY).
MSY is the highest catch that can be removed from a continuously fished population, given average environmental conditions.", input$select == "u_umsy" ~ "Relative fishing pressure (U/Umsy) is the ratio of the fishing mortality rate (U) of a target species to that same species’ mortality rate, adjusted to achieve maximum sustainable yield (Umsy).", input$select == "c_c_mean" ~ "Catch / catch mean is the ratio of annual catch to the average catch overall, and is another helpful parameter for assessing fishing pressure.") }) output$tuna_plot <- renderPlotly({ ggplotly(ggplot(data = tuna_react(), aes(x = year, y = unit)) + geom_line(aes(color = species)) + theme_minimal() + labs(x = "Year", y = y_labs()) + theme(legend.title = element_blank()) ) }) ##TAB STOCK STATUS: tuna_relative_react <- reactive({ tuna_merged %>% filter(species == input$species) %>% filter(year >= input$date1, year<=input$date2) }) output$b_u_catch_plot <- renderPlotly({ ggplot(data = tuna_relative_react(), aes(x = year,)) + geom_line(aes(y = b_bmsy, color = "B/Bmsy"), alpha = 0.7) + geom_line(aes(y = u_umsy, color = "U/Umsy"), alpha = 0.7) + geom_line(aes(y = c_c_mean, color = "Catch/mean catch"), alpha = 0.7)+ geom_hline(yintercept = 1, color = "orange2") + scale_color_manual(values = c( "B/Bmsy" = "red3", "U/Umsy" = "palegreen4", "Catch/mean catch" = "dodgerblue1")) + labs(color = "")+ labs(y = "")+ theme_minimal() }) #TAB TENDS: #Tab 5 output 1 (B/Bmsy): output$atl_oc_plot <- renderPlotly({ if (input$campare == "Atlantic_stock") {p1} else { tuna_region_react <- reactive({ tuna_merged %>% filter(species == input$comp_sp) %>% group_by(year) }) p11 <- ggplot()+ geom_boxplot(data = tuna_region_bmsy1, aes(x= factor(year), y = b_bmsy), fill = "grey", color = "grey", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(data = tuna_region_bmsy1, aes(x= factor(year), y = b_bmsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "grey")+ stat_summary(data = tuna_region_react(), aes(x= factor(year), y = b_bmsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1950","1960", "1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative biomass (B/Bmsy)")+ theme_minimal() p11 <- plotly_build(p11)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean B/Bmsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p11$x$data <- lapply(p11$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) p11 } }) output$atl_oc_plot2 <- renderPlotly({ if (input$campare == "Atlantic_stock") {p2} else { tuna_region_react2 <- reactive({ tuna_merged %>% filter(year>=1970) %>% filter(species == input$comp_sp) %>% group_by(year) }) p22 <- ggplot()+ geom_boxplot(data = tuna_region_2, aes(x= factor(year), y = u_umsy), fill = "grey", color = "grey", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(data = tuna_region_2, aes(x= factor(year), y = u_umsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "grey")+ stat_summary(data = tuna_region_react2(), aes(x= factor(year), y = u_umsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", "2000", "2010", "2018") )+ 
scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative fishing pressure (U/Umsy)")+ theme_minimal() p22 <- plotly_build(p22)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean U/Umsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p22$x$data <- lapply(p22$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) p22 } }) output$atl_oc_plot3 <- renderPlotly({ if (input$campare == "Atlantic_stock") {p3} else { tuna_region_react3 <- reactive({ tuna_merged %>% filter(year>=1970) %>% filter(species == input$comp_sp) %>% group_by(year) }) p33 <- ggplot()+ geom_boxplot(data = tuna_region_2, aes(x= factor(year), y = c_c_mean), fill = "grey", color = "grey", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(data = tuna_region_2, aes(x= factor(year), y = c_c_mean, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "grey")+ stat_summary(data = tuna_region_react3(), aes(x= factor(year), y = c_c_mean, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Catch/mean catch")+ theme_minimal() p33 <- plotly_build(p33)%>% layout(annotations = list(x = 0.26, y = -0.1, text = "Red line shows mean catch/mean catch.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p33$x$data <- lapply(p33$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) p33 } }) } # Combine user interface + server shinyApp(ui = ui, server = server)
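The reference ratios that the app's help text describes (B/Bmsy, U/Umsy, exploitation rate, catch/mean catch) can be illustrated with a toy calculation; the numbers below are invented and are not taken from the RAM Legacy data:

# Hypothetical stock with three years of data.
biomass <- c(120000, 95000, 80000)   # total biomass (MT)
catch   <- c(15000, 14000, 16000)    # total catch (MT)
b_msy   <- 100000                    # assumed biomass at maximum sustainable yield
u_msy   <- 0.12                      # assumed exploitation rate at MSY

u       <- catch / biomass           # exploitation rate: fraction of biomass removed each year
b_bmsy  <- biomass / b_msy           # > 1 means biomass is above the MSY reference level
u_umsy  <- u / u_msy                 # > 1 means fishing pressure exceeds the MSY reference
c_cmean <- catch / mean(catch)       # catch relative to the stock's mean catch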
/app.R
no_license
CaitieReza/mpa_app
R
false
false
42,597
r
library(shiny) library(tidyverse) library(shinythemes) library(here) library(plotly) library(shinydashboard) load("DBdata[asmt][v4.491].RData") #Tidy format: tb.data --- Total biomass data #No data: ATBTUNAEATL, ATBTUNAWATL (both ADDED as all NAs!) tuna_biomass <- tb.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL)%>% rownames_to_column() %>% add_column(ATBTUNAEATL = NA) %>% add_column(ATBTUNAWATL = NA) %>% pivot_longer(!rowname, names_to = "species", values_to = "biomass") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: tc.data --- Total catch data #Data for all 14 species tuna_catch <- tc.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL)%>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "catch") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% #NO data before 1950 mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: er.data --- Exploitation rate data (usually an annual fraction harvested) #No data: ATBTUNAEATL, ATBTUNAWATL (both ADDED as all NAs!) 
#NOTE: harvest rate (U); may be either exploitation rate or fishing mortality tuna_er <- er.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL) %>% rownames_to_column() %>% add_column(ATBTUNAEATL = NA) %>% add_column(ATBTUNAWATL = NA) %>% pivot_longer(!rowname, names_to = "species", values_to = "explotation_rate") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: divbpref.data --- B/Bmsy pref data (B/Bmsy if available, otherwise B/Bmgt) #Data for all 14 species tuna_b_bmsy <- divbpref.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL) %>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "b_bmsy") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: divupref.data --- U/Umsy pref data (U/Umsy if available, otherwise U/Umgt) #Data for all 14 species tuna_u_umsy <- divupref.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL) %>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "u_umsy") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", 
species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Tidy format: cdivmeanc.data --- Catch/(mean catch) data #Data for all 14 species tuna_catch_mean <- cdivmeanc.data %>% select(ALBANATL, ALBASATL, BIGEYEATL, BMARLINATL, SAILEATL, SAILWATL, SKJEATL, SKJWATL, SWORDNATL, SWORDSATL, WMARLINATL, YFINATL, ATBTUNAEATL, ATBTUNAWATL) %>% rownames_to_column() %>% pivot_longer(!rowname, names_to = "species", values_to = "c_c_mean") %>% rename(year = rowname) %>% mutate(year = as.numeric(as.character(year))) %>% filter(year >= 1950) %>% mutate(species = case_when(species == "ALBANATL" ~ "Albacore tuna Northern Atlantic", species == "ALBASATL" ~ "Albacore tuna South Atlantic", species == "BIGEYEATL" ~ "Bigeye tuna Atlantic Ocean", species == "BMARLINATL" ~ "Blue marlin Atlantic Ocean", species == "SAILEATL" ~ "Sailfish Eastern Atlantic", species == "SAILWATL" ~ "Sailfish Western Atlantic", species == "SKJEATL" ~ "Skipjack tuna Eastern Atlantic", species == "SKJWATL" ~ "Skipjack tuna Western Atlantic", species == "SWORDNATL" ~ "Swordfish Northern Atlantic", species == "SWORDSATL" ~ "Swordfish South Atlantic", species == "WMARLINATL" ~ "White marlin Atlantic Ocean", species == "YFINATL" ~ "Yellowfin tuna Atlantic Ocean", species == "ATBTUNAEATL" ~ "Atlantic bluefin tuna Eastern Atlantic", species == "ATBTUNAWATL" ~ "Atlantic bluefin tuna Western Atlantic")) #Merge all time series tidy dataframes: tuna_merged <- tuna_catch %>% right_join(tuna_biomass, by=c("year","species")) %>% right_join(tuna_er, by=c("year","species")) %>% right_join(tuna_b_bmsy, by=c("year","species")) %>% right_join(tuna_u_umsy, by=c("year","species")) %>% right_join(tuna_catch_mean, by=c("year","species")) #NOTES: add text describing each time series #TAB 5 Plots: ##B/Bmsy plot: tuna_region_bmsy1 <- tuna_merged %>% group_by(year) p1 <- ggplot(tuna_region_bmsy1)+ geom_boxplot(aes(x= factor(year), y = b_bmsy), fill = "blue", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(aes(x= factor(year), y = b_bmsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1950","1960", "1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative biomass (B/Bmsy)")+ theme_minimal() p1 <- plotly_build(p1)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean B/Bmsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p1$x$data <- lapply(p1$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) #Umsy plot: tuna_region_2 <- tuna_merged %>% filter(year>=1970) %>% group_by(year) p2 <- ggplot(tuna_region_2)+ geom_boxplot(aes(x= factor(year), y = u_umsy), fill = "blue", width = 0.2, outlier.color = NA)+ stat_summary(aes(x= factor(year), y = u_umsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", 
"2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative fishing pressure (U/Umsy)")+ theme_minimal() p2 <- plotly_build(p2)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean U/Umsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p2$x$data <- lapply(p2$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) #Catch/catch mean plots: p3 <- ggplot()+ geom_boxplot(data = tuna_region_2, aes(x= factor(year), y = c_c_mean), fill = "blue", width = 0.2, outlier.color = NA)+ stat_summary(data = tuna_region_2, aes(x= factor(year), y = c_c_mean, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Catch/mean catch")+ theme_minimal() p3 <- plotly_build(p3)%>% layout(annotations = list(x = 0.26, y = -0.1, text = "Red line shows mean catch/mean catch.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p3$x$data <- lapply(p3$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) # User Interface ui <- fluidPage(theme = shinytheme("sandstone"), navbarPage(title=div(img(src="https://creazilla-store.fra1.digitaloceanspaces.com/silhouettes/67427/tuna-silhouette-4bb7ee-md.png", height = "05%", width = "05%"), "StockWatch"), tabPanel("Home", mainPanel(h2("Background"), p("Monitoring fish stocks is a crucial aspect of fisheries management. Stock assessments are extremely valuable for assessing biodiversity, setting catch limits, and implementing a multitude of other management strategies necessary to conserve marine species."), img(src = "https://images.squarespace-cdn.com/content/v1/511cdc7fe4b00307a2628ac6/1598456180510-8HP33TBNY8Z8B4FMFE64/ke17ZwdGBToddI8pDm48kEhk4PMdjneZU7fdR_q5soxZw-zPPgdn4jUwVcJE1ZvWQUxwkmyExglNqGp0IvTJZamWLI2zvYWH8K3-s_4yszcp2ryTI0HqTOaaUohrI8PIMdG2tpRl3f2mZAJsRkSvSCVIhKT8STsEVs-xYFgM6b8KMshLAGzx4R3EDFOm1kBS/1280_wmrMzTIk3962.jpg"), h2("About the App"), p("StockWatch is a tool for visualizing biomass, fishing pressure, and other components of stock assessments that are useful for tracking the health of target species. "), h3("Management Implications"), p("It is essential to not only evaluate the status of fish stocks, but also the degree to which they are managed. 
In the “Trends” tab, StockWatch allows users to compare biomass values with fishing pressure and catch parameters for Atlantic fish populations with differing management levels."),
h3("Focus Species"),
h4("Our tool focuses on 12 commercially important stocks across the Atlantic Ocean:"),
h5("• Northern Atlantic Albacore tuna"), h5("• South Atlantic Albacore tuna"), h5("• Bigeye tuna"), h5("• Blue marlin"), h5("• Eastern Atlantic Sailfish"), h5("• Western Atlantic Sailfish"), h5("• Eastern Atlantic Skipjack"), h5("• Western Atlantic Skipjack"), h5("• North Atlantic Swordfish"), h5("• South Atlantic Swordfish"), h5("• White marlin"), h5("• Yellowfin tuna"),
img(src = "https://www.freeworldmaps.net/ocean/atlantic/atlantic-ocean-blank-map.jpg", height = "80%", width = "80%", align = "right") ) ),
tabPanel("Species", sidebarLayout( sidebarPanel("Click a species to learn more:", radioButtons(inputId = "table1", label = h3("Species"), choices = c("Albacore tuna Northern Atlantic" = "1", "Albacore tuna South Atlantic" = "2", "Bigeye tuna Atlantic Ocean" = "3", "Blue marlin Atlantic Ocean" = "4", "Sailfish Eastern Atlantic" = "5", "Sailfish Western Atlantic" = "6", "Skipjack tuna Eastern Atlantic" = "7", "Skipjack tuna Western Atlantic" = "8", "Swordfish Northern Atlantic" = "9", "Swordfish South Atlantic" = "10", "White marlin Atlantic Ocean" = "11", "Yellowfin tuna Atlantic Ocean" = "12", "Atlantic bluefin tuna Eastern Atlantic" = "13", "Atlantic bluefin tuna Western Atlantic" = "14"), selected = "1" ), width = 3 ),
mainPanel(h2("Basic Species Information"), br(), br(), uiOutput("table"), "Data: NOAA Fisheries, Species Directory. https://www.fisheries.noaa.gov/species-directory.", "Photo: NOAA Fisheries" ) ) ),
tabPanel("Time Series", titlePanel("Atlantic Ocean stocks time series"), sidebarLayout( sidebarPanel(selectInput(inputId = "select", label = h4("Select time series"), choices = list("Total Biomass" = "biomass", "Total Catch" = "catch", "Exploitation Rate" = "explotation_rate", "Relative biomass (B/Bmsy)" = "b_bmsy", "Relative fishing pressure (U/Umsy)" = "u_umsy", "Catch/mean catch" = "c_c_mean" ), selected = "biomass"), width = 3 ),
mainPanel("Time series", plotlyOutput(outputId = "tuna_plot"), hr(), textOutput("value"), "Data: RAM Legacy Stock Assessment Database. Atlantic Ocean Tunas Region | RAM Legacy. https://www.ramlegacy.org/explore-the-database/regions/atlantic-ocean/."
) ) ), tabPanel("Stock Status",titlePanel("Individual Atlantic Ocean stocks B/Bmsy, U/Umsy, and catch/mean catch"), sidebarLayout( sidebarPanel( radioButtons(inputId = "species", label = h4("Select Atlantic Ocean stock species"), choices = c("Albacore tuna Northern Atlantic", "Albacore tuna South Atlantic", "Bigeye tuna Atlantic Ocean", "Blue marlin Atlantic Ocean", "Sailfish Eastern Atlantic", "Sailfish Western Atlantic", "Skipjack tuna Eastern Atlantic", "Skipjack tuna Western Atlantic", "Swordfish Northern Atlantic", "Swordfish South Atlantic", "White marlin Atlantic Ocean", "Yellowfin tuna Atlantic Ocean", "Atlantic bluefin tuna Eastern Atlantic", "Atlantic bluefin tuna Western Atlantic"), selected = "Albacore tuna Northern Atlantic" ) ), mainPanel("Relative biomass, fishing pressure, and catch/mean catch by species", plotlyOutput(outputId = "b_u_catch_plot"), div(style="display: inline-block;vertical-align:top; width: 150px;",selectInput("date1", label = h6("Starting date:"), choices = 1950:2017, selected = 1950)), div(style="display: inline-block;vertical-align:top; width: 150px;", selectInput("date2", label = h6("Ending date:"), choices = 1950:2017, selected = 2017)), "Data: RAM Legacy Stock Assessment Database. Atlantic Ocean Tunas Region | RAM Legacy. https://www.ramlegacy.org/explore-the-database/regions/atlantic-ocean/" ) ) ), tabPanel("Trends", titlePanel("Atlantic Ocean stocks trends: B/Bmsy, U/Umsy, and catch/mean catch"), sidebarLayout( sidebarPanel(style = "position:fixed;width:inherit;", radioButtons(inputId ="campare", label = h4("Select"), choices = c("Atlantic Ocean stocks" = "Atlantic_stock", "Compare species" = "Compare_species"), selected = "Atlantic_stock"), conditionalPanel(condition = "input.campare == 'Compare_species'", radioButtons(inputId = "comp_sp", label = h5("Select species"), choices = c("Albacore tuna Northern Atlantic", "Albacore tuna South Atlantic", "Bigeye tuna Atlantic Ocean", "Blue marlin Atlantic Ocean", "Sailfish Eastern Atlantic", "Sailfish Western Atlantic", "Skipjack tuna Eastern Atlantic", "Skipjack tuna Western Atlantic", "Swordfish Northern Atlantic", "Swordfish South Atlantic", "White marlin Atlantic Ocean", "Yellowfin tuna Atlantic Ocean", "Atlantic bluefin tuna Eastern Atlantic", "Atlantic bluefin tuna Western Atlantic"), selected = "Albacore tuna Northern Atlantic" )), width = 3 ), mainPanel(h4("Relative biomass trends", align = "center"), plotlyOutput(outputId = "atl_oc_plot"), br(), br(), h4("Relative fishing pressure trends", align = "center"), plotlyOutput(outputId = "atl_oc_plot2"), br(), br(), h4("Catch/mean catch trends", align = "center"), plotlyOutput(outputId = "atl_oc_plot3"), br(), br(), br(), "Data: RAM Legacy Stock Assessment Database. Atlantic Ocean Tunas Region | RAM Legacy. 
https://www.ramlegacy.org/explore-the-database/regions/atlantic-ocean/" ) ) ) ) )
# Server function
server <- function(input, output) {
##TAB SPECIES:
output$table <- renderPrint({
#Map each radio-button value to its species image, then render the image tag
img_file <- switch(input$table1,
"1" = "north_albacore.png", "2" = "south_alb.png", "3" = "bigeye.png",
"4" = "blue_marlin.png", "5" = "east_sail.png", "6" = "west_sail.png",
"7" = "east_skipjack.png", "8" = "west_skipjack.png", "9" = "north_sword.png",
"10" = "south_sword.png", "11" = "white_marlin.png", "12" = "yellowfin.png",
"13" = "east_bluefin.png", "w_bluefin.png")
img(src = img_file, height = '800px', align = "left") })
##TAB TIME SERIES:
tuna_react <- reactive({ tuna_merged %>% rename(unit = input$select) })
y_labs <- reactive({ case_when(input$select == "explotation_rate" ~ "Exploitation rate", input$select == "catch" ~ "Total catch (Mt)", input$select == "biomass" ~ "Total biomass (Mt)", input$select == "b_bmsy" ~ "Relative biomass (B/Bmsy)", input$select == "u_umsy" ~ "Relative fishing pressure (U/Umsy)", input$select == "c_c_mean" ~ "Catch/mean catch") })
output$value <- renderText({ case_when(input$select == "explotation_rate" ~ "Exploitation rate is the proportion of biomass that is removed from a fish stock or population.", input$select == "catch" ~ "Total catch is the total quantity of fish or fishery product removed from a single population or species.", input$select == "biomass" ~ "Total biomass is the total weight, or “stock”, of a population, here measured in metric tons (MT). Though it is one of the most useful parameters for estimating the health of a specific fishery, total biomass does not indicate a population’s age distribution.", input$select == "b_bmsy" ~ "Relative biomass (B/Bmsy) is the ratio of observed biomass (total population weight) to the biomass that would produce Maximum Sustainable Yield (MSY).
MSY is the highest catch that can be removed from a continuously fished population, given average environmental conditions.", input$select == "u_umsy" ~ "Relative fishing pressure (U/Umsy) is the ratio of the fishing mortality rate (U) on a target species to the fishing mortality rate that would produce maximum sustainable yield (Umsy).", input$select == "c_c_mean" ~ "Catch/mean catch is the ratio of annual catch to the overall mean catch, and is another helpful parameter for assessing fishing pressure.") })
output$tuna_plot <- renderPlotly({ ggplotly(ggplot(data = tuna_react(), aes(x = year, y = unit)) + geom_line(aes(color = species)) + theme_minimal() + labs(x = "Year", y = y_labs()) + theme(legend.title = element_blank()) ) })
##TAB STOCK STATUS:
tuna_relative_react <- reactive({ tuna_merged %>% filter(species == input$species) %>% filter(year >= as.numeric(input$date1), year <= as.numeric(input$date2)) })
output$b_u_catch_plot <- renderPlotly({ ggplot(data = tuna_relative_react(), aes(x = year)) + geom_line(aes(y = b_bmsy, color = "B/Bmsy"), alpha = 0.7) + geom_line(aes(y = u_umsy, color = "U/Umsy"), alpha = 0.7) + geom_line(aes(y = c_c_mean, color = "Catch/mean catch"), alpha = 0.7)+ geom_hline(yintercept = 1, color = "orange2") + scale_color_manual(values = c( "B/Bmsy" = "red3", "U/Umsy" = "palegreen4", "Catch/mean catch" = "dodgerblue1")) + labs(color = "")+ labs(y = "")+ theme_minimal() })
#TAB TRENDS:
#Tab 5 output 1 (B/Bmsy):
output$atl_oc_plot <- renderPlotly({ if (input$campare == "Atlantic_stock") {p1} else { tuna_region_react <- reactive({ tuna_merged %>% filter(species == input$comp_sp) %>% group_by(year) }) p11 <- ggplot()+ geom_boxplot(data = tuna_region_bmsy1, aes(x= factor(year), y = b_bmsy), fill = "grey", color = "grey", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(data = tuna_region_bmsy1, aes(x= factor(year), y = b_bmsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "grey")+ stat_summary(data = tuna_region_react(), aes(x= factor(year), y = b_bmsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1950","1960", "1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative biomass (B/Bmsy)")+ theme_minimal() p11 <- plotly_build(p11)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean B/Bmsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p11$x$data <- lapply(p11$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) p11 } })
output$atl_oc_plot2 <- renderPlotly({ if (input$campare == "Atlantic_stock") {p2} else { tuna_region_react2 <- reactive({ tuna_merged %>% filter(year>=1970) %>% filter(species == input$comp_sp) %>% group_by(year) }) p22 <- ggplot()+ geom_boxplot(data = tuna_region_2, aes(x= factor(year), y = u_umsy), fill = "grey", color = "grey", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(data = tuna_region_2, aes(x= factor(year), y = u_umsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "grey")+ stat_summary(data = tuna_region_react2(), aes(x= factor(year), y = u_umsy, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", "2000", "2010", "2018") )+
scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Relative fishing pressure (U/Umsy)")+ theme_minimal() p22 <- plotly_build(p22)%>% layout(annotations = list(x = 0.2, y = -0.1, text = "Red line shows mean U/Umsy.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p22$x$data <- lapply(p22$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) p22 } }) output$atl_oc_plot3 <- renderPlotly({ if (input$campare == "Atlantic_stock") {p3} else { tuna_region_react3 <- reactive({ tuna_merged %>% filter(year>=1970) %>% filter(species == input$comp_sp) %>% group_by(year) }) p33 <- ggplot()+ geom_boxplot(data = tuna_region_2, aes(x= factor(year), y = c_c_mean), fill = "grey", color = "grey", width = 0.2, outlier.color = NA, outlier.shape = NA)+ stat_summary(data = tuna_region_2, aes(x= factor(year), y = c_c_mean, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "grey")+ stat_summary(data = tuna_region_react3(), aes(x= factor(year), y = c_c_mean, group = 1), fun = mean, geom = "line", alpha = 0.7, size = 0.5, color = "red")+ geom_hline(yintercept = 1, color = "orange2")+ scale_x_discrete("Year", breaks = c("1970", "1980", "1990", "2000", "2010", "2018") )+ scale_y_continuous(breaks=c(0, 1, 2, 3, 4), limits = c(-0.1, 4.5))+ labs(y = "Catch/mean catch")+ theme_minimal() p33 <- plotly_build(p33)%>% layout(annotations = list(x = 0.26, y = -0.1, text = "Red line shows mean catch/mean catch.", showarrow = F, xref='paper', yref='paper', xanchor='right', yanchor='auto', xshift=0, yshift=0, font=list(size=10)) ) p33$x$data <- lapply(p33$x$data, FUN = function(x){ x$marker = list(opacity = 0) return(x) }) p33 } }) } # Combine user interface + server shinyApp(ui = ui, server = server)
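# --- Illustrative sketch (not part of the app) -------------------------------
# The Time Series tab above describes "Catch/mean catch" as the ratio of annual
# catch to the overall mean catch. The snippet below is a minimal, hypothetical
# example of how such a series could be derived from a raw catch column; the
# `toy_catch` data frame and its values are invented for illustration and are
# not the app's actual inputs. It is kept commented out so that
# shinyApp(ui, server) remains the final expression of this file (required when
# the file is used as app.R).
#
#   library(dplyr)
#   toy_catch <- data.frame(year  = 1950:1954,
#                           catch = c(100, 120, 90, 150, 140))  # hypothetical catch, metric tons
#   toy_catch <- toy_catch %>%
#     mutate(c_c_mean = catch / mean(catch))  # > 1 indicates an above-average catch year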
#' Fix name out
#'
#' Function to fix file names when subsetting
#'
#' @importFrom methods as
#' @param nc_out a character string; path of the subsetted netCDF file to rename
#' @return No return value, called to rename subsetted file
#' @keywords internal
fix_name_out <- function(nc_out){
  dummie_name <- name_check(nc_out)
  dummie_date <- show_info(nc_out)
  dummie_date <- dummie_date[8]
  if (grepl("persiann", nc_out)) {
    dummie_date <- unlist(strsplit(dummie_date, " ", fixed = TRUE))
    dummie_date <- sub(",", "", dummie_date)
    dummie_date <- suppressWarnings(as.numeric(dummie_date))
    dummie_date <- dummie_date[!is.na(dummie_date)]
    dummie_date <- as.Date(dummie_date, origin = "1983-01-01 00:00:00")
  } else if (grepl("gldas-clsm", nc_out)) {
    dummie_date <- unlist(strsplit(dummie_date, " ", fixed = TRUE))
    dummie_date <- sub(",", "", dummie_date)
    dummie_date <- suppressWarnings(as.numeric(dummie_date))
    dummie_date <- dummie_date[!is.na(dummie_date)]
    dummie_date <- as.Date(dummie_date, origin = "1948-01-01 00:00:00")
  } else {
    dummie_date <- unlist(strsplit(dummie_date, " ", fixed = TRUE))
    dummie_date <- grep("-", dummie_date, value = TRUE)
  }
  dummie_date <- substr(dummie_date, 1, 7)
  dummie_date <- sub("-", "", dummie_date)
  if (dummie_name$length == 8) {
    dummie_name$name[5] <- dummie_date[1]
    dummie_name$name[6] <- dummie_date[2]
    dummie_name <- paste(dummie_name$name, collapse = "_")
    dummie_name <- paste0(dummie_name, ".nc")
    nc_mid <- sub("(.*/)(.*)", "\\1", nc_out)
    dummie_name <- paste0(nc_mid, dummie_name)
    dummie_name <- sub(".nc.nc.*", ".nc", dummie_name)
    if (!file.exists(dummie_name)) {
      file.rename(nc_out, dummie_name)
    } else {
      warning("Couldn't fix the filename to the pRecipe convention because a file with the same name already exists")
    }
  }
}
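# --- Illustrative sketch (not part of the package) ---------------------------
# A minimal, hypothetical illustration of the renaming step performed by
# fix_name_out(): the fifth and sixth underscore-separated fields of an
# 8-field file name are replaced with the new start and end dates in YYYYMM
# form. The file name and dates below are invented for illustration only;
# name_check() and show_info() are package internals and are not reproduced
# here. Kept commented out so nothing executes when the package is built.
#
#   old_name  <- "persiann_tp_mm_global_198301_202012_025_monthly.nc"  # hypothetical
#   parts     <- strsplit(sub("\\.nc$", "", old_name), "_", fixed = TRUE)[[1]]
#   new_dates <- format(as.Date(c("1990-01-01", "1999-12-01")), "%Y%m")
#   parts[5:6] <- new_dates
#   new_name  <- paste0(paste(parts, collapse = "_"), ".nc")
#   new_name  # "persiann_tp_mm_global_199001_199912_025_monthly.nc"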
/R/fix_name_out.R
no_license
imarkonis/pRecipe
R
false
false
1,836
r