Dataset schema:
content: large_string, lengths 0 to 6.46M
path: large_string, lengths 3 to 331
license_type: large_string, 2 classes
repo_name: large_string, lengths 5 to 125
language: large_string, 1 class
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 4 to 6.46M
extension: large_string, 75 classes
text: string, lengths 0 to 6.46M
### Terrence D. Jorgensen & Yves Rosseel ### Last updated: 9 May 2022 ### adaptation of lavaan::modindices() for lavaan.mi-class objects ##' Modification Indices for Multiple Imputations ##' ##' Modification indices (1-\emph{df} Lagrange multiplier tests) from a ##' latent variable model fitted to multiple imputed data sets. Statistics ##' for releasing one or more fixed or constrained parameters in model can ##' be calculated by pooling the gradient and information matrices ##' across imputed data sets in a method proposed by Mansolf, Jorgensen, & ##' Enders (2020)---analogous to the "D1" Wald test proposed by Li, Meng, ##' Raghunathan, & Rubin (1991)---or by pooling the complete-data score-test ##' statistics across imputed data sets (i.e., "D2"; Li et al., 1991). ##' ##' @name modindices.mi ##' @aliases modificationIndices.mi modificationindices.mi modindices.mi ##' @importFrom lavaan lavInspect lavListInspect lavNames ##' @importFrom methods getMethod ##' @importFrom stats cov pchisq qchisq ##' ##' @param object An object of class \code{\linkS4class{lavaan.mi}} ##' @param test \code{character} indicating which pooling method to use. ##' \code{"D1"} requests Mansolf, Jorgensen, & Enders' (2020) proposed ##' Wald-like test for pooling the gradient and information, which are then ##' used to calculate score-test statistics in the usual manner. \code{"D2"} ##' (default because it is less computationall intensive) requests to pool the ##' complete-data score-test statistics from each imputed data set, then pool ##' them across imputations, described by Li et al. (1991) and Enders (2010). ##' @param omit.imps \code{character} vector specifying criteria for omitting ##' imputations from pooled results. Can include any of ##' \code{c("no.conv", "no.se", "no.npd")}, the first 2 of which are the ##' default setting, which excludes any imputations that did not ##' converge or for which standard errors could not be computed. The ##' last option (\code{"no.npd"}) would exclude any imputations which ##' yielded a nonpositive definite covariance matrix for observed or ##' latent variables, which would include any "improper solutions" such ##' as Heywood cases. Specific imputation numbers can also be included in this ##' argument, in case users want to apply their own custom omission criteria ##' (or simulations can use different numbers of imputations without ##' redundantly refitting the model). ##' @param standardized \code{logical}. If \code{TRUE}, two extra columns ##' (\code{$sepc.lv} and \code{$sepc.all}) will contain standardized values ##' for the EPCs. In the first column (\code{$sepc.lv}), standardizization is ##' based on the variances of the (continuous) latent variables. In the second ##' column (\code{$sepc.all}), standardization is based on both the variances ##' of both (continuous) observed and latent variables. (Residual) covariances ##' are standardized using (residual) variances. ##' @param cov.std \code{logical}. \code{TRUE} if \code{test == "D2"}. ##' If \code{TRUE} (default), the (residual) ##' observed covariances are scaled by the square-root of the diagonal elements ##' of the \eqn{\Theta} matrix, and the (residual) latent covariances are ##' scaled by the square-root of the diagonal elements of the \eqn{\Psi} ##' matrix. 
If \code{FALSE}, the (residual) observed covariances are scaled by ##' the square-root of the diagonal elements of the model-implied covariance ##' matrix of observed variables (\eqn{\Sigma}), and the (residual) latent ##' covariances are scaled by the square-root of the diagonal elements of the ##' model-implied covariance matrix of the latent variables. ##' @param information \code{character} indicating the type of information ##' matrix to use (check \code{\link{lavInspect}} for available options). ##' \code{"expected"} information is the default, which provides better ##' control of Type I errors. ##' @param power \code{logical}. If \code{TRUE}, the (post-hoc) power is ##' computed for each modification index, using the values of \code{delta} ##' and \code{alpha}. ##' @param delta The value of the effect size, as used in the post-hoc power ##' computation, currently using the unstandardized metric of the \code{$epc} ##' column. ##' @param alpha The significance level used for deciding if the modification ##' index is statistically significant or not. ##' @param high.power If the computed power is higher than this cutoff value, ##' the power is considered 'high'. If not, the power is considered 'low'. ##' This affects the values in the \code{$decision} column in the output. ##' @param sort. \code{logical}. If \code{TRUE}, sort the output using the ##' values of the modification index values. Higher values appear first. ##' @param minimum.value \code{numeric}. Filter output and only show rows with a ##' modification index value equal or higher than this minimum value. ##' @param maximum.number \code{integer}. Filter output and only show the first ##' maximum number rows. Most useful when combined with the \code{sort.} option. ##' @param na.remove \code{logical}. If \code{TRUE} (default), filter output by ##' removing all rows with \code{NA} values for the modification indices. ##' @param op \code{character} string. Filter the output by selecting only those ##' rows with operator \code{op}. ##' ##' @note When \code{test = "D2"}, each (S)EPC will be pooled by taking its ##' average across imputations. When \code{test = "D1"}, EPCs will be ##' calculated in the standard way using the pooled gradient and information, ##' and SEPCs will be calculated by standardizing the EPCs using model-implied ##' (residual) variances. ##' ##' @return A \code{data.frame} containing modification indices and (S)EPCs. ##' ##' @author ##' Terrence D. Jorgensen (University of Amsterdam; \email{TJorgensen314@@gmail.com}) ##' ##' Adapted from \pkg{lavaan} source code, written by ##' Yves Rosseel (Ghent University; \email{Yves.Rosseel@@UGent.be}) ##' ##' \code{test = "D1"} method proposed by ##' Maxwell Mansolf (University of California, Los Angeles; ##' \email{mamansolf@@gmail.com}) ##' ##' @references ##' Enders, C. K. (2010). \emph{Applied missing data analysis}. ##' New York, NY: Guilford. ##' ##' Li, K.-H., Meng, X.-L., Raghunathan, T. E., & Rubin, D. B. (1991). ##' Significance levels from repeated \emph{p}-values with multiply-imputed ##' data.\emph{Statistica Sinica, 1}(1), 65--92. Retrieved from ##' \url{https://www.jstor.org/stable/24303994} ##' ##' Mansolf, M., Jorgensen, T. D., & Enders, C. K. (2020). A multiple ##' imputation score test for model modification in structural equation ##' models. \emph{Psychological Methods, 25}(4), 393--411. 
##' \doi{10.1037/met0000243} ##' ##' @examples ##' \dontrun{ ##' ## impose missing data for example ##' HSMiss <- HolzingerSwineford1939[ , c(paste("x", 1:9, sep = ""), ##' "ageyr","agemo","school")] ##' set.seed(12345) ##' HSMiss$x5 <- ifelse(HSMiss$x5 <= quantile(HSMiss$x5, .3), NA, HSMiss$x5) ##' age <- HSMiss$ageyr + HSMiss$agemo/12 ##' HSMiss$x9 <- ifelse(age <= quantile(age, .3), NA, HSMiss$x9) ##' ##' ## impute missing data ##' library(Amelia) ##' set.seed(12345) ##' HS.amelia <- amelia(HSMiss, m = 20, noms = "school", p2s = FALSE) ##' imps <- HS.amelia$imputations ##' ##' ## specify CFA model from lavaan's ?cfa help page ##' HS.model <- ' ##' visual =~ x1 + x2 + x3 ##' textual =~ x4 + x5 + x6 ##' speed =~ x7 + x8 + x9 ##' ' ##' ##' out <- cfa.mi(HS.model, data = imps) ##' ##' modindices.mi(out) # default: Li et al.'s (1991) "D2" method ##' modindices.mi(out, test = "D1") # Li et al.'s (1991) "D1" method ##' ##' } ##' ##' @export modindices.mi <- function(object, test = c("D2","D1"), omit.imps = c("no.conv","no.se"), standardized = TRUE, cov.std = TRUE, information = "expected", # power statistics? power = FALSE, delta = 0.1, alpha = 0.05, high.power = 0.75, # customize output sort. = FALSE, minimum.value = 0.0, maximum.number = nrow(LIST), na.remove = TRUE, op = NULL) { stopifnot(inherits(object, "lavaan.mi")) useImps <- rep(TRUE, length(object@DataList)) if ("no.conv" %in% omit.imps) useImps <- sapply(object@convergence, "[[", i = "converged") if ("no.se" %in% omit.imps) useImps <- useImps & sapply(object@convergence, "[[", i = "SE") if ("no.npd" %in% omit.imps) { Heywood.lv <- sapply(object@convergence, "[[", i = "Heywood.lv") Heywood.ov <- sapply(object@convergence, "[[", i = "Heywood.ov") useImps <- useImps & !(Heywood.lv | Heywood.ov) } ## custom removal by imputation number rm.imps <- omit.imps[ which(omit.imps %in% 1:length(useImps)) ] if (length(rm.imps)) useImps[as.numeric(rm.imps)] <- FALSE ## whatever is left m <- sum(useImps) if (m == 0L) stop('No imputations meet "omit.imps" criteria.') useImps <- which(useImps) test <- tolower(test[1]) N <- lavListInspect(object, "ntotal") #FIXME: if (lavoptions$mimic == "EQS") N <- N - 1 # not in lavaan, why? 
# not ready for estimator = "PML" if (object@Options$estimator == "PML") { stop("Modification indices not yet implemented for estimator PML.") } # sanity check if (power) standardized <- TRUE ## use first available modification indices as template to store pooled results ngroups <- lavListInspect(object, "ngroups") nlevels <- lavListInspect(object, "nlevels") myCols <- c("lhs","op","rhs") if (ngroups > 1L) myCols <- c(myCols,"block","group") if (nlevels > 1L) myCols <- c(myCols,"block","level") myCols <- unique(myCols) for (i in useImps) { LIST <- object@miList[[i]][myCols] nR <- try(nrow(LIST), silent = TRUE) if (inherits(nR, "try-error") || is.null(nR)) { if (i == max(useImps)) { stop("No modification indices could be computed for any imputations.") } else next } else break } ## D2 pooling method if (test == "d2") { chiList <- lapply(object@miList[useImps], "[[", i = "mi") ## imputations in columns, parameters in rows pooledList <- apply(do.call(cbind, chiList), 1, function(x) { calculate.D2(x, DF = 1, asymptotic = TRUE) }) LIST$mi <- pooledList[1, ] # could be "F" or "chisq" ## diagnostics LIST$riv <- pooledList["ariv", ] LIST$fmi <- pooledList["fmi", ] ## also take average of epc & sepc.all epcList <- lapply(object@miList[useImps], "[[", i = "epc") LIST$epc <- rowMeans(do.call(cbind, epcList)) if (standardized) { sepcList <- lapply(object@miList[useImps], "[[", i = "sepc.lv") LIST$sepc.lv <- rowMeans(do.call(cbind, sepcList)) sepcList <- lapply(object@miList[useImps], "[[", i = "sepc.all") LIST$sepc.all <- rowMeans(do.call(cbind, sepcList)) fixed.x <- lavListInspect(object, "options")$fixed.x && length(lavNames(object, "ov.x")) if (fixed.x && "sepc.nox" %in% colnames(object@miList[useImps][[1]])) { sepcList <- lapply(object@miList[useImps], "[[", i = "sepc.nox") LIST$sepc.nox <- rowMeans(do.call(cbind, sepcList)) } } } else { scoreOut <- lavTestScore.mi(object, add = cbind(LIST, user = 10L, free = 1, start = 0), test = "d1", omit.imps = omit.imps, epc = TRUE, scale.W = FALSE, asymptotic = TRUE, information = information)$uni LIST$mi <- scoreOut$X2 LIST$riv <- scoreOut$riv LIST$fmi <- scoreOut$fmi LIST$epc <- scoreOut$epc #FIXME: use average across imputations? # standardize? if (standardized) { ## Need full parameter table for lavaan::standardizedSolution() ## Merge parameterEstimates() with modindices() oldPE <- getMethod("summary","lavaan.mi")(object, se = FALSE, output = "data.frame", omit.imps = omit.imps) PE <- lavaan::lav_partable_merge(oldPE, cbind(LIST, est = 0), remove.duplicated = TRUE, warn = FALSE) ## merge EPCs, using parameter labels (unavailable for estimates) rownames(LIST) <- paste0(LIST$lhs, LIST$op, LIST$rhs, ".g", LIST$group) #FIXME: multilevel? rownames(PE) <- paste0(PE$lhs, PE$op, PE$rhs, ".g", PE$group) PE[rownames(LIST), "epc"] <- LIST$epc ## need "exo" column? 
PT <- parTable(object) if ("exo" %in% names(PT)) { rownames(PT) <- paste0(PT$lhs, PT$op, PT$rhs, ".g", PT$group) PE[rownames(PT), "exo"] <- PT$exo } else PE$exo <- 0L rownames(LIST) <- NULL rownames(PE) <- NULL EPC <- PE$epc if (cov.std) { # replace epc values for variances by est values var.idx <- which(PE$op == "~~" & PE$lhs == PE$rhs) EPC[ var.idx ] <- PE$est[ var.idx ] } # two problems: # - EPC of variances can be negative, and that is perfectly legal # - EPC (of variances) can be tiny (near-zero), and we should # not divide by tiny variables small.idx <- which(PE$op == "~~" & PE$lhs == PE$rhs & abs(EPC) < sqrt( .Machine$double.eps ) ) if (length(small.idx) > 0L) EPC[small.idx] <- as.numeric(NA) # get the sign EPC.sign <- sign(PE$epc) ## pooled estimates for standardizedSolution() pooledest <- getMethod("coef", "lavaan.mi")(object, omit.imps = omit.imps) ## update @Model@GLIST for standardizedSolution(..., GLIST=) object@Model <- lavaan::lav_model_set_parameters(object@Model, x = pooledest) PE$sepc.lv <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.lv", cov.std = cov.std, partable = PE, GLIST = object@Model@GLIST, est = abs(EPC))$est.std PE$sepc.all <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.all", cov.std = cov.std, partable = PE, GLIST = object@Model@GLIST, est = abs(EPC))$est.std fixed.x <- lavListInspect(object, "options")$fixed.x && length(lavNames(object, "ov.x")) if (fixed.x) { PE$sepc.nox <- EPC.sign * lavaan::standardizedSolution(object, se = FALSE, type = "std.nox", cov.std = cov.std, partable = PE, GLIST = object@Model@GLIST, est = abs(EPC))$est.std } if (length(small.idx) > 0L) { PE$sepc.lv[small.idx] <- 0 PE$sepc.all[small.idx] <- 0 if (fixed.x) PE$sepc.nox[small.idx] <- 0 } ## remove unnecessary columns, then merge if (is.null(LIST$block)) PE$block <- NULL PE$est <- NULL PE$mi <- NULL PE$epc <- NULL PE$exo <- NULL LIST <- merge(LIST, PE, sort = FALSE) class(LIST) <- c("lavaan.data.frame","data.frame") } } # power? if (power) { LIST$sepc.lv <- NULL LIST$delta <- delta # FIXME: this is using epc in unstandardized metric # this would be much more useful in standardized metric # we need a standardize.est.all.reverse function... LIST$ncp <- (LIST$mi / (LIST$epc*LIST$epc)) * (delta*delta) LIST$power <- 1 - pchisq(qchisq((1.0 - alpha), df=1), df=1, ncp=LIST$ncp) LIST$decision <- character( length(LIST$power) ) # five possibilities (Table 6 in Saris, Satorra, van der Veld, 2009) mi.significant <- ifelse( 1 - pchisq(LIST$mi, df=1) < alpha, TRUE, FALSE ) high.power <- LIST$power > high.power # FIXME: sepc.all or epc?? #epc.high <- LIST$sepc.all > LIST$delta epc.high <- LIST$epc > LIST$delta LIST$decision[ which(!mi.significant & !high.power)] <- "(i)" LIST$decision[ which( mi.significant & !high.power)] <- "**(m)**" LIST$decision[ which(!mi.significant & high.power)] <- "(nm)" LIST$decision[ which( mi.significant & high.power & !epc.high)] <- "epc:nm" LIST$decision[ which( mi.significant & high.power & epc.high)] <- "*epc:m*" #LIST$decision[ which(mi.significant & high.power) ] <- "epc" #LIST$decision[ which(mi.significant & !high.power) ] <- "***" #LIST$decision[ which(!mi.significant & !high.power) ] <- "(i)" } # sort? if (sort.) 
{ LIST <- LIST[order(LIST$mi, decreasing = TRUE),] } if (minimum.value > 0.0) { LIST <- LIST[!is.na(LIST$mi) & LIST$mi > minimum.value,] } if (maximum.number < nrow(LIST)) { LIST <- LIST[seq_len(maximum.number),] } if (na.remove) { idx <- which(is.na(LIST$mi)) if (length(idx) > 0) LIST <- LIST[-idx,] } if (!is.null(op)) { idx <- LIST$op %in% op if (length(idx) > 0) LIST <- LIST[idx,] } # add header # TODO: small explanation of the columns in the header? # attr(LIST, "header") <- # c("modification indices for newly added parameters only; to\n", # "see the effects of releasing equality constraints, use the\n", # "lavTestScore() function") LIST } ## alias ##' @rdname modindices.mi ##' @aliases modindices.mi modificationIndices.mi ##' @export modificationIndices.mi <- modindices.mi
File: /semTools/R/runMI-modification.R | repo: simsem/semTools | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 18,564 | extension: r
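The modindices.mi() function above delegates D2 pooling to a helper, calculate.D2(), that is defined elsewhere in the package and not shown in this file. As a rough reference for what that step does, here is a minimal sketch of the D2 pooling rule of Li et al. (1991) for a vector of per-imputation chi-square statistics; the function name pool_D2_sketch and the exact set of returned diagnostics are illustrative assumptions, not the semTools implementation.

## Minimal sketch (not the semTools helper) of D2 pooling for a set of
## chi-square statistics `w` (one per imputation), each with `DF` degrees
## of freedom, following Li, Meng, Raghunathan, & Rubin (1991).
pool_D2_sketch <- function(w, DF = 1) {
  m     <- sum(!is.na(w))                               # usable imputations
  w_bar <- mean(w, na.rm = TRUE)                        # average chi-square
  ariv  <- (1 + 1/m) * stats::var(sqrt(w), na.rm = TRUE) # relative increase in variance
  D2    <- (w_bar / DF - ariv * (m + 1) / (m - 1)) / (1 + ariv)  # pooled F statistic
  df2   <- DF^(-3/m) * (m - 1) * (1 + 1/ariv)^2         # denominator df
  p     <- stats::pf(D2, df1 = DF, df2 = df2, lower.tail = FALSE)
  c(D2 = D2, df1 = DF, df2 = df2, pvalue = p,
    ariv = ariv, fmi = ariv / (1 + ariv))               # fmi as an approximation
}

# e.g. pool_D2_sketch(c(3.2, 4.1, 2.7, 5.0), DF = 1)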
plot2 <- function(filetxt = "household_power_consumption.txt") {
  ## download the text file to the working directory first
  # name of the data file is assigned to filetxt
  # filetxt <- "household_power_consumption.txt"

  # read the data into a data frame
  data <- read.csv(filetxt, header = TRUE, sep = ";")

  # convert Date column from factor to character, then to Date format
  data$Date <- strptime(as.character(data$Date), format = "%d/%m/%Y")
  data$Date <- as.Date(data$Date)

  # subset data from 2007-02-01 and 2007-02-02, then combine the two subsets
  day1 <- data[data$Date == "2007-02-01", ]
  day2 <- data[data$Date == "2007-02-02", ]
  days <- rbind(day1, day2)

  # convert Global_active_power from factor to character, then to numeric
  days$Global_active_power <- as.character(days$Global_active_power)
  days$Global_active_power <- as.numeric(days$Global_active_power)

  # line plot of Global_active_power over the two days, with the required labels
  png(filename = "plot2.png")
  plot(days$Global_active_power, type = "l", xaxt = "n",
       xlab = "", ylab = "Global Active Power (kilowatts)")
  axis(1, at = c(0, 1440, 2880), labels = c("Thursday", "Friday", "Saturday"))
  # dev.copy(png, file = "plot2.png")
  dev.off()
}
File: /plot2.R | repo: mattyp83/ExData_Plotting1 | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 1,338 | extension: r
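The script above converts Global_active_power from factor to numeric without declaring a missing-value code; in the UCI household power consumption file this field uses "?" for missing observations, so those rows silently become NA. A hedged alternative for the loading and subsetting step is sketched below; the na.strings choice and the read_power_subset() wrapper are illustrative assumptions, not part of the original script.

# Sketch of a stricter loading step for the same file, assuming "?" marks
# missing values and ";" separates fields, as in the UCI household power data.
read_power_subset <- function(filetxt = "household_power_consumption.txt") {
  data <- read.csv(filetxt, sep = ";", na.strings = "?",
                   stringsAsFactors = FALSE)           # keep text as character
  data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
  data$Global_active_power <- as.numeric(data$Global_active_power)
  # keep only the two days of interest
  data[data$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
}

Subsetting with %in% also avoids building and binding two separate per-day data frames.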
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config NULL #' AmazonApiGatewayV2 #' #' @description #' Amazon API Gateway V2 #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' #' @section Service syntax: #' ``` #' svc <- apigatewayv2( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string" #' ), #' endpoint = "string", #' region = "string" #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' svc <- apigatewayv2() #' svc$create_api( #' Foo = 123 #' ) #' } #' #' @section Operations: #' \tabular{ll}{ #' \link[=apigatewayv2_create_api]{create_api} \tab Creates an Api resource \cr #' \link[=apigatewayv2_create_api_mapping]{create_api_mapping} \tab Creates an API mapping \cr #' \link[=apigatewayv2_create_authorizer]{create_authorizer} \tab Creates an Authorizer for an API \cr #' \link[=apigatewayv2_create_deployment]{create_deployment} \tab Creates a Deployment for an API \cr #' \link[=apigatewayv2_create_domain_name]{create_domain_name} \tab Creates a domain name \cr #' \link[=apigatewayv2_create_integration]{create_integration} \tab Creates an Integration \cr #' \link[=apigatewayv2_create_integration_response]{create_integration_response} \tab Creates an IntegrationResponses \cr #' \link[=apigatewayv2_create_model]{create_model} \tab Creates a Model for an API \cr #' \link[=apigatewayv2_create_route]{create_route} \tab Creates a Route for an API \cr #' \link[=apigatewayv2_create_route_response]{create_route_response} \tab Creates a RouteResponse for a Route \cr #' \link[=apigatewayv2_create_stage]{create_stage} \tab Creates a Stage for an API \cr #' \link[=apigatewayv2_delete_api]{delete_api} \tab Deletes an Api resource \cr #' \link[=apigatewayv2_delete_api_mapping]{delete_api_mapping} \tab Deletes an API mapping \cr #' \link[=apigatewayv2_delete_authorizer]{delete_authorizer} \tab Deletes an Authorizer \cr #' \link[=apigatewayv2_delete_cors_configuration]{delete_cors_configuration} \tab Deletes a CORS configuration \cr #' \link[=apigatewayv2_delete_deployment]{delete_deployment} \tab Deletes a Deployment \cr #' \link[=apigatewayv2_delete_domain_name]{delete_domain_name} \tab Deletes a domain name \cr #' \link[=apigatewayv2_delete_integration]{delete_integration} \tab Deletes an Integration \cr #' \link[=apigatewayv2_delete_integration_response]{delete_integration_response} \tab Deletes an IntegrationResponses \cr #' \link[=apigatewayv2_delete_model]{delete_model} \tab Deletes a Model \cr #' \link[=apigatewayv2_delete_route]{delete_route} \tab Deletes a Route \cr #' \link[=apigatewayv2_delete_route_response]{delete_route_response} \tab Deletes a RouteResponse \cr #' \link[=apigatewayv2_delete_route_settings]{delete_route_settings} \tab Deletes the RouteSettings for a stage \cr #' \link[=apigatewayv2_delete_stage]{delete_stage} \tab Deletes a Stage \cr #' \link[=apigatewayv2_get_api]{get_api} \tab Gets an Api resource \cr #' \link[=apigatewayv2_get_api_mapping]{get_api_mapping} \tab Gets an API mapping \cr #' \link[=apigatewayv2_get_api_mappings]{get_api_mappings} \tab Gets API mappings \cr #' \link[=apigatewayv2_get_apis]{get_apis} \tab Gets a collection of Api resources \cr #' \link[=apigatewayv2_get_authorizer]{get_authorizer} \tab Gets an Authorizer \cr #' \link[=apigatewayv2_get_authorizers]{get_authorizers} \tab Gets the Authorizers for an API \cr #' 
\link[=apigatewayv2_get_deployment]{get_deployment} \tab Gets a Deployment \cr #' \link[=apigatewayv2_get_deployments]{get_deployments} \tab Gets the Deployments for an API \cr #' \link[=apigatewayv2_get_domain_name]{get_domain_name} \tab Gets a domain name \cr #' \link[=apigatewayv2_get_domain_names]{get_domain_names} \tab Gets the domain names for an AWS account \cr #' \link[=apigatewayv2_get_integration]{get_integration} \tab Gets an Integration \cr #' \link[=apigatewayv2_get_integration_response]{get_integration_response} \tab Gets an IntegrationResponses \cr #' \link[=apigatewayv2_get_integration_responses]{get_integration_responses} \tab Gets the IntegrationResponses for an Integration\cr #' \link[=apigatewayv2_get_integrations]{get_integrations} \tab Gets the Integrations for an API \cr #' \link[=apigatewayv2_get_model]{get_model} \tab Gets a Model \cr #' \link[=apigatewayv2_get_models]{get_models} \tab Gets the Models for an API \cr #' \link[=apigatewayv2_get_model_template]{get_model_template} \tab Gets a model template \cr #' \link[=apigatewayv2_get_route]{get_route} \tab Gets a Route \cr #' \link[=apigatewayv2_get_route_response]{get_route_response} \tab Gets a RouteResponse \cr #' \link[=apigatewayv2_get_route_responses]{get_route_responses} \tab Gets the RouteResponses for a Route \cr #' \link[=apigatewayv2_get_routes]{get_routes} \tab Gets the Routes for an API \cr #' \link[=apigatewayv2_get_stage]{get_stage} \tab Gets a Stage \cr #' \link[=apigatewayv2_get_stages]{get_stages} \tab Gets the Stages for an API \cr #' \link[=apigatewayv2_get_tags]{get_tags} \tab Gets a collection of Tag resources \cr #' \link[=apigatewayv2_import_api]{import_api} \tab Imports an API \cr #' \link[=apigatewayv2_reimport_api]{reimport_api} \tab Puts an Api resource \cr #' \link[=apigatewayv2_tag_resource]{tag_resource} \tab Creates a new Tag resource to represent a tag \cr #' \link[=apigatewayv2_untag_resource]{untag_resource} \tab Deletes a Tag \cr #' \link[=apigatewayv2_update_api]{update_api} \tab Updates an Api resource \cr #' \link[=apigatewayv2_update_api_mapping]{update_api_mapping} \tab The API mapping \cr #' \link[=apigatewayv2_update_authorizer]{update_authorizer} \tab Updates an Authorizer \cr #' \link[=apigatewayv2_update_deployment]{update_deployment} \tab Updates a Deployment \cr #' \link[=apigatewayv2_update_domain_name]{update_domain_name} \tab Updates a domain name \cr #' \link[=apigatewayv2_update_integration]{update_integration} \tab Updates an Integration \cr #' \link[=apigatewayv2_update_integration_response]{update_integration_response} \tab Updates an IntegrationResponses \cr #' \link[=apigatewayv2_update_model]{update_model} \tab Updates a Model \cr #' \link[=apigatewayv2_update_route]{update_route} \tab Updates a Route \cr #' \link[=apigatewayv2_update_route_response]{update_route_response} \tab Updates a RouteResponse \cr #' \link[=apigatewayv2_update_stage]{update_stage} \tab Updates a Stage #' } #' #' @rdname apigatewayv2 #' @export apigatewayv2 <- function(config = list()) { svc <- .apigatewayv2$operations svc <- set_config(svc, config) return(svc) } # Private API objects: metadata, handlers, interfaces, etc. 
.apigatewayv2 <- list() .apigatewayv2$operations <- list() .apigatewayv2$metadata <- list( service_name = "apigatewayv2", endpoints = list("*" = list(endpoint = "apigatewayv2.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "apigatewayv2.{region}.amazonaws.com.cn", global = FALSE)), service_id = "ApiGatewayV2", api_version = "2018-11-29", signing_name = "apigateway", json_version = "1.1", target_prefix = "" ) .apigatewayv2$service <- function(config = list()) { handlers <- new_handlers("restjson", "v4") new_service(.apigatewayv2$metadata, handlers, config) }
File: /cran/paws.networking/R/apigatewayv2_service.R | repo: johnnytommy/paws | license: permissive | language: R | is_vendor: false | is_generated: false | length_bytes: 7,471 | extension: r
# ==================================================================
# Analysis of IBM's RANDU congruential generator
# ==================================================================

source("Material/RANDC.r")   # load RANDC.r

initRANDC(semilla = 321, a = 5, c = 1, m = 512)
initRANDC(54321)             # set the seed for reproducibility

nsim <- 9999
u <- RANDCN(nsim)            # generate

# ------------------------------------------------------------------
# EXAMPLE: problems with IBM's RANDU generator
# ------------------------------------------------------------------
require(rgl)
y <- matrix(u, ncol = 3, byrow = TRUE)
plot3d(y)   # rotate to see the hyperplanes

# ------------------------------------------------------------------
# Goodness of fit
# ------------------------------------------------------------------
# Histogram
hist(u, freq = FALSE)
curve(dunif(x, 0, 1), add = TRUE)
# abline(h=1)

# Empirical distribution
curve(ecdf(u)(x), type = "s", lwd = 2)
curve(punif(x, 0, 1), add = TRUE)

# Chi-squared test
source("Funciones/Test Chi-cuadrado continua.r")   # load chisq.test.cont
chisq.test.cont(u, distribution = "unif", nclasses = 100,
                output = FALSE, nestpar = 0, min = 0, max = 1)

# Kolmogorov-Smirnov test
ks.test(u, "punif", 0, 1)

# ------------------------------------------------------------------
# Randomness
# ------------------------------------------------------------------
# Sequential plot
plot(as.ts(u))
# plot(u, type = 'l')

# Lagged scatter plot
plot(u[-nsim], u[-1], xlab = "u_t", ylab = "u_t+1", pch = 21, bg = "white")
# plot(matrix(u, ncol = 2, byrow = TRUE))   # alternative

# Autocorrelation plots
# acf(u)    # correlations
# pacf(u)   # partial correlations

# Runs test
library(tseries)
runs.test(as.factor(u > median(u)))

# Ljung-Box test
Box.test(u, lag = 10, type = "Ljung")
Box.test(u, lag = 10 * log10(length(u)) - 1, type = "Ljung")

# ------------------------------------------------------------------
# Repeating the tests
# ------------------------------------------------------------------

# Initial values
# initRANDC(semilla = 321, a = 5, c = 1, m = 512)
initRANDC(54321)   # set the seed for reproducibility
# set.seed(54321)
n <- 500
nsim <- 1000
estadistico <- numeric(nsim)
pvalor <- numeric(nsim)

# Run the tests
for (isim in 1:nsim) {
  u <- RANDCN(n)   # generate
  # u <- runif(n)
  tmp <- chisq.test.cont(u, distribution = "unif", nclasses = 100,
                         output = FALSE, nestpar = 0, min = 0, max = 1)
  estadistico[isim] <- tmp$statistic
  pvalor[isim] <- tmp$p.value
}

# Proportion of rejections
cat("\nProportion of rejections at 1%  =", sum(pvalor < 0.01) / nsim, "\n")
cat("Proportion of rejections at 5%  =", sum(pvalor < 0.05) / nsim, "\n")
cat("Proportion of rejections at 10% =", sum(pvalor < 0.1) / nsim, "\n")

# ------------------------------------------------------------------
# Analysis of the test statistic
# Histogram
hist(estadistico, freq = FALSE)
curve(dchisq(x, 99), add = TRUE)

# Chi-squared test
chisq.test.cont(estadistico, distribution = "chisq", nclasses = 20,
                nestpar = 0, df = 99)

# Kolmogorov-Smirnov test
ks.test(estadistico, "pchisq", df = 99)

# ------------------------------------------------------------------
# Analysis of the p-values
# Histogram
hist(pvalor, freq = FALSE)
curve(dunif(x, 0, 1), add = TRUE)
# abline(h=1)

# Chi-squared test
chisq.test.cont(pvalor, distribution = "unif", nclasses = 20,
                nestpar = 0, min = 0, max = 1)

# Kolmogorov-Smirnov test
ks.test(pvalor, "punif", min = 0, max = 1)
File: /Taller_3/TallerA/TallerA/Material/Análisis RANDU de IBM.R | repo: Virtutibus/TALLERES-SIMULACION | license: no_license | language: R | is_vendor: false | is_generated: false | length_bytes: 3,718 | extension: r
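The analysis above relies on initRANDC() and RANDCN() from Material/RANDC.r, which is not included in this record. For readers without that file, the sketch below shows a generic linear congruential generator of the same shape; the function names and the stored-state approach are illustrative assumptions, not the course's actual code (RANDU itself corresponds to a = 65539, c = 0, m = 2^31).

# Minimal sketch of a linear congruential generator, standing in for the
# RANDC.r helpers used above (initRANDC / RANDCN).
lcg_state <- new.env()

init_lcg <- function(semilla, a = 65539, c = 0, m = 2^31) {
  # store the seed and parameters; the defaults reproduce IBM's RANDU
  lcg_state$x <- semilla %% m
  lcg_state$a <- a
  lcg_state$c <- c
  lcg_state$m <- m
}

lcg_runif <- function(n) {
  # iterate x_{i+1} = (a * x_i + c) mod m and rescale to (0, 1)
  out <- numeric(n)
  for (i in seq_len(n)) {
    lcg_state$x <- (lcg_state$a * lcg_state$x + lcg_state$c) %% lcg_state$m
    out[i] <- lcg_state$x / lcg_state$m
  }
  out
}

# e.g. init_lcg(54321); u <- lcg_runif(9999)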
context("check cosine pairs") cmpds <- c(rep('a', 100), rep('b', 100), rep('c', 100)) replicate <- rep(1:100, 3) PC1 <- rnorm(300) PC2 <- rnorm(300) df <- data.frame(cmpds, replicate, PC1, PC2) df_split <- split(df, df$cmpds) out <- cosine_pairs(df_split, 3:4) # works with unequal replicate sizes df_split$a <- df_split$a[-c(1:10), ] out_unequal <- cosine_pairs(df_split, 3:4) # test_that("cosine_pairs returns errors when expected",{ # expect_error(cosine_pairs(df, 1:4)) # expect_error(print(cosine_pairs(df_split, 1:2))) # }) test_that("cosine_pairs returns expected output",{ expect_true(is.data.frame(out_unequal)) expect_equal(nrow(out), 30000) expect_equal(ncol(out), 3) expect_equal(nrow(out_unequal), 28000) }) # known answers: A <- c('a', 'a', 'b', 'b') PC1 <- c(1, 1, -1, -1) PC2 <- c(2, 2, -2, -2) df <- data.frame(A, PC1, PC2) df split_df <- split(df, df$A) test <- cosine_pairs(split_df, 2:3) test_that("cosine_pairs returns correct results",{ expect_equal(test$val, c(-1, -1, -1, -1), tolerance = 1e-5) }) # answer from original algorithm set.seed(12321) cmpds <- c(rep('a', 100), rep('b', 100), rep('c', 100)) replicate <- rep(1:100, 3) PC1 <- rnorm(300) PC2 <- rnorm(300) df <- data.frame(cmpds, replicate, PC1, PC2) df_split <- split(df, df$cmpds) # works with unequal replicate sizes df_split$a <- df_split$a[-c(1:10), ] cosine_pairs_orig <- function(x, a, b){ if (!is.list(x) || is.data.frame(x)){ stop("Expecting a list", call. = FALSE) } # initialise empty vectors vals <- numeric() A <- character() B <- character() # get pairs of compounds name <- names(x) pairs_names <- t(combn(name, 2)) for (i in 1:nrow(pairs_names)){ tmp1 <- x[[pairs_names[i, 1]]] tmp2 <- x[[pairs_names[i, 2]]] # loop through rows in cmpd A and cmpd B # calculate the cosine similarity between the two vectors for (j in 1:nrow(tmp1)){ for (k in 1:nrow(tmp2)){ vals <- c(vals, cosine_sim_vector( c(tmp1[j, a], tmp1[j, b]), c(tmp2[k, a], tmp2[k, b]))) A <- c(A, pairs_names[i, 1]) B <- c(B, pairs_names[i, 2]) } } } data.frame(A, B, vals) } out_orig <- cosine_pairs_orig(x = df_split, a = 'PC1', b = 'PC2') # check same as original out_current <- cosine_pairs(df_split, 3:4) test_that("current alg returns same answers as original version",{ expect_true(all(out_orig == out_current)) })
/tests/testthat/test-cosine_pairs.R
no_license
Swarchal/phenoDist
R
false
false
2,443
r
context("check cosine pairs") cmpds <- c(rep('a', 100), rep('b', 100), rep('c', 100)) replicate <- rep(1:100, 3) PC1 <- rnorm(300) PC2 <- rnorm(300) df <- data.frame(cmpds, replicate, PC1, PC2) df_split <- split(df, df$cmpds) out <- cosine_pairs(df_split, 3:4) # works with unequal replicate sizes df_split$a <- df_split$a[-c(1:10), ] out_unequal <- cosine_pairs(df_split, 3:4) # test_that("cosine_pairs returns errors when expected",{ # expect_error(cosine_pairs(df, 1:4)) # expect_error(print(cosine_pairs(df_split, 1:2))) # }) test_that("cosine_pairs returns expected output",{ expect_true(is.data.frame(out_unequal)) expect_equal(nrow(out), 30000) expect_equal(ncol(out), 3) expect_equal(nrow(out_unequal), 28000) }) # known answers: A <- c('a', 'a', 'b', 'b') PC1 <- c(1, 1, -1, -1) PC2 <- c(2, 2, -2, -2) df <- data.frame(A, PC1, PC2) df split_df <- split(df, df$A) test <- cosine_pairs(split_df, 2:3) test_that("cosine_pairs returns correct results",{ expect_equal(test$val, c(-1, -1, -1, -1), tolerance = 1e-5) }) # answer from original algorithm set.seed(12321) cmpds <- c(rep('a', 100), rep('b', 100), rep('c', 100)) replicate <- rep(1:100, 3) PC1 <- rnorm(300) PC2 <- rnorm(300) df <- data.frame(cmpds, replicate, PC1, PC2) df_split <- split(df, df$cmpds) # works with unequal replicate sizes df_split$a <- df_split$a[-c(1:10), ] cosine_pairs_orig <- function(x, a, b){ if (!is.list(x) || is.data.frame(x)){ stop("Expecting a list", call. = FALSE) } # initialise empty vectors vals <- numeric() A <- character() B <- character() # get pairs of compounds name <- names(x) pairs_names <- t(combn(name, 2)) for (i in 1:nrow(pairs_names)){ tmp1 <- x[[pairs_names[i, 1]]] tmp2 <- x[[pairs_names[i, 2]]] # loop through rows in cmpd A and cmpd B # calculate the cosine similarity between the two vectors for (j in 1:nrow(tmp1)){ for (k in 1:nrow(tmp2)){ vals <- c(vals, cosine_sim_vector( c(tmp1[j, a], tmp1[j, b]), c(tmp2[k, a], tmp2[k, b]))) A <- c(A, pairs_names[i, 1]) B <- c(B, pairs_names[i, 2]) } } } data.frame(A, B, vals) } out_orig <- cosine_pairs_orig(x = df_split, a = 'PC1', b = 'PC2') # check same as original out_current <- cosine_pairs(df_split, 3:4) test_that("current alg returns same answers as original version",{ expect_true(all(out_orig == out_current)) })
setwd(file.path(getwd(), "testdata"))
context("Test that getSeqsAcrossBSJs() function works correctly")

test_that("getSeqsAcrossBSJs() retrieves the correct sequences", {
    gtf <- formatGTF(pathToGTF = "gencodeVM16.gtf")

    # Create the backSplicedJunctions data frame
    backSplicedJunctions <- getBackSplicedJunctions(gtf)
    mergedBSJunctions <- mergeBSJunctions(backSplicedJunctions, gtf)

    # Retrieve the genomic features
    annotatedBSJs <- annotateBSJs(mergedBSJunctions, gtf)

    if (requireNamespace("BSgenome.Mmusculus.UCSC.mm10", quietly = TRUE)) {
        # Get BSgenome object
        genome <- BSgenome::getBSgenome("BSgenome.Mmusculus.UCSC.mm10")

        # Retrieve target sequences
        targets <- getSeqsAcrossBSJs(annotatedBSJs, gtf, genome)

        # For positive strand
        expect_equal(targets$bsj$id[11], "Arhgap5:+:chr12:52516079:52542636")
        expect_equal(targets$bsj$length[11], 22)
        # The back-spliced sequence should be the one reported below
        expect_equal(targets$bsj$seq[11], "UGAAGACACAGAGGAAGAUGAU")

        # For negative strand
        expect_equal(targets$bsj$id[7], "Eps15l1:-:chr8:72380306:72367904")
        expect_equal(targets$bsj$length[7], 22)
        # The back-spliced sequence should be the one reported below
        expect_equal(targets$bsj$seq[7], "AGAUGUCCAAGAUCUCAUCAUU")
    } else {
        cat("Missing package BSgenome.Mmusculus.UCSC.mm10. Use BiocManager to install it.")
    }
})
/tests/testthat/test_getSeqsAcrossBSJs.R
no_license
Aufiero/circRNAprofiler
R
false
false
1,626
r
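For context on the negative-strand expectation above: the RNA sequence across a junction on the minus strand is the reverse complement of the plus-strand genomic DNA, with T read as U. The sketch below is purely illustrative (the input fragment is chosen so that its reverse complement matches the expected 22-nt string in the test, not taken from the mm10 genome) and only uses Biostrings, mirroring the requireNamespace guard used by the test itself.

# Illustrative sketch (assumption): reverse-complement a DNA fragment and
# express it in the RNA alphabet, as expected for a minus-strand junction.
if (requireNamespace("Biostrings", quietly = TRUE)) {
  dna <- Biostrings::DNAString("AATGATGAGATCTTGGACATCT")            # hypothetical fragment
  rna <- Biostrings::RNAString(Biostrings::reverseComplement(dna))  # reverse complement, T -> U
  as.character(rna)   # "AGAUGUCCAAGAUCUCAUCAUU"
}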
# Plot 2.r

# Use Data.Table Library for quickly loading data into R.
library(data.table)
library(lubridate)

# Create Column Names for reading in the datatable
columnnames = c("Date", "Time", "Global_active_power", "Global_reactive_power",
                "Voltage", "Global_intensity", "Sub_metering_1",
                "Sub_metering_2", "Sub_metering_3")

# Read Data Table from Associated Text File
dat <- fread("exdata-data-household_power_consumption/household_power_consumption.txt",
             sep = ";", col.names = columnnames, na.strings = "?")

# Create a subset for data from 2007-02-01 and 2007-02-02
specificdata <- subset(dat, dat$Date == "1/2/2007" | dat$Date == "2/2/2007")

# Coerce dates into Date objects
# specificdata$Date <- as.Date(specificdata$Date, "%d/%m/%Y")
DateTime <- paste(specificdata$Date, specificdata$Time)
DateTime <- strptime(DateTime, "%d/%m/%Y %H:%M:%S")

# Plot the data as a line plot, and add appropriate labels
plot(DateTime, specificdata$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")

# Output current graphics display to a png device, then close the device
dev.copy(png, file = "plot2.png", width = 480, height = 480)  ## Copy my plot to a PNG file

# Turn off file device
dev.off()
/plot2.R
no_license
sundalex/Exploratory_Data_Analysis_Project_1
R
false
false
1,296
r
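A common alternative to dev.copy() is to open the PNG device before plotting, which avoids any rendering differences between the screen device and the file. A minimal sketch reusing the DateTime and specificdata objects from the script above:

# Open the file device first, plot, then close it.
png("plot2.png", width = 480, height = 480)
plot(DateTime, specificdata$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()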
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sem_fitmeasures.R
\name{sem_fitmeasures}
\alias{sem_fitmeasures}
\title{Model fit statistics}
\usage{
sem_fitmeasures(x, print = TRUE)
}
\arguments{
\item{x}{a cfa() or sem() lavaan model}

\item{print}{Create a knitr table for displaying as html table? (default = TRUE)}
}
\description{
This function will display a table of model fit measures
}
/man/sem_fitmeasures.Rd
no_license
dr-JT/semoutput
R
false
true
441
rd
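The Rd file above documents only the signature, so a hedged usage sketch may help. It assumes the semoutput and lavaan packages are installed and uses lavaan's standard HolzingerSwineford1939 CFA example; the semoutput calls are left commented since the package is not loaded in this record.

library(lavaan)
# library(semoutput)
HS.model <- ' visual  =~ x1 + x2 + x3
              textual =~ x4 + x5 + x6
              speed   =~ x7 + x8 + x9 '
fit <- cfa(HS.model, data = HolzingerSwineford1939)
# semoutput::sem_fitmeasures(fit)                  # knitr/html table of fit indices
# semoutput::sem_fitmeasures(fit, print = FALSE)   # presumably returns the table unformatted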
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/seqDesign.R \name{monitorTrial} \alias{monitorTrial} \title{Group Sequential Monitoring of Simulated Efficacy Trials for the Event of Potential Harm, Non-Efficacy, and High Efficacy} \usage{ monitorTrial(dataFile, stage1, stage2, harmMonitorRange, harmMonitorAlpha = 0.05, alphaPerTest = NULL, nonEffStartMethod = c("FKG", "fixed", "?", "old"), nonEffStartParams = NULL, nonEffInterval, nonEffIntervalUnit = c("counts", "time"), lowerVEnoneff = NULL, upperVEnoneff, highVE, stage1VE, lowerVEuncPower = NULL, alphaNoneff, alphaHigh, alphaStage1, alphaUncPower = NULL, estimand = c("combined", "cox", "cuminc"), laggedMonitoring = FALSE, lagTime, saveFile = NULL, saveDir = NULL, verbose = TRUE) } \arguments{ \item{dataFile}{if \code{saveDir = NULL}, a list returned by \code{simTrial}; otherwise a name (character string) of an \code{.RData} file created by \code{simTrial}} \item{stage1}{the final week of stage 1 in a two-stage trial} \item{stage2}{the final week of stage 2 in a two-stage trial, i.e., the maximum follow-up time} \item{harmMonitorRange}{a 2-component numeric vector specifying the range of the pooled number of infections (pooled over the placebo and vaccine arm accruing infections the fastest) over which the type I error rate, specified in \code{harmMonitorAlpha}, will be spent (per vaccine arm). Note that \code{harmMonitorRange} does not specify a range for which potential-harm stopping boundaries will be computed; instead, it specifies when potential-harm monitoring will start, and the range over which \code{harmMonitorAlpha} will be spent.} \item{harmMonitorAlpha}{a numeric value (0.05 by default) specifying the overall type I error rate for potential-harm monitoring (per vaccine arm). To turn off potential-harm monitoring, set \code{harmMonitorAlpha} equal to 0.00001.} \item{alphaPerTest}{a per-test nominal/unadjusted alpha level for potential-harm monitoring. If \code{NULL}, a per-test alpha level is calculated that yields a cumulative alpha of \code{harmMonitorAlpha} at the end of \code{harmMonitorRange}.} \item{nonEffStartMethod}{a character string specifying the method used for determining when non-efficacy monitoring is to start. The default method of Freidlin, Korn, and Gray (2010) ("\code{FKG}") calculates the minimal pooled infection count (pooled over the placebo and vaccine arm accruing infections the fastest) such that a hazard-ratio-based VE point estimate of 0\% would result in declaring non-efficacy, i.e., the upper bound of the two-sided (1-\code{alphaNoneff}) x 100\% confidence interval for VE based on the asymptotic variance of the log-rank statistic is (barely) below the non-efficacy threshold specified as component \code{upperVEnonEff} in the list \code{nonEffStartParams}. If this list component is left unspecified, the argument \code{upperVEnonEff} is used as the non-efficacy threshold. 
The alternative method ("\code{fixed}") starts non-efficacy monitoring at a fixed pooled infection count (pooled over the placebo and vaccine arm accruing infections the fastest) specified by component \code{N1} in the list \code{nonEffStartParams}.} \item{nonEffStartParams}{a list with named components specifying parameters required by \code{nonEffStartMethod} (\code{NULL} by default)} \item{nonEffInterval}{a numeric value (a number of infections or a number of weeks) specifying the interval between two adjacent non-efficacy interim analyses} \item{nonEffIntervalUnit}{a character string specifying whether intervals between two adjacent non-efficacy interim analyses should be event-driven (default option "\code{counts}") or calendar time-driven (option "\code{time}")} \item{lowerVEnoneff}{specifies criterion 1 for declaring non-efficacy: the lower bound of the two-sided (1-\code{alphaNoneff}) x 100\% confidence interval(s) for the VE estimand(s) lie(s) below \code{lowerVEnoneff} (typically set equal to 0). If \code{NULL} (default), this criterion is ignored.} \item{upperVEnoneff}{specifies criterion 2 for declaring non-efficacy: the upper bound of the two-sided (1-\code{alphaNoneff}) x 100\% confidence interval(s) for the VE estimand(s) lie(s) below \code{upperVEnoneff} (typically a number in the 0--0.5 range)} \item{highVE}{specifies a criterion for declaring high-efficacy: the lower bound of the two-sided (1-\code{alphaHigh}) x 100\% confidence interval for the VE estimand lies above \code{highVE} (typically a number in the 0.5--1 range). To turn off high efficacy monitoring, set \code{highVE} equal to 1.} \item{stage1VE}{specifies a criterion for advancement of a treatment's evaluation into Stage 2: the lower bound of the two-sided (1-\code{alphaStage1}) x 100\% confidence interval for the VE estimand lies above \code{stage1VE} (typically set equal to 0)} \item{lowerVEuncPower}{a numeric vector with each component specifying a one-sided null hypothesis H0: VE(0--\code{stage1}) \eqn{\le} \code{lowerVEuncPower} x 100\%. Unconditional power (i.e., accounting for sequential monitoring) to reject each H0 is calculated, where the rejection region is defined by the lower bound of the two-sided (1-\code{alphaUncPower}) x 100\% confidence interval for the VE estimand being above the respective component of \code{lowerVEuncPower} (typically values in the 0--0.5 range).} \item{alphaNoneff}{one minus the nominal confidence level of the two-sided confidence interval used for non-efficacy monitoring} \item{alphaHigh}{one minus the nominal confidence level of the two-sided confidence interval used for high efficacy monitoring} \item{alphaStage1}{one minus the nominal confidence level of the two-sided confidence interval used for determining whether a treatment's evaluation advances into Stage 2} \item{alphaUncPower}{one minus the nominal confidence level of the two-sided confidence interval used to test one-sided null hypotheses H0: VE(0-\code{stage1}) \eqn{\le} \code{lowerVEuncPower} x 100\% against alternative hypotheses H1: VE(0--\code{stage1}) \eqn{>} \code{lowerVEuncPower} x 100\%. The same nominal confidence level is applied for each component of \code{lowerVEuncPower}.} \item{estimand}{a character string specifying the choice of VE estimand(s) used in non- and high efficacy monitoring, advancement rule for Stage 2, and unconditional power calculations. 
Three options are implemented: (1) the `pure' Cox approach (\code{"cox"}), where VE is defined as 1-hazard ratio (treatment/control) and estimated by the maximum partial likelihood estimator in the Cox model; (2) the `pure' cumulative incidence-based approach (\code{"cuminc"}), where VE is defined as 1-cumulative incidence ratio (treatment/control) and estimated by the transformation of the Nelson-Aalen estimator for the cumulative hazard function; and (3) the combined approach (\code{"combined"}), where both aforementioned VE estimands are used for non-efficacy monitoring while the cumulative VE estimand is used for all other purposes. Only the first three characters are necessary.} \item{laggedMonitoring}{a logical value (\code{FALSE} by default) indicating whether "per-protocol" non-efficacy monitoring should additionally be conducted for events occurring after \code{lagTime} weeks as a more conservative non-efficacy monitoring approach. If \code{TRUE} and \code{estimand = "combined"}, the cumulative VE estimand is considered only for non-efficacy monitoring.} \item{lagTime}{a time point (in weeks) defining the per-protocol VE estimand, i.e., VE(\code{lagTime}--\code{stage1}). This VE estimand is also used in "per-protocol" non-efficacy monitoring if \code{laggedMonitoring} equals \code{TRUE}. It is typically chosen as the date of the last immunization or the date of the visit following the last immunization.} \item{saveFile}{a character string specifying the name of the output \code{.RData} file. If \code{NULL} (default), a default file name will be used.} \item{saveDir}{a character string specifying a path for \code{dataFile}. If supplied, the output is also saved as an \code{.RData} file in this directory; otherwise the output is returned as a list.} \item{verbose}{a logical value indicating whether information on the output directory, file name, and monitoring outcomes should be printed out (default is \code{TRUE})} } \value{ If \code{saveDir} (and, optionally \code{saveFile}) is specified, the output list (named \code{out}) is saved as an \code{.RData} file in \code{saveDir} (the path to \code{saveDir} is printed); otherwise it is returned. The output object is a list of length equal to the number of simulated trials, each of which is a list of length equal to the number of treatment arms, each of which is a list with (at least) the following components: \itemize{ \item \code{boundHit}: a character string stating the monitoring outcome in this treatment arm, i.e., one of \code{"Harm"}, \code{"NonEffInterim"}, \code{"NonEffFinal"}, \code{"Eff"}, or \code{"HighEff"}. The first four outcomes can occur in Stage 1, whereas the last outcome can combine data over Stage 1 and Stage 2. 
\item \code{stopTime}: the time of hitting a stopping boundary since the first subject enrolled in the trial \item \code{stopInfectCnt}: the pooled number of infections at \code{stopTime} \item \code{summObj}: a \code{data.frame} containing summary information from each non-/high efficacy interim analysis \item \code{finalHRci}: the final CI for the hazard ratio, available if \code{estimand!="cuminc"} and there is at least 1 infection in each arm \item \code{firstNonEffCnt}: the number of infections that triggered non-efficacy monitoring (if available) \item \code{totInfecCnt}: the total number of \code{stage1} (\code{stage2} if \code{boundHit = "HighEff"}) infections \item \code{totInfecSplit}: a table with the numbers of \code{stage1} (\code{stage2} if \code{boundHit = "HighEff"}) infections in the treatment and control arm \item \code{lastExitTime}: the time between the first subject's enrollment and the last subject's exiting from the trial } } \description{ \code{monitorTrial} applies a group sequential monitoring procedure to data-sets generated by \code{simTrial}, which may result in modification or termination of each simulated trial. } \details{ All time variables use week as the unit of time. Month is defined as 52/12 weeks. Potential harm monitoring starts at the \code{harmMonitorRange[1]}-th infection pooled over the placebo group and the vaccine regimen that accrues infections the fastest. The potential harm analyses continue at each additional infection until the first interim analysis for non-efficacy. The monitoring is implemented with exact one-sided binomial tests of H0: \eqn{p \le p0} versus H1: \eqn{p > p0}, where \eqn{p} is the probability that an infected participant was assigned to the vaccine group, and \eqn{p0} is a fixed constant that represents the null hypothesis that an infection is equally likely to be assigned vaccine or placebo. Each test is performed at the same prespecified nominal/unadjusted alpha-level (\code{alphaPerTest}), chosen based on simulations such that, for each vaccine regimen, the overall type I error rate by the \code{harmMonitorRange[2]}-th arm-pooled infection (i.e., the probability that the potential harm boundary is reached when the vaccine is actually safe, \eqn{p = p0}) equals \code{harmMonitorAlpha}. Non-efficacy is defined as evidence that it is highly unlikely that the vaccine has a beneficial effect measured as VE(0--\code{stage1}) of \code{upperVEnoneff} x 100\% or more. The non-efficacy analyses for each vaccine regimen will start at the first infection (pooled over the vaccine and placebo arm) determined by \code{nonEffStartMethod}. Stopping for non-efficacy will lead to a reported two-sided (1-\code{alphaNoneff}) x 100\% CI for VE(0--\code{stage1}) with, optionally, the lower confidence bound below \code{lowerVEnoneff} and the upper confidence bound below \code{upperVEnoneff}, where \code{estimand} determines the choice of the VE(0--\code{stage1}) estimand. This approach is similar to the inefficacy monitoring approach of Freidlin, Korn, and Gray (2010). If \code{estimand = "combined"}, stopping for non-efficacy will lead to reported (1-\code{alphaNoneff}) x 100\% CIs for both VE parameters with, optionally, lower confidence bounds below \code{lowerVEnoneff} and upper confidence bounds below \code{upperVEnoneff}. 
If \code{laggedMonitoring = TRUE}, stopping for non-efficacy will lead to reported (1-\code{alphaNoneff}) x 100\% CIs for both VE(0--\code{stage1}) and VE(\code{lagTime}--\code{stage1}) with, optionally, lower confidence bounds below \code{lowerVEnoneff} and upper confidence bounds below \code{upperVEnoneff}. High efficacy monitoring allows early detection of a highly protective vaccine if there is evidence that VE(0--\code{stage2}) \eqn{>} \code{highVE} x 100\%. It is synchronized with non-efficacy monitoring during Stage 1, and a single high-efficacy interim analysis during Stage 2 is conducted halfway between the end of Stage 1 and the end of the trial. While monitoring for potential harm and non-efficacy restricts to \code{stage1} infections, monitoring for high efficacy counts all infections during \code{stage1} or \code{stage2}, given that early stopping for high efficacy would only be warranted under evidence for durability of the efficacy. The following principles and rules are applied in the monitoring procedure: \itemize{ \item Exclude all follow-up data from the analysis post-unblinding (and include all data pre-unblinding). \item The monitoring is based on modified ITT analysis, i.e., all subjects documented to be free of the study endpoint at baseline are included and analyzed according to the treatment assigned by randomization, ignoring how many vaccinations they received (only pre-unblinding follow-up included). \item If a vaccine hits the harm boundary, immediately discontinue vaccinations and accrual into this vaccine arm, and unblind this vaccine arm (continue post-unblinded follow-up until the end of Stage 1 for this vaccine arm). \item If a vaccine hits the non-efficacy boundary, immediately discontinue vaccinations and accrual into this vaccine arm, keep blinded and continue follow-up until the end of Stage 1 for this vaccine arm. \item If and when the last vaccine arm hits the non-efficacy (or harm) boundary, discontinue vaccinations and accrual into this vaccine arm, and unblind (the trial is over, completed in Stage 1). \item Stage 1 for the whole trial is over on the earliest date of the two events: (1) all vaccine arms have hit the harm or non-efficacy boundary; and (2) the last enrolled subject in the trial reaches the final \code{stage1} visit. \item Continue blinded follow-up until the end of Stage 2 for each vaccine arm that reaches the end of \code{stage1} with a positive efficacy (as defined by \code{stage1VE}) or high efficacy (as defined by \code{highVE}) result. \item If at least one vaccine arm reaches the end of \code{stage1} with a positive efficacy or high efficacy result, continue blinded follow-up in the placebo arm until the end of Stage 2. \item Stage 2 for the whole trial is over on the earliest date of the two events: (1) all subjects in the placebo arm and each vaccine arm that registered efficacy or high efficacy in \code{stage1} have failed or been censored; and (2) all subjects in the placebo arm and each vaccine arm that registered efficacy or high efficacy in \code{stage1} have completed the final \code{stage2} visit. } The above rules have the following implications: \itemize{ \item If a vaccine hits the non-efficacy boundary but Stage 1 for the whole trial is not over, then one includes in the analysis all follow-up through the final \code{stage1} visit for that vaccine regimen, including all individuals accrued up through the date of hitting the non-efficacy boundary (which will be the total number accrued to this vaccine arm). 
\item If a vaccine hits the harm boundary, all follow-up information through the date of hitting the harm boundary is included for this vaccine; no follow-up data are included after this date. \item If and when the last vaccine arm hits the non-efficacy (or harm) boundary, all follow-up information through the date of hitting the non-efficacy (or harm) boundary is included for this vaccine; no follow-up data are included after this date. } } \examples{ simData <- simTrial(N=c(1000, rep(700, 2)), aveVE=seq(0, 0.4, by=0.2), VEmodel="half", vePeriods=c(1, 27, 79), enrollPeriod=78, enrollPartial=13, enrollPartialRelRate=0.5, dropoutRate=0.05, infecRate=0.04, fuTime=156, visitSchedule=c(0, (13/3)*(1:4), seq(13*6/3, 156, by=13*2/3)), missVaccProb=c(0,0.05,0.1,0.15), VEcutoffWeek=26, nTrials=5, stage1=78, randomSeed=300) monitorData <- monitorTrial(dataFile=simData, stage1=78, stage2=156, harmMonitorRange=c(10,100), alphaPerTest=NULL, nonEffStartMethod="FKG", nonEffInterval=20, lowerVEnoneff=0, upperVEnoneff=0.4, highVE=0.7, stage1VE=0, lowerVEuncPower=0, alphaNoneff=0.05, alphaHigh=0.05, alphaStage1=0.05, alphaUncPower=0.05, estimand="cuminc", lagTime=26) ### alternatively, to save the .RData output file (no '<-' needed): ### ### simTrial(N=c(1400, rep(1000, 2)), aveVE=seq(0, 0.4, by=0.2), VEmodel="half", ### vePeriods=c(1, 27, 79), enrollPeriod=78, enrollPartial=13, ### enrollPartialRelRate=0.5, dropoutRate=0.05, infecRate=0.04, fuTime=156, ### visitSchedule=c(0, (13/3)*(1:4), seq(13*6/3, 156, by=13*2/3)), ### missVaccProb=c(0,0.05,0.1,0.15), VEcutoffWeek=26, nTrials=30, ### stage1=78, saveDir="./", randomSeed=300) ### ### monitorTrial(dataFile= ### "simTrial_nPlac=1400_nVacc=1000_1000_aveVE=0.2_0.4_infRate=0.04.RData", ### stage1=78, stage2=156, harmMonitorRange=c(10,100), alphaPerTest=NULL, ### nonEffStartMethod="FKG", nonEffInterval=20, lowerVEnoneff=0, ### upperVEnoneff=0.4, highVE=0.7, stage1VE=0, lowerVEuncPower=0, ### alphaNoneff=0.05, alphaHigh=0.05, alphaStage1=0.05, alphaUncPower=0.05, ### estimand="cuminc", lagTime=26, saveDir="./") } \references{ Freidlin B., Korn E. L., and Gray R. (2010), A general inefficacy interim monitoring rule for randomized clinical trials. \emph{Clinical Trials} 7(3):197-208. } \seealso{ \code{\link{simTrial}}, \code{\link{censTrial}}, and \code{\link{rankTrial}} }
/man/monitorTrial.Rd
no_license
wma9/seqDesign
R
false
true
18,839
rd
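The \details section above describes potential-harm monitoring as a sequence of exact one-sided binomial tests of H0: p <= p0 versus H1: p > p0, where p is the probability that an infected participant is in the vaccine arm. A hedged sketch of a single such test in base R (the counts are hypothetical; p0 = 0.5 corresponds to 1:1 randomization):

# One exact one-sided binomial harm test at a hypothetical interim look:
# 14 of 20 pooled infections fall in the vaccine arm.
harm_test <- binom.test(x = 14, n = 20, p = 0.5, alternative = "greater")
harm_test$p.value   # compared against the prespecified per-test alpha (alphaPerTest)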
# Validate a (Shiny) input value and show an inline error message in the
# container matched by `selector` when the value is missing or of the wrong type.
validateInput <- function(selector, value, type = NULL, required = TRUE) {
  if (required) {
    if (length(value) == 0) {
      removeUI(selector = ".error-message", multiple = TRUE)
      insertUI(
        selector = selector,
        where = "beforeEnd",
        ui = h6(class = "error-message", "This field cannot be empty. Please fill in a value")
      )
    } else {
      validateByType(type = type, value = value, selector = selector)
    }
  } else {
    if (length(value) > 0) {
      validateByType(type = type, value = value, selector = selector)
    }
  }
}

validateByType <- function(type, value, selector) {
  # No type supplied: nothing to check.
  if (is.null(type)) return(invisible(NULL))

  if (type == "numeric" & !is.numeric(value)) {
    removeUI(selector = ".error-message", multiple = TRUE)
    insertUI(
      selector = selector,
      where = "beforeEnd",
      ui = h6(class = "error-message", "Please use only numbers for this field")
    )
  } else if (type == "character" & !is.character(value)) {
    removeUI(selector = ".error-message", multiple = TRUE)
    insertUI(
      selector = selector,
      where = "beforeEnd",
      ui = h6(class = "error-message", "Please use only text (character values) for this field")
    )
  }
}
/validateInput.R
permissive
agapiospanos/TableOneApp
R
false
false
1,143
r
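A hedged usage sketch for the helper above, as it might be called from a Shiny server function; the selector and input ids are hypothetical, so the calls are left commented:

# observeEvent(input$submit, {
#   validateInput(selector = "#age_container",  value = input$age,
#                 type = "numeric",  required = TRUE)
#   validateInput(selector = "#name_container", value = input$name,
#                 type = "character", required = FALSE)
# })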
library(ggplot2)
library(dplyr)

NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Motor-vehicle (ON-ROAD) sources in Baltimore City (fips == "24510")
x <- NEI %>% filter(type == "ON-ROAD" & fips == '24510')

barplot(tapply(x$Emissions, x$year, FUN = sum),
        main = "Total PM2.5 Emissions - Motor vehicle sources / Baltimore",
        ylab = 'Amount of PM2.5 emitted, in tons')

dev.copy(png, 'plot5.png')
dev.off()
/plot5.R
no_license
danielavarelat/Exploratory-Analysis-Project
R
false
false
376
r
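Since ggplot2 and dplyr are loaded above but only dplyr is used, an optional sketch of the same summary drawn with ggplot2 (reusing the corrected `x` subset from the script):

totals <- x %>% group_by(year) %>% summarise(Emissions = sum(Emissions))
ggplot(totals, aes(x = factor(year), y = Emissions)) +
  geom_col() +
  labs(title = "Total PM2.5 Emissions - Motor vehicle sources / Baltimore",
       x = "Year", y = "Amount of PM2.5 emitted, in tons")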
\name{correlationPlotter}
\alias{correlationPlotter}
\title{correlationPlotter}
\description{
Computes and plots a correlation circle (used in multivariate analyses). Correlation is computed between measured items and components (factors, dimensions, principal axes, etc.).
}
\usage{
correlationPlotter(data_matrix, factor_scores, x_axis = 1, y_axis = 2,
    col = NULL, pch = NULL, xlab = NULL, ylab = NULL, main = "",
    asp = 1, dev.new = TRUE)
}
\arguments{
\item{data_matrix}{A set of data (i.e., original measures and observations)}
\item{factor_scores}{One set of factor scores that were computed from the original data matrix.}
\item{x_axis}{Which axis is the x-axis? Default is 1.}
\item{y_axis}{Which axis is the y-axis? Default is 2.}
\item{col}{A single-column matrix of colors for each data point.}
\item{pch}{A single-column matrix of pch for each data point. Indicates which point style to use for each item. See \code{\link{par}}.}
\item{xlab}{A label to be placed along the x-axis.}
\item{ylab}{A label to be placed along the y-axis.}
\item{main}{A title to be placed at the top of the graph.}
\item{asp}{numeric. Aspect ratio (see \code{asp} in \code{\link{par}}).}
\item{dev.new}{boolean. If TRUE, \code{\link{dev.new}} is called internally to create a new device. If FALSE, a device must already be open.}
}
\author{Derek Beaton}
\keyword{graphs}
\keyword{multivariate}
/Code/R/Release/prettyGraphs/man/correlationPlotter.Rd
no_license
cfhammill/ExPosition-Family
R
false
false
1,443
rd
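A hedged usage sketch for the function documented above (not taken from the package docs): compute principal-component scores with prcomp() and pass the original data plus the scores to correlationPlotter(). It assumes the prettyGraphs package is installed, so the plotting call is left commented.

# library(prettyGraphs)
pca <- prcomp(USArrests, scale. = TRUE)   # built-in data; pca$x holds the component scores
# correlationPlotter(data_matrix = USArrests, factor_scores = pca$x,
#                    x_axis = 1, y_axis = 2, main = "Correlation circle")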
library(monocle3) library(dplyr) library(tidyverse) library(Seurat) library(cowplot) library(SingleCellExperiment) ############################################################################################################### ## Trajectory on UMAP sample = "chromium040" seuratobj = readRDS(paste("Data/SeuratRDS/", sample, "_PC10_res0.5.rds", sep = "")) ToMonocle3 <- function(seurat_object, scale_all = FALSE, assay = "SCT", reduction_for_projection = "PCA", UMAP_cluster_slot = NULL){ if(scale_all){ message("Getting residuals for all Seurat genes in chosen assay slot and placing in scale.data") seurat_genes <- rownames(seurat_object[[assay]]) remaining_genes <- setdiff(seurat_genes, rownames(seurat_object[[assay]]@scale.data)) if(assay == "SCT"){ seurat_object <- Seurat::GetResidual(seurat_object, features = remaining_genes, assay = assay, umi.assay = "RNA") } else { seurat_object <- Seurat::ScaleData(seurat_object, features = rownames(seurat_object[[assay]])) } } #We prep the seurat object by creating gene loadings for ALL genes in the Seurat scale.data slot. This is done to allow downstream monocle3 functions on gene_modules to work appropriately. message("Projecting gene loadings for all Seurat genes in scale.data slot") seurat_object <- Seurat::ProjectDim(seurat_object, reduction = reduction_for_projection, assay = assay) ################## message("Initializing CDS object") #Extract Seurat's log-transformed values expression_matrix <- Seurat::GetAssayData(seurat_object, assay = assay, slot = "counts") #Extract Seurat meta_data meta_data <- seurat_object@meta.data #Extract gene names from Seurat object SCT slot to make CDS seurat_genes <- data.frame(gene_short_name = rownames(seurat_object[[assay]]), row.names = rownames(seurat_object[[assay]])) new_cds <- monocle3::new_cell_data_set(expression_data = expression_matrix, cell_metadata = meta_data, gene_metadata = seurat_genes) ################## message("Making an SCE object from the Seurat object to facilitate transfer of information from SCE to CDS") sce <- as.SingleCellExperiment(seurat_object, assay = assay) message("Loading in all Seurat reductions (PCA, HARMONY, UMAP, etc.) 
into CDS") SingleCellExperiment::reducedDims(new_cds) <- SingleCellExperiment::reducedDims(sce) message("Loading in specified Seurat assay into CDS") SummarizedExperiment::assays(new_cds) <- SummarizedExperiment::assays(sce) message("Loading in Seurat gene names into CDS") SummarizedExperiment::rowData(new_cds) <- SummarizedExperiment::rowData(sce) SummarizedExperiment::rowData(new_cds)$gene_short_name <- row.names(new_cds) message("Loading in Seurat gene loadings into CDS") new_cds@preprocess_aux$gene_loadings <- seurat_object@reductions[[reduction_for_projection]]@feature.loadings.projected ################## message("Get user specified selected clusters (or active idents) from Seurat and load into CDS") if(is.null(UMAP_cluster_slot)){ list_cluster <- Idents(seurat_object) } else { Idents(seurat_object) <- UMAP_cluster_slot list_cluster <- Idents(seurat_object) } new_cds@clusters[["UMAP"]]$clusters <- list_cluster #The next two commands are run in order to allow "order_cells" to be run in monocle3 rownames(new_cds@principal_graph_aux[['UMAP']]$dp_mst) <- NULL colnames(SingleCellExperiment::reducedDims(new_cds)[["UMAP"]]) <- NULL ################## message("Setting all cells as belonging to one partition (multiple partitions not supported yet)") recreate_partition <- c(rep(1, length(new_cds@colData@rownames))) names(recreate_partition) <- new_cds@colData@rownames recreate_partition <- as.factor(recreate_partition) new_cds@clusters[["UMAP"]]$partitions <- recreate_partition ################## message("Done") new_cds } cds = ToMonocle3(seuratobj, scale_all = TRUE, assay = "RNA", reduction_for_projection = "pca", UMAP_cluster_slot = NULL) cds <- learn_graph(cds) # a helper function to identify the root principal points: get_earliest_principal_node <- function(cds, seurat_cluster="0"){ cell_ids <- which(colData(cds)[, "seurat_clusters"] == seurat_cluster) closest_vertex <- cds@principal_graph_aux[["UMAP"]]$pr_graph_cell_proj_closest_vertex closest_vertex <- as.matrix(closest_vertex[colnames(cds), ]) root_pr_nodes <- igraph::V(principal_graph(cds)[["UMAP"]])$name[as.numeric(names (which.max(table(closest_vertex[cell_ids,]))))] root_pr_nodes } cds <- order_cells(cds, reduction_method = "UMAP", root_pr_nodes=get_earliest_principal_node(cds, seurat_cluster = 4)) plot_cells( cds = cds, color_cells_by = "pseudotime", show_trajectory_graph = TRUE ) ###################################### ## Harmony - with naive. UMAP harmonyobj = readRDS("Data/UMAP.Harmony_cluster_0to16_per100_with_naive_0714.rds") ToMonocle3 <- function(seurat_object, scale_all = FALSE, assay = "SCT", reduction_for_projection = "PCA", UMAP_cluster_slot = NULL){ if(scale_all){ message("Getting residuals for all Seurat genes in chosen assay slot and placing in scale.data") seurat_genes <- rownames(seurat_object[[assay]]) remaining_genes <- setdiff(seurat_genes, rownames(seurat_object[[assay]]@scale.data)) if(assay == "SCT"){ seurat_object <- Seurat::GetResidual(seurat_object, features = remaining_genes, assay = assay, umi.assay = "RNA") } else { seurat_object <- Seurat::ScaleData(seurat_object, features = rownames(seurat_object[[assay]])) } } #We prep the seurat object by creating gene loadings for ALL genes in the Seurat scale.data slot. This is done to allow downstream monocle3 functions on gene_modules to work appropriately. 
message("Projecting gene loadings for all Seurat genes in scale.data slot") seurat_object <- Seurat::ProjectDim(seurat_object, reduction = reduction_for_projection, assay = assay) ################## message("Initializing CDS object") #Extract Seurat's log-transformed values expression_matrix <- Seurat::GetAssayData(seurat_object, assay = assay, slot = "counts") #Extract Seurat meta_data meta_data <- seurat_object@meta.data #Extract gene names from Seurat object SCT slot to make CDS seurat_genes <- data.frame(gene_short_name = rownames(seurat_object[[assay]]), row.names = rownames(seurat_object[[assay]])) new_cds <- monocle3::new_cell_data_set(expression_data = expression_matrix, cell_metadata = meta_data, gene_metadata = seurat_genes) ################## message("Making an SCE object from the Seurat object to facilitate transfer of information from SCE to CDS") sce <- as.SingleCellExperiment(seurat_object, assay = assay) message("Loading in all Seurat reductions (PCA, HARMONY, UMAP, etc.) into CDS") SingleCellExperiment::reducedDims(new_cds) <- SingleCellExperiment::reducedDims(sce) message("Loading in specified Seurat assay into CDS") SummarizedExperiment::assays(new_cds) <- SummarizedExperiment::assays(sce) message("Loading in Seurat gene names into CDS") SummarizedExperiment::rowData(new_cds) <- SummarizedExperiment::rowData(sce) SummarizedExperiment::rowData(new_cds)$gene_short_name <- row.names(new_cds) message("Loading in Seurat gene loadings into CDS") new_cds@preprocess_aux$gene_loadings <- seurat_object@reductions[[reduction_for_projection]]@feature.loadings.projected ################## message("Get user specified selected clusters (or active idents) from Seurat and load into CDS") if(is.null(UMAP_cluster_slot)){ list_cluster <- Idents(seurat_object) } else { Idents(seurat_object) <- UMAP_cluster_slot list_cluster <- Idents(seurat_object) } new_cds@clusters[["UMAP"]]$clusters <- list_cluster #The next two commands are run in order to allow "order_cells" to be run in monocle3 rownames(new_cds@principal_graph_aux[['UMAP']]$dp_mst) <- NULL colnames(SingleCellExperiment::reducedDims(new_cds)[["UMAP"]]) <- NULL ################## message("Setting all cells as belonging to one partition (multiple partitions not supported yet)") recreate_partition <- c(rep(1, length(new_cds@colData@rownames))) names(recreate_partition) <- new_cds@colData@rownames recreate_partition <- as.factor(recreate_partition) new_cds@clusters[["UMAP"]]$partitions <- recreate_partition ################## message("Done") new_cds } cds = ToMonocle3(harmonyobj, scale_all = TRUE, assay = "RNA", reduction_for_projection = "pca", UMAP_cluster_slot = NULL) cds <- learn_graph(cds) # a helper function to identify the root principal points: get_earliest_principal_node <- function(cds, seurat_cluster){ cell_ids <- which(colData(cds)[, "seurat_clusters"] == seurat_cluster) closest_vertex <- cds@principal_graph_aux[["UMAP"]]$pr_graph_cell_proj_closest_vertex closest_vertex <- as.matrix(closest_vertex[colnames(cds), ]) root_pr_nodes <- igraph::V(principal_graph(cds)[["UMAP"]])$name[as.numeric(names (which.max(table(closest_vertex[cell_ids,]))))] root_pr_nodes } #cds <- order_cells(cds, reduction_method = "UMAP", root_pr_nodes=get_earliest_principal_node(cds, seurat_cluster = 5)) #cds <- order_cells(cds, reduction_method = "UMAP") cds <- order_cells(cds, reduction_method = "UMAP", root_pr_nodes=get_earliest_principal_node(cds, seurat_cluster = 5)) cds <- order_cells(cds, reduction_method = "UMAP", 
root_pr_nodes=c(get_earliest_principal_node(cds, seurat_cluster = 5), get_earliest_principal_node(cds, seurat_cluster = 4))) p <- plot_cells( cds = cds, color_cells_by = "pseudotime", show_trajectory_graph = TRUE ) save_plot('~/Dropbox/NGR_SNU_2019/scRNA_seq_SYK/Figures/plot.Monocle_UMAP.harmony_cluster5_0718.png', p, base_height = 6, base_width = 7) ################################################################ ## Harmony - divide coghelp vs naive harmony_cog <- subset(x = harmonyobj, subset = orig.ident == "chromium034") harmony_sep <- subset(x = harmonyobj, subset = orig.ident == "chromium035") harmony_less <- subset(x = harmonyobj, subset = orig.ident == "chromium033") harmony_naive <- subset(x = harmonyobj, subset = orig.ident == "chromium040") cds_cog = ToMonocle3(harmony_cog, scale_all = TRUE, assay = "RNA", reduction_for_projection = "pca", UMAP_cluster_slot = NULL) cds_cog <- learn_graph(cds_cog) cds_cog <- order_cells(cds_cog, reduction_method = "UMAP", root_pr_nodes=get_earliest_principal_node(cds_cog, seurat_cluster = 5)) p1 <- plot_cells( cds = cds_cog, color_cells_by = "pseudotime", show_trajectory_graph = TRUE ) cds_naive = ToMonocle3(harmony_naive, scale_all = TRUE, assay = "RNA", reduction_for_projection = "pca", UMAP_cluster_slot = NULL) cds_naive <- learn_graph(cds_naive) cds_naive <- order_cells(cds_naive, reduction_method = "UMAP", root_pr_nodes= get_earliest_principal_node(cds_naive, seurat_cluster = 5)) p2 <- plot_cells( cds = cds_naive, color_cells_by = "pseudotime", show_trajectory_graph = TRUE ) p = plot_grid(p1, p2) save_plot('~/Dropbox/NGR_SNU_2019/scRNA_seq_SYK/Figures/plot.Monocle_UMAP.harmony_split_cluster5_0718.png', p, base_height = 6, base_width = 14) cds_less = ToMonocle3(harmony_less, scale_all = TRUE, assay = "RNA", reduction_for_projection = "pca", UMAP_cluster_slot = NULL) cds_less <- learn_graph(cds_less) cds_less <- order_cells(cds_less, reduction_method = "UMAP", root_pr_nodes=get_earliest_principal_node(cds_less, seurat_cluster = 5)) p1 <- plot_cells( cds = cds_less, color_cells_by = "pseudotime", show_trajectory_graph = TRUE ) cds_sep = ToMonocle3(harmony_sep, scale_all = TRUE, assay = "RNA", reduction_for_projection = "pca", UMAP_cluster_slot = NULL) cds_sep <- learn_graph(cds_sep) cds_sep <- order_cells(cds_sep, reduction_method = "UMAP", root_pr_nodes= get_earliest_principal_node(cds_sep, seurat_cluster = 5)) p2 <- plot_cells( cds = cds_sep, color_cells_by = "pseudotime", show_trajectory_graph = TRUE ) p = plot_grid(p1, p2) save_plot('~/Dropbox/NGR_SNU_2019/scRNA_seq_SYK/Figures/plot.Monocle_UMAP.harmony_split_helpless&sep_0718.png', p, base_height = 6, base_width = 14) ############################################################################################################### ## Branch-like harmonyobj = readRDS("Data/UMAP.Harmony_cluster_0to16_per100_with_naive_0703.rds") expression_matrix <- harmonyobj@assays$RNA@counts %>% as.matrix() cell_metadata = harmonyobj@meta.data cell_metadata <- cbind(cell_metadata, harmonyobj[["umap"]]@cell.embeddings) gene_annotation = data.frame(gene_short_name=rownames(expression_matrix)) rownames(gene_annotation) = gene_annotation$gene_short_name # Make the CDS object cds <- new_cell_data_set(expression_matrix, cell_metadata = cell_metadata, gene_metadata = gene_annotation) # Pre-process the data cds <- preprocess_cds(cds, num_dim = 10) # Reduce dimensionality cds <- reduce_dimension(cds, reduction_method = "UMAP") # Clustering cds = cluster_cells(cds, reduction_method = "UMAP", resolution=0.7) p = 
plot_cells(cds, group_cells_by="cluster", color_cells_by = 'seurat_clusters', reduction_method = 'UMAP', label_cell_groups = T) p ##pseudotime cds <- learn_graph(cds) cds <- order_cells(cds, reduction_method = "UMAP", root_pr_nodes= get_earliest_principal_node(cds, seurat_cluster = 5)) p = plot_cells(cds, color_cells_by = "seurat_clusters", label_groups_by_cluster=T, label_leaves=T, label_branch_points=T, group_cells_by = 'cluster', show_trajectory_graph = T, label_cell_groups = F) p ggsave(paste('Figures/plot.', tag, '_pseudotime.before.', date, '.png', sep = ''), p, height = 4, width = 6, limitsize = F) cds <- order_cells(cds) p = plot_cells(cds, color_cells_by = "pseudotime", label_cell_groups=T, label_leaves=T, label_branch_points=T, graph_label_size=1.5, label_groups_by_cluster=T) p ggsave(paste('Figures/plot.', tag, '_pseudotime.after.', date, '.png', sep = ''), p, height = 4, width = 6, limitsize = F) reducedDims(cds)$UMAP[,1] <- cell_metadata$UMAP_1 reducedDims(cds)$UMAP[,2] <- cell_metadata$UMAP_2 p<-plot_cells(cds, reduction_method = "UMAP", color_cells_by = "seurat_clusters", group_label_size = 3.5, label_groups_by_cluster = F, label_cell_groups = T) p ggsave(paste('Figures/plot.', tag, '_pseudotime_UMAP.before.', date, '.png', sep = ''), p, height = 5, width = 6, limitsize = F) cds <- order_cells(cds) p<-plot_cells(cds, color_cells_by = "pseudotime", group_label_size = 3.5, label_groups_by_cluster = FALSE) p ggsave(paste('Figures/plot.', tag, '_pseudotime_UMAP.after.', date, '.png', sep = ''), p, height = 5, width = 7, limitsize = F) #### cds <- learn_graph(cds) p = plot_cells(cds, color_cells_by = "assigned_cell_type", label_groups_by_cluster=FALSE, label_leaves=FALSE, label_branch_points=FALSE) p ggsave(paste('Figures/gRNA/plot.pseudotime.before', names(cds_list)[k], date, 'png', sep = '.'), p, height = 4, width = 6, limitsize = F) cds <- order_cells(cds) p = plot_cells(cds, color_cells_by = "pseudotime", label_cell_groups=FALSE, label_leaves=FALSE, label_branch_points=FALSE, graph_label_size=1.5) p ggsave(paste('Figures/gRNA/plot.pseudotime.after', names(cds_list)[k], date, 'png', sep = '.'), p, height = 4, width = 6, limitsize = F)
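# A minimal downstream sketch (not part of the original script): once the CDS carries
# projected gene loadings and a learned principal graph, monocle3 gene-module analysis
# can follow roughly as below. graph_test() and find_gene_modules() are monocle3
# functions; the q-value cutoff and resolution are illustrative assumptions, not
# values used in this pipeline.
pr_test_res <- graph_test(cds, neighbor_graph = "principal_graph", cores = 4)
sig_genes <- row.names(subset(pr_test_res, q_value < 0.05))
gene_modules <- find_gene_modules(cds[sig_genes, ], resolution = 1e-2)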
/ToMonocle3_0628.R
no_license
shijianasdf/SingleCellPipeline
R
false
false
16,053
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_design_mdesign_mcts.R
\name{extract_design_mdesign_mcts}
\alias{extract_design_mdesign_mcts}
\title{Extract Design-Metadesign (XU) matrix for a set of continuous
meta-features stored in a maf file}
\usage{
extract_design_mdesign_mcts(
  maf,
  variant_col = "variant",
  mfeat_cols = "Histone_Marks",
  sample_id_col = "sample",
  ...
)
}
\arguments{
\item{maf}{mutation annotation file -- a data frame-like object containing
columns for variant labels, sample IDs and continuous meta-feature variables.
NOTE: uniqueness of rows of maf is assumed}

\item{variant_col}{name of the column in \code{maf} containing variant labels.}

\item{mfeat_cols}{names of columns in \code{maf} containing continuous
meta-feature variables. Can be a vector.}

\item{sample_id_col}{name of the column in \code{maf} containing tumor sample IDs.}

\item{...}{Unused.}
}
\description{
Extract Design-Metadesign (XU) matrix for a set of continuous meta-features
stored in a maf file
}
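% A minimal usage sketch (illustrative only): the toy \code{maf} below and its
% values are assumptions for demonstration, not data shipped with the package.
\examples{
\dontrun{
toy_maf <- data.frame(
  variant = c("KRAS_G12D", "TP53_R175H", "KRAS_G12D"),
  sample = c("sample_A", "sample_B", "sample_C"),
  Histone_Marks = c(0.82, 0.15, 0.80)
)
XU <- extract_design_mdesign_mcts(
  maf = toy_maf,
  variant_col = "variant",
  mfeat_cols = "Histone_Marks",
  sample_id_col = "sample"
)
}
}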
/man/extract_design_mdesign_mcts.Rd
no_license
c7rishi/hidgenclassifier
R
false
true
1,050
rd
PhenStatWindow = function (phenlistObject , method = 'MM' , depVariable = 'Value' , equation = 'withWeight' , parameter = NULL , minObs = 4 , sensitivity = c(1, 1, 1) , pvalThreshold = 0 , #### for windowing only windowing = TRUE , seed = 123456, weightFUN = function(x) { # nlme::varComb(nlme::varIdent(form = ~ 1 | # Genotype), # nlme::varFixed(~ 1 / # ModelWeight)) nlme::varFixed( ~ 1 / ModelWeight) }, check = 2 , messages = FALSE , main = '' , threshold = 10 ^ -18 , plot = TRUE , storeplot = TRUE , PicDir = NULL , OverwriteExistingFiles = FALSE , filename = RandomRegardSeed(1) , superDebug = FALSE , predFunction = predFunction , residFunction = residFunction , weightORthreshold = 'weight' , maxPeaks = 15 , direction = direction , ...) { requireNamespace('PhenStat') requireNamespace('SmoothWin') requireNamespace('nlme') set.seed(seed) # Do not remove line below (necessary for windowing) phenlistObject@datasetPL = phenlistObject@datasetPL[order(Date2Integer(phenlistObject@datasetPL$Batch)), ] ########################### # Run normal (not windowed) models ########################### ## I checked the source and the messaging mechanism is written using the non-standard functioning in R note = windowingNote = graphFileName = NULL if (method == 'MM') { message0('Mixed model in progress ....') object0 = ModeWithErrorsAndMessages( m2ethod = method, phenList = phenlistObject, method = method, depVariable = depVariable, threshold = threshold, check = check, equation = equation, dataPointsThreshold = minObs, name = 'normal_analysis' ) note$normal_analysis_step1_1 = object0$note } else{ message0(method, ' in progress ....') object0 = ModeWithErrorsAndMessages( m2ethod = method, phenList = phenlistObject, method = method, depVariable = depVariable, threshold = threshold, check = check, equation = equation, dataPointsThreshold = minObs, name = 'normal_analysis' ) note$normal_analysis_step1_1 = object0$note # If not possible use MM (only for ABR) if (is.ABR(x = parameter) && length( grep( "to allow the application of RR plus framework", object0$mess$output, fixed = TRUE ) ) > 0) { method = 'MM' message0('Running the MM (only ABR) ... ') object0 = ModeWithErrorsAndMessages( m2ethod = method, key = 'method_alternative1_mm_for_abr_only', phenList = phenlistObject, method = method, depVariable = depVariable, threshold = threshold, check = check, equation = equation, dataPointsThreshold = minObs, name = 'normal_analysis' ) note$normal_analysis_step1_2 = object0$note } ############## If not possible add jiter if (is.ABR(x = parameter) && length(grep("jitter", object0$mess$output, fixed = TRUE)) > 0) { message0('Running the MM+Jitter (only ABR) ... ') method = 'MM' phenlistObject@datasetPL[, depVariable] = jitter(phenlistObject@datasetPL[, depVariable], 0.1) object0 = ModeWithErrorsAndMessages( m2ethod = method, key = 'method_alternative2_mm_jitter_for_abr_only', phenList = phenlistObject, method = method, depVariable = depVariable, threshold = threshold, check = check, equation = equation, dataPointsThreshold = minObs, name = 'normal_analysis' ) note$normal_analysis$step1_3 = object0$note } } ########################### #### This is for windowing only ########################### if (windowing && method %in% c('MM') && is.numeric(phenlistObject@datasetPL[, depVariable])) { # Full model message0('Running the full model before applying windowing ... 
') objectNorm = ModeWithErrorsAndMessages( m2ethod = method, key = 'initial_full_model_before_appliying_windowing', method = method, phenList = phenlistObject, depVariable = depVariable, threshold = threshold, keepList = c( keep_batch = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Batch', checkLevels = FALSE), keep_equalvar = TRUE, keep_weight = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Weight'), keep_sex = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Sex'), keep_interaction = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Sex') ), check = check, equation = equation, name = 'windowing_analysis' ) windowingNote$windowing_analysis$fully_loaded_model = objectNorm$note message0('Start windowing ... ') ####################################################### if (!NullOrError(objectNorm$obj$value)) { obj = objectNorm$obj$value@analysisResults$model.output phenlistObject@datasetPL = nlme::getData(obj) # !important ### tt = Date2Integer(phenlistObject@datasetPL$Batch) mm = mm.bck = which(!phenlistObject@datasetPL$Genotype %in% phenlistObject@refGenotype) message0('The number of modes: ', length(unique(mm))) # Windowing & DOE if (length(unique(tt[mm])) > maxPeaks) { message0( 'More than ', maxPeaks, ' modes. A random sample of ', maxPeaks, ' would be used. total modes: ', length(unique(mm)) ) sa = sample(unique(tt[mm]), maxPeaks) mm = mm[which(tt[mm] %in% sa)] } ### message0('Windowing algorithm in progress ...') r = SmoothWin( object = obj , data = phenlistObject@datasetPL , t = tt , m = mm , weightFUN = weightFUN , messages = messages , check = check , seed = seed , threshold = threshold , simple.output = TRUE , sensitivity = sensitivity , pvalThreshold = pvalThreshold , debug = superDebug , residFun = residFunction , predictFun = predFunction , weightORthreshold = weightORthreshold , direction = direction , min.obs = function(ignore.me.in.default) { message0('Total number of sex: ',PhenStat:::noSexes(phenlistObject)) lutm = length(unique(tt[mm])) r = ifelse(lutm > 1, PhenStat:::noSexes(phenlistObject)*35, max(pi * sqrt(length(tt)), 35)) r = max(r * lutm, length(mm), na.rm = TRUE) r = min(r , length(tt), na.rm = TRUE) message0('min.obs = ',r) return(r) } ) ############################## phenlistObject@datasetPL$AllModelWeights = we = we2 = r$finalModel$FullWeight if (is.null(we) || length(unique(we)) < 2 || var(we, na.rm = TRUE) < threshold) { we2 = NULL windowingNote$windowing_extra = 'There is no variation in the weights then the standard model is applied.' 
}else{ #################### MeanVarOverTime = function(mm, tt, data = phenlistObject@datasetPL) { if (length(tt) < 1 || length(mm) < 1) return(1) v = sapply(unique(tt[mm]), function(i) { ind = 1:length(tt) CriTeria = (tt %in% i) & (ind %in% mm) if (sum(CriTeria) > 1) { sd(data[CriTeria],na.rm = TRUE) } else{ NA } }) if (length(v[!is.na(v)]) > 0) { return(mean(v, na.rm = TRUE)) } else{ return(1) } } #################### vMutants = MeanVarOverTime(mm = (1:length(tt))[mm.bck], tt = tt, data = phenlistObject@datasetPL[, depVariable]) VControls = MeanVarOverTime(mm = (1:length(tt))[-mm.bck], tt = tt, data = phenlistObject@datasetPL[, depVariable]) message0('Disabled but: Mutant sd = ', vMutants, ', Control sd = ', VControls) #we2[mm] = we2[mm] * vMutants #we2[-mm] = we2[-mm] * VControls } message0('Fitting the windowing weights into the optimized PhenStat model ...') objectf = ModeWithErrorsAndMessages( m2ethod = method, key = 'final_windowing_model', method = method, phenList = phenlistObject, depVariable = depVariable, modelWeight = we2, threshold = threshold, check = check, equation = equation, name = 'windowing_analysis' ) windowingNote$windowing_analysis$final_model = objectf$note # Full model windowing message0('Fitting the windowing weights into the full PhenStat model ...') objectfulw = ModeWithErrorsAndMessages( m2ethod = method, key = 'full_model_windowing', method = method, phenList = phenlistObject, depVariable = depVariable, modelWeight = we2, threshold = threshold, check = check, equation = equation, name = 'windowing_analysis', keepList = c( keep_batch = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Batch', checkLevels = FALSE), keep_equalvar = TRUE, keep_weight = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Weight'), keep_sex = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Sex'), keep_interaction = CheckIfNameExistInDataFrame(phenlistObject@datasetPL, 'Sex') ) ) windowingNote$windowing_analysis$full_model_windowed = objectfulw$note # Plotting args = list( # for output r = r , tt = tt , mm = mm.bck , we = we , threshold = threshold , maxPeaks = maxPeaks , ## for plot plot = plot , PicDir = PicDir , filename = filename , storeplot = storeplot , phenlistObject = phenlistObject , check = check , main = main , depVariable = depVariable , threshold = threshold ) #args = c(as.list(environment()), list()) windowingNote$window_parameters = WindowingDetails(args) graphFileName = PlotWindowingResult(args = args, overwrite = OverwriteExistingFiles) ObjectsThatMustBeRemovedInEachIteration(c('args', 'objectNorm')) } else{ message0('An error in forming the full model (windowing only) ... ') objectf = objectNorm = objectfulw = NULL# object0 r = we = NULL } } else{ objectf = objectNorm = objectfulw = NULL# object0 r = we = NULL } if (superDebug) { message0('SuperDebug is activated. Writting the Rdata file ... ') SupDebFile = file.exists0(file.path(PicDir, 'superDebugAnalysis.Rdata')) message0('Superdebug file: \n\t\t ~> ', SupDebFile) agg = c(as.list(environment()), list()) save(agg, file = SupDebFile) } return( list( InputObject = phenlistObject, # Full model (not windowed) FullObj = objectNorm$obj, # Optimised model (not windowed) NormalObj = object0$obj , # Optimized windowed model WindowedObj = objectf$obj , # Full model windowed FullWindowedObj = objectfulw$obj, WinDetails = r , weight = we , method = method , graphFileName = ifelse(is.null(graphFileName), 'NA', graphFileName), note = c(note, windowingNote) ) ) }
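# A hypothetical call sketch (not from the package itself): it assumes a data frame
# 'df' with the columns PhenStat expects (Genotype, Sex, Batch, Weight and the
# dependent variable 'Value'), and it relies on package-internal helpers used inside
# PhenStatWindow (message0, ModeWithErrorsAndMessages, ...), so it is left commented.
# phenlist <- PhenStat::PhenList(dataset = df, testGenotype = "KO", refGenotype = "+/+")
# res <- PhenStatWindow(phenlistObject = phenlist,
#                       method = 'MM',
#                       depVariable = 'Value',
#                       windowing = TRUE,
#                       plot = FALSE,
#                       PicDir = tempdir(),
#                       filename = 'example',
#                       predFunction = predict,
#                       residFunction = resid,
#                       direction = c(1, 1))
# res$WindowedObj   # optimized windowed model (if windowing succeeded)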
/Early adults stats pipeline/DRrequired/DRrequiredPackage/R/PhenStatWindow.R
permissive
xhyuo/impc_stats_pipeline
R
false
false
14,094
r
#
# model FACTS tte (time-to-event) predictor VSRs
#

# Simulate 100,000 exponential event times at 'rate' and map them through a
# logistic link to per-subject dichotomous response probabilities.
dich.single <- function(rate, alpha, beta) {
  Y <- rexp(100000, rate)
  Y2 <- alpha + beta * Y
  Y3 <- exp(Y2) / (1 + exp(Y2))
  return(Y3)
}

# Mean dichotomous response rate for each hazard ratio in HR, applied to the
# control hazard rate CHR.
dich <- function(CHR, HR, alpha, beta) {
  rates <- NULL
  for (h in HR) {
    rates <- append(rates, mean(dich.single(CHR * h, alpha, beta)))
  }
  return(rates)
}

# Histogram of the simulated response probabilities; returns their mean.
hist.dich.single <- function(rate, alpha, beta) {
  Y3 <- dich.single(rate, alpha, beta)
  hist(Y3)
  return(mean(Y3))
}
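# A usage sketch (the values below are illustrative assumptions, not FACTS defaults):
# map a control hazard rate of 0.1 and hazard ratios 0.5-1.0 onto mean dichotomous
# response rates through the logistic link defined above.
set.seed(1)
dich(CHR = 0.1, HR = c(0.5, 0.75, 1.0), alpha = -1, beta = 2)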
/src/Helpers/tte-dich-pred.R
permissive
BerryConsultants/facts-utils
R
false
false
469
r
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{zoon}
\alias{zoon}
\alias{zoon-package}
\title{Zoon: A package for comparing multiple SDM models, good model diagnostics
and better reproducibility}
\description{
Zoon: A package for comparing multiple SDM models, good model diagnostics and
better reproducibility
}
/man/zoon.Rd
permissive
Pakillo/zoon
R
false
false
352
rd
checkInputSource <- function(inputSource, errors, errorTypes) { if (typeof(inputSource) != "character") { msg<- "'inputSource' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else { if (inputSource != "ABIF" && inputSource != "FASTA") { msg <- "'inputSource' must be 'ABIF' or 'FASTA'" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } } return(list(errors, errorTypes)) } checkProcessMethod <- function(inputSource, processMethod, errors, errorTypes) { if (typeof(processMethod) != "character") { msg<- "'processMethod' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else { if (processMethod != "REGEX" && processMethod != "CSV") { msg <- "'processMethod' must be 'REGEX' or 'CSV'" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } } return(list(errors, errorTypes)) } checkContigName <- function(contigName, errors, errorTypes) { if (is.null(contigName)) { msg<- "'contigName' must not be NULL. 'contigName' is missing." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } checkGeneticCode <- function(geneticCode, errors, errorTypes) { if (typeof(geneticCode) != "character") { msg<- "'geneticCode' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else { if(!("*" %in% geneticCode)) { msg <- "'geneticCode' does not specify any stop codons." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } } return(list(errors, errorTypes)) } checkRefAAS <- function(refAminoAcidSeq, errors, errorTypes) { if (typeof(refAminoAcidSeq) != "character") { msg<- "'refAminoAcidSeq' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } ### ============================================================================ ### ConsensusRead related: 'minReadsNum', 'minReadLength', 'minFractionCall' ### 'maxFractionLost' prechecking ### ============================================================================ checkMinReadsNum <- function(minReadsNum, errors, errorTypes) { if (!is.numeric(minReadsNum)) { msg <- "'minReadsNum' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (minReadsNum%%1!=0) { msg <- "'minReadsNum' must be integer." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } if (minReadsNum == 0) { msg <- "'minReadsNum' cannot be zero." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } } return(list(errors, errorTypes)) } checkMinReadLength <- function(minReadLength, errors, errorTypes) { if (!is.numeric(minReadLength)) { msg <- "'minReadLength' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (minReadLength%%1!=0) { msg <- "'minReadLength' must be integer." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } } return(list(errors, errorTypes)) } checkMinFractionCall <- function(minFractionCall, errors, errorTypes) { if (!is.numeric(minFractionCall)) { msg <- "'minFractionCall' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (minFractionCall > 1 || minFractionCall < 0) { msg <- "'minFractionCall' must be between 0 and 1." 
errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } return(list(errors, errorTypes)) } checkMaxFractionLost <- function(maxFractionLost, errors, errorTypes) { if (!is.numeric(maxFractionLost)) { msg <- "'maxFractionLost' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (maxFractionLost > 1 || maxFractionLost < 0) { msg <- "'maxFractionLost' must be between 0 and 1." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } return(list(errors, errorTypes)) } checkAcceptStopCodons <- function(acceptStopCodons, errors, errorTypes) { if (!is.logical(acceptStopCodons)) { msg <- "'acceptStopCodons' must be 'TRUE' or 'FALSE'" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } checkReadingFrame <- function(readingFrame, errors, errorTypes) { if (!is.numeric(readingFrame)) { msg <- "'readingFrame' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if(!readingFrame %in% c(1,2,3)) { msg <- "'readingFrame' must be 1, 2, or 3." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } } return(list(errors, errorTypes)) } checkProcessorsNum <- function(processorsNum, errors, errorTypes) { if (!is.numeric(processorsNum)) { msg <- "'processorsNum' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (!(processorsNum %% 1 == 0) && !is.null(processorsNum)) { msg <- "'processorsNum' must be integer." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } } return(list(errors, errorTypes)) } ### ============================================================================ ### 'ABIF_Directory' prechecking ### ============================================================================ checkABIF_Directory <- function(ABIF_Directory, errors, errorTypes) { if (typeof(ABIF_Directory) != "character") { msg<- "'ABIF_Directory' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else if (is.null(ABIF_Directory)) { msg<- "'ABIF_Directory' cannot be NULL." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else { if (!dir.exists(ABIF_Directory)) { msg <- paste("'", ABIF_Directory, "'", " parent directory does not exist.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "DIRECTORY_NOT_EXIST_ERROR") } } return(list(errors, errorTypes)) } ### ============================================================================ ### Quality trimming related: 'TrimmingMethod', 'M1TrimmingCutoff', ### 'M2CutoffQualityScore', 'M2SlidingWindowSize' ### ============================================================================ checkTrimParam <- function(TrimmingMethod, M1TrimmingCutoff, M2CutoffQualityScore, M2SlidingWindowSize, errors, errorTypes) { if (TrimmingMethod == "M1") { if (!is.numeric(M1TrimmingCutoff)) { msg<- "'M1TrimmingCutoff' must be numeric (You choose M1)." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { # Ristriction about M1TrimmingCutoff ! 
if (M1TrimmingCutoff > 1 || M1TrimmingCutoff < 0) { msg <- paste("Your input M1TrimmingCutoff is: '", M1TrimmingCutoff, "' is invalid.", "'M1TrimmingCutoff' should", "be between 0 and 1.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } } else if (TrimmingMethod == "M2") { if (!is.numeric(M2CutoffQualityScore)) { msg<- "'M2CutoffQualityScore' must be numeric (You choose M2)." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (M2CutoffQualityScore > 60 || M2CutoffQualityScore < 0 || M2CutoffQualityScore%%1!=0) { msg <- paste("Your input M2CutoffQualityScore is: '", M2CutoffQualityScore, "' is invalid.", "'M2CutoffQualityScore' should", "be between 0 and 60.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } if (!is.numeric(M2SlidingWindowSize)) { msg<- "'M2SlidingWindowSize' must be numeric (You choose M2)." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (M2SlidingWindowSize > 40 || M2SlidingWindowSize < 0 || M2SlidingWindowSize%%1!=0) { msg <- paste("Your input M2SlidingWindowSize is: '", M2SlidingWindowSize, "' is invalid.", "'M2SlidingWindowSize' should", "be between 0 and 40.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } } else { msg <- "'TrimmingMethod' must be 'M1' or 'M2'." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } ### ============================================================================ ### 'baseNumPerRow', 'heightPerRow', signalRatioCutoff', 'showTrimmed' prechecking ### ============================================================================ checkBaseNumPerRow <- function(baseNumPerRow, errors, errorTypes) { if (!is.numeric(baseNumPerRow)) { msg <- "'baseNumPerRow' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (baseNumPerRow%%1!=0) { msg <- "'baseNumPerRow' must be integer." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } if (baseNumPerRow < 0 || baseNumPerRow > 200) { msg <- "'baseNumPerRow' must be between 0 and 200." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } return(list(errors, errorTypes)) } checkHeightPerRow <- function(heightPerRow, errors, errorTypes) { if (!is.numeric(heightPerRow)) { msg <- "'heightPerRow' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (heightPerRow%%1!=0) { msg <- "'heightPerRow' must be integer." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } if (heightPerRow < 50 || heightPerRow > 600) { msg <- "'heightPerRow' must be between 50 and 600." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } return(list(errors, errorTypes)) } ### ============================================================================ ### MakeBaseCalls Utilities function ### ============================================================================ checkSignalRatioCutoff <- function(signalRatioCutoff, errors, errorTypes) { if (!is.numeric(signalRatioCutoff)) { msg <- "'signalRatioCutoff' must be numeric" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_TYPE_ERROR") } else { if (signalRatioCutoff < 0 || signalRatioCutoff > 1) { msg <- "'signalRatioCutoff' must be between 0 and 1." 
errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_RANGE_ERROR") } } return(list(errors, errorTypes)) } checkShowTrimmed <- function(showTrimmed, errors, errorTypes) { if (!is.logical(showTrimmed)) { msg <- "'showTrimmed' must be between TRUE and FALSE." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } checkFASTA_File <- function(inputSource, FASTA_File, errors, errorTypes) { if (typeof(FASTA_File) != "character") { msg<- "'FASTA_File' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else if (is.null(FASTA_File)) { msg<- "'FASTA_File' cannot be NULL." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else { if (!file.exists(FASTA_File)) { cat ("FASTA_File", FASTA_File) msg <- paste("'", FASTA_File, "'", " file does not exist.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "FILE_NOT_EXIST_ERROR") } if (is.na(str_extract(basename(FASTA_File), ".fa$")) && is.na(str_extract(basename(FASTA_File), ".fasta$"))) { msg <- paste("'", FASTA_File, "'", " file extension must be '.fa' or '.fasta'.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "FILE_TYPE_ERROR") } } return(list(errors, errorTypes)) } checkREGEX_SuffixForward <- function(REGEX_SuffixForward, errors, errorTypes){ if (typeof(REGEX_SuffixForward) != "character") { msg<- "'REGEX_SuffixForward' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else if (is.null(REGEX_SuffixForward)) { msg<- "'REGEX_SuffixForward' cannot be NULL." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } checkREGEX_SuffixReverse <- function(REGEX_SuffixReverse, errors, errorTypes) { if (typeof(REGEX_SuffixReverse) != "character") { msg<- "'REGEX_SuffixReverse' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else if (is.null(REGEX_SuffixReverse)) { msg<- "'REGEX_SuffixReverse' cannot be NULL." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } checkCSV_NamesConversion <- function(CSV_NamesConversion, errors, errorTypes) { if (typeof(CSV_NamesConversion) != "character") { msg<- "'CSV_NamesConversion' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else if (is.null(CSV_NamesConversion)) { msg<- "'CSV_NamesConversion' cannot be NULL." 
errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else { if (!file.exists(CSV_NamesConversion)) { msg <- paste("'", CSV_NamesConversion, "'", " file does not exist.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "CSV_FILE_NOT_EXIST_ERROR") } } return(list(errors, errorTypes)) } checkReadFileNameExist <- function(readFileName, errors, errorTypes) { if (!file.exists(readFileName)) { msg <- paste("'", readFileName, "'", " file does not exist.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "FILE_NOT_EXIST_ERROR") } return(list(errors, errorTypes)) } checkReadFileName <- function(readFileName, inputSource, errors, errorTypes) { if (inputSource == "ABIF") { if (is.na(str_extract(basename(readFileName), ".ab1$"))) { msg <- paste("'", readFileName, "'", " file extension must be '.ab1'.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "FILE_TYPE_ERROR") } } else if (inputSource == "FASTA") { if (is.na(str_extract(basename(readFileName), ".fa$")) && is.na(str_extract(basename(readFileName), ".fasta$"))) { msg <- paste("'", readFileName, "'", " file extension must be '.fa' or '.fasta'.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "FILE_TYPE_ERROR") } } return(list(errors, errorTypes)) } checkTargetFastaName <- function(targetFastaName, fastaReadName, readFileName, errors, errorTypes) { if(isEmpty(targetFastaName)) { msg <- paste0("The name '", fastaReadName, "' is not in the '", basename(readFileName), "' FASTA file") errors <- c(errors, msg) errorTypes <- c(errorTypes, "FASTA_NAME_NOT_EXIST") } return(list(errors, errorTypes)) } checkReadFeature <- function(readFeature, errors, errorTypes) { if (typeof(readFeature) != "character") { msg<- "'readFeature' must be character type." errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } else { if (readFeature != "Forward Read" && readFeature != "Reverse Read") { msg <- "'readFeature' must be 'Forward Read' or 'Reverse Read'" errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } } return(list(errors, errorTypes)) } checkQualityPhredScores <- function(qualityPhredScores, errors, errorTypes) { if (length(qualityPhredScores) == 0) { msg <- paste("'qualityPhredScores' length cannot be zero.") errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } if (!all(qualityPhredScores%%1 == 0)) { msg <- "All elements in 'qualityPhredScores' vector must be integer." 
errors <- c(errors, msg) errorTypes <- c(errorTypes, "PARAMETER_VALUE_ERROR") } return(list(errors, errorTypes)) } checkAb1FastaCsv <- function(ABIF_Directory, FASTA_File, CSV_NamesConversion, inputSource, errors, errorTypes) { if (!file.exists(CSV_NamesConversion)) { msg <- paste("CSV_NamesConversion: '", CSV_NamesConversion, "'", " file does not exist.", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "FILE_NOT_EXIST_ERROR") } else { warnings <- character() if (inputSource == "ABIF") { csvFile <- read.csv(CSV_NamesConversion, header = TRUE) csvReads <- as.character(csvFile$reads) parentDirFiles <- list.files(ABIF_Directory) sourceReads <- parentDirFiles[grepl("\\.ab1$", parentDirFiles)] } else if (inputSource == "FASTA") { csvFile <- read.csv(CSV_NamesConversion, header = TRUE) csvReads <- as.character(csvFile$reads) readFasta <- read.fasta(FASTA_File, as.string = TRUE) sourceReads <- names(readFasta) } ######################################################################## ### Check 1: all reads in the read folder are listed in the csv ######################################################################## readInCsvWarningMsg <- lapply(sourceReads, function(sourceRead) { if (!(sourceRead %in% csvReads)) { msg <- paste("'", sourceRead, "' is not in the ", "csv file (", CSV_NamesConversion, ")", sep = "") return(msg)} return()}) warnings <- c(warnings, unlist(readInCsvWarningMsg), use.names = FALSE) ######################################################################## ### Check 2: all reads listed in the csv file are in the reads folder ######################################################################## readInSourceWarningMsg <- lapply(csvReads, function(csvRead) { if (!(csvRead %in% sourceReads)) { msg <- paste("'", csvRead, "' is not in the ", "parent directory.", sep = "") return(msg)} return()}) warnings <- c(warnings, unlist(readInSourceWarningMsg), use.names = FALSE) # csv file has all of the columns, no extra columns ######################################################################## ### Check 3: csv file has all of the columns, no extra columns ######################################################################## if (!("contig" %in% colnames(csvFile))) { msg <- paste("'contig' is not in the csv file (", CSV_NamesConversion, ")", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "CSV_MISMATCH_ERROR") } else if (!("direction" %in% colnames(csvFile))) { msg <- paste("'direction' is not in the csv file (", CSV_NamesConversion, ")", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "CSV_MISMATCH_ERROR") } else if (!("reads" %in% colnames(csvFile))) { msg <- paste("'reads' is not in the csv file (", CSV_NamesConversion, ")", sep = "") errors <- c(errors, msg) errorTypes <- c(errorTypes, "CSV_MISMATCH_ERROR") } ######################################################################## ### Check 4: F/R column has only F's and R's. 
        ########################################################################
        csvFileDirections <- unique(as.character(csvFile$direction))
        if (!all(csvFileDirections %in% c("F", "R"))) {
            msg <- "In the 'direction' column of your CSV file, you can only have 'F' and 'R'."
            errors <- c(errors, msg)
            errorTypes <- c(errorTypes, "CSV_VALUE_ERROR")
        }
        if (length(warnings) != 0) {
            invisible(lapply(warnings, log_warn))
        }
    }
    return(list(errors, errorTypes))
}
checkGreplForward <- function(forwardSelectInputFiles, warnings, warningsType) {
    if (length(forwardSelectInputFiles) == 0) {
        msg <- paste0("Your 'contigName' and 'REGEX_SuffixForward' regular ",
                      "expression parameters cannot match any forward reads.")
        warnings <- c(warnings, msg)
        warningsType <- c(warningsType, "REGEX_MATCH_WARN")
    }
    return(list(warnings, warningsType))
}
checkGreplReverse <- function(reverseSelectInputFiles, warnings, warningsType) {
    if (length(reverseSelectInputFiles) == 0) {
        msg <- paste0("Your 'contigName' and 'REGEX_SuffixReverse' regular ",
                      "expression parameters cannot match any reverse reads.")
        warnings <- c(warnings, msg)
        warningsType <- c(warningsType, "REGEX_MATCH_WARN")
    }
    return(list(warnings, warningsType))
}
checkCSVConvForward <- function(forwardReads, warnings, warningsType) {
    if (length(forwardReads) == 0) {
        msg <- paste0("The names of forward reads in your 'CSV_NamesConversion'",
                      " CSV file do not match any forward reads.")
        warnings <- c(warnings, msg)
        warningsType <- c(warningsType, "CSV_MATCH_WARN")
    }
    return(list(warnings, warningsType))
}
checkCSVConvReverse <- function(reverseReads, warnings, warningsType) {
    if (length(reverseReads) == 0) {
        msg <- paste0("The names of reverse reads in your 'CSV_NamesConversion'",
                      " CSV file do not match any reverse reads.")
        warnings <- c(warnings, msg)
        warningsType <- c(warningsType, "CSV_MATCH_WARN")
    }
    return(list(warnings, warningsType))
}
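The validators above all follow the same accumulate-and-return pattern: each helper takes the current errors and errorTypes character vectors, appends to them when a check fails, and returns both as a two-element list. A minimal usage sketch (not part of the package) is below; the input values are invented for illustration, and only functions defined above are called.

# Sketch: chain two of the checkers above and report whatever they collected.
errors <- character(0)
errorTypes <- character(0)

res <- checkShowTrimmed("yes", errors, errorTypes)      # not logical -> adds one error
errors <- res[[1]]; errorTypes <- res[[2]]

res <- checkReadFeature("Forward", errors, errorTypes)  # not an allowed value -> adds another
errors <- res[[1]]; errorTypes <- res[[2]]

if (length(errors) > 0) {
    # Pair each message with its error type when reporting.
    for (i in seq_along(errors)) message(errorTypes[i], ": ", errors[i])
}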
# Source: roblanf/sangeranalyseR, R/UtilitiesFuncInputChecker.R (R, permissive license)
# function slices a tree at slice and returns all subtrees
# it uses extract.clade(); if trivial==FALSE, subtrees with fewer than 2 taxa are ignored
# written by Liam Revell 2011/2012
treeSlice <- function(tree, slice, trivial = FALSE) {
  if (!inherits(tree, "phylo")) stop("tree should be an object of class 'phylo'.")
  tree <- reorder(tree) # reorder cladewise
  H <- nodeHeights(tree)
  edges <- which(H[, 2] > slice & H[, 1] < slice)
  nodes <- tree$edge[edges, 2]
  if (!trivial) nodes <- nodes[nodes > length(tree$tip)]
  trees <- list()
  class(trees) <- "multiPhylo"
  for (i in 1:length(nodes)) {
    if (nodes[i] > length(tree$tip)) {
      trees[[i]] <- extract.clade(tree, node = nodes[i])
      trees[[i]]$root.edge <- H[which(tree$edge[, 2] == nodes[i]), 2] - slice
    } else {
      z <- list(edge = matrix(c(2, 1), 1, 2),
                edge.length = H[which(tree$edge[, 2] == nodes[i]), 2] - slice,
                tip.label = tree$tip.label[nodes[i]],
                Nnode = 1L)
      class(z) <- "phylo"
      trees[[i]] <- z
    }
  }
  return(trees)
}
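A short usage sketch, assuming the ape and phytools packages are available (treeSlice relies on reorder(), nodeHeights(), and extract.clade() from them). The tree and the slice height below are arbitrary choices for illustration.

library(ape)       # extract.clade(), reorder(), rtree()
library(phytools)  # nodeHeights()

set.seed(42)
tree <- rtree(20)                             # a random 20-tip tree
slice.height <- 0.5 * max(nodeHeights(tree))  # cut half-way up the tree

subtrees <- treeSlice(tree, slice = slice.height)
length(subtrees)          # number of non-trivial subtrees crossing the slice
sapply(subtrees, Ntip)    # tip counts of each returned subtree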
# Source: mrhelmus/phytools, R/treeSlice.R (R, no license specified)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/s.MXN.R \name{s.MXN} \alias{s.MXN} \title{Neural Network with \code{mxnet} \link{C, R}} \usage{ s.MXN(x, y = NULL, x.test = NULL, y.test = NULL, x.valid = NULL, y.valid = NULL, upsample = FALSE, upsample.seed = NULL, net = NULL, n.hidden.nodes = NULL, output = NULL, ctx = mxnet::mx.cpu(), initializer = mxnet::mx.init.Xavier(), batch.normalization = TRUE, l2.normalization = FALSE, activation = "relu", optimizer = "adadelta", batch.size = NULL, momentum = 0.9, max.epochs = 1000, min.epochs = 25, early.stop = c("train", "valid"), early.stop.absolute.threshold = NA, early.stop.relative.threshold = NA, early.stop.relativeVariance.threshold = NULL, early.stop.n.steps = NULL, learning.rate = NULL, dropout = 0, dropout.before = 1, dropout.after = 0, eval.metric = NULL, minimize = NULL, arg.params = NULL, mx.seed = NULL, x.name = NULL, y.name = NULL, plot.graphviz = FALSE, print.plot = TRUE, print.error.plot = NULL, rtlayout.mat = c(2, 1), plot.fitted = NULL, plot.predicted = NULL, plot.theme = getOption("rt.fit.theme", "lightgrid"), question = NULL, verbose = TRUE, verbose.mxnet = TRUE, verbose.checkpoint = FALSE, outdir = NULL, n.cores = rtCores, save.mod = ifelse(!is.null(outdir), TRUE, FALSE), ...) } \arguments{ \item{x}{Numeric vector or matrix / data frame of features i.e. independent variables} \item{y}{Numeric vector of outcome, i.e. dependent variable} \item{x.test}{Numeric vector or matrix / data frame of testing set features Columns must correspond to columns in \code{x}} \item{y.test}{Numeric vector of testing set outcome} \item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only) Caution: upsample will randomly sample with replacement if the length of the majority class is more than double the length of the class you are upsampling, thereby introducing randomness} \item{upsample.seed}{Integer: If provided, will be used to set the seed during upsampling. Default = NULL (random seed)} \item{net}{MXNET Symbol: provide a previously defined network. logger will not work in this case at the moment, so early stopping cannot be applied} \item{n.hidden.nodes}{Integer vector: Length must be equal to the number of hidden layers you wish to create} \item{output}{String: "Logistic" for binary classification, "Softmax" for classification of 2 or more classes, "Linear" for Regression. Defaults to "Logistic" for binary outcome, "Softmax" for 3+ classes, "LinearReg" for regression.} \item{ctx}{MXNET context: \code{mxnet::mx.cpu()} to use CPU(s). Define N of cores using \code{n.cores} argument. \code{mxnet::mx.gpu()} to use GPU. For multiple GPUs, provide list like such: \code{ctx = list(mxnet::mx.gpu(0), mxnet::mx.gpu(1)} to use two GPUs.} \item{batch.normalization}{Logical: If TRUE, batch normalize before activation. Default = TRUE} \item{l2.normalization}{Logical: If TRUE, apply L2 normalization after fully connected step. Default = FALSE} \item{activation}{String vector: Activation types to use: 'relu', 'sigmoid', 'softrelu', 'tanh'. If length < n of hidden layers, elements are recycled. 
See \code{mxnet::mx.symbol.Activation}}

\item{max.epochs}{Integer: Number of iterations for training.}

\item{learning.rate}{Float: learning rate}

\item{dropout}{Float (0, 1): Probability of dropping nodes}

\item{dropout.before}{Integer: Index of hidden layer before which dropout should be applied}

\item{dropout.after}{Integer: Index of hidden layer after which dropout should be applied}

\item{eval.metric}{String: Metric used for evaluation during training. Default: "rmse"}

\item{x.name}{Character: Name for feature set}

\item{y.name}{Character: Name for outcome}

\item{plot.graphviz}{Logical: if TRUE, plot the network structure using \code{graphviz}}

\item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3}. Takes precedence over \code{plot.fitted} and \code{plot.predicted}}

\item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted}

\item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted. Requires \code{x.test} and \code{y.test}}

\item{plot.theme}{String: "zero", "dark", "box", "darkbox"}

\item{question}{String: the question you are attempting to answer with this model, in plain language.}

\item{verbose}{Logical: If TRUE, print summary to screen.}

\item{outdir}{Path to output directory. If defined, will save Predicted vs. True plot, if available, as well as full model output, if \code{save.mod} is TRUE}

\item{n.cores}{Integer: Number of cores to use. Caution: Only set to >1 if you're sure MXNET is not already using multiple cores}

\item{save.mod}{Logical. If TRUE, save all output as RDS file in \code{outdir}. \code{save.mod} is TRUE by default if an \code{outdir} is defined. If set to TRUE, and no \code{outdir} is defined, outdir defaults to \code{paste0("./s.", mod.name)}}

\item{...}{Additional parameters to be passed to \code{mxnet::mx.model.FeedForward.create}}
}
\description{
Train a Neural Network using \code{mxnet} with optional early stopping
}
\details{
Early stopping is considered after training has taken place for \code{min.epochs} epochs. After that point, early stopping is controlled by three criteria: an absolute threshold (\code{early.stop.absolute.threshold}), a relative threshold (\code{early.stop.relative.threshold}), or a relative variance across a set number of steps (\code{early.stop.relativeVariance.threshold} along with \code{early.stop.n.steps}). By default (if you change none of the \code{early.stop} arguments), early stopping looks at training error and stops when the relative variance of the loss over the last 24 steps (classification) or 12 steps (regression) is lower than 5e-06 (classification) or 5e-03 (regression). To turn early stopping off, set all early stopping criteria to NA.

It is important to tune the learning rate and adjust \code{max.epochs} accordingly depending on the learning type (Classification vs. Regression) and the specific dataset. Defaults cannot be expected to work on all problems.
} \seealso{ \link{elevate} for external cross-validation Other Supervised Learning: \code{\link{s.ADABOOST}}, \code{\link{s.ADDTREE}}, \code{\link{s.BART}}, \code{\link{s.BAYESGLM}}, \code{\link{s.BRUTO}}, \code{\link{s.C50}}, \code{\link{s.CART}}, \code{\link{s.CTREE}}, \code{\link{s.DA}}, \code{\link{s.ET}}, \code{\link{s.EVTREE}}, \code{\link{s.GAM.default}}, \code{\link{s.GAM.formula}}, \code{\link{s.GAMSEL}}, \code{\link{s.GAM}}, \code{\link{s.GBM3}}, \code{\link{s.GBM}}, \code{\link{s.GLMNET}}, \code{\link{s.GLM}}, \code{\link{s.GLS}}, \code{\link{s.H2ODL}}, \code{\link{s.H2OGBM}}, \code{\link{s.H2ORF}}, \code{\link{s.IRF}}, \code{\link{s.KNN}}, \code{\link{s.LDA}}, \code{\link{s.LM}}, \code{\link{s.MARS}}, \code{\link{s.MLRF}}, \code{\link{s.NBAYES}}, \code{\link{s.NLA}}, \code{\link{s.NLS}}, \code{\link{s.NW}}, \code{\link{s.POLYMARS}}, \code{\link{s.PPR}}, \code{\link{s.PPTREE}}, \code{\link{s.QDA}}, \code{\link{s.QRNN}}, \code{\link{s.RANGER}}, \code{\link{s.RFSRC}}, \code{\link{s.RF}}, \code{\link{s.SGD}}, \code{\link{s.SPLS}}, \code{\link{s.SVM}}, \code{\link{s.TFN}}, \code{\link{s.XGBLIN}}, \code{\link{s.XGB}} Other Deep Learning: \code{\link{d.H2OAE}}, \code{\link{p.MXINCEPTION}}, \code{\link{s.H2ODL}}, \code{\link{s.TFN}} } \author{ Efstathios D. Gennatas } \concept{Deep Learning} \concept{Supervised Learning}
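The help page above carries no \examples section. A hypothetical call, built only from the arguments documented in the usage block, might look as follows; this is a sketch, not package-endorsed usage, and it assumes rtemis and mxnet are installed and that simulated data stands in for a real problem.

library(rtemis)

# Simulated regression data, just to exercise the documented interface
set.seed(2019)
x <- data.frame(matrix(rnorm(500 * 10), nrow = 500))
y <- x[, 1] - 0.5 * x[, 3] + rnorm(500)

mod <- s.MXN(x, y,
             n.hidden.nodes = c(16, 8),   # two hidden layers
             activation = "relu",
             max.epochs = 200,
             min.epochs = 25,
             learning.rate = 0.01,
             print.plot = FALSE)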
% Source: muschellij2/rtemis, man/s.MXN.Rd (roxygen2-generated Rd, no license specified)
## ----setup, include=FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) ## ----message = FALSE, warning = FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- set.seed(1) ##Load packages for caret and parallel processing library(caret) library(parallel) library(doParallel) ##Register cores cluster <- makeCluster(detectCores() - 1) registerDoParallel(cluster) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- raw.data <- read.csv("../input/train.csv", na.strings = '') ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- str(raw.data) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ##See if any rows have missing data. sapply(raw.data, anyNA) ##See if any rows are duplicates. any(duplicated(raw.data)) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ##Subset data for selected columns. surv <- as.factor(raw.data$Survived) selected.data <- subset(raw.data, select = c('Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked')) ##Create Dummy Variables to remove factor features. dv <- dummyVars( ~., data = selected.data) ##Read dummy variables into new data frame. dv.data <- predict(dv, newdata = selected.data) ##Make Preprocessed object. pp <- preProcess(dv.data, method = 'knnImpute') ##Create object with preprocessed data impute.data <- predict(pp, dv.data) ##Load imputed data into data frame. imputed.data <- data.frame(dv.data) imputed.data$Age <- impute.data[,4] imputed.data$Survived <- raw.data$Survived ##Add embarked data imputed.data[,8:10] <- impute.data[,8:10] imputed.data$Survived <- surv ##Find correlation of features. cor(imputed.data[1:7]) ## ----include = FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ##Remove unneeded feature. 
imputed.data <- imputed.data[,-imputed.data$Sex.male] ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- featurePlot(x = imputed.data[,2:7], y = as.factor(imputed.data$Survived), plot = 'pairs', auto.key = list(column = 2)) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- train.model.ind <- createDataPartition(imputed.data$Survived, p = 0.8, list = FALSE) train.model <- imputed.data[train.model.ind,] test.model <- imputed.data[-train.model.ind,] ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- tc <- trainControl(allowParallel = TRUE, method = 'repeatedcv', number = 4, repeats = 2) ## ----include = FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- train.model$Survived <- as.factor(train.model$Survived) test.model$Survived <- as.factor(test.model$Survived) models.used <- c('kknn1', 'kknn2', 'rpart', 'bag','boost', 'nnet', 'svmLinear', 'rf') ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ knnModel1 <- train(Survived ~ ., data = train.model, method = 'kknn', trainControl = tc) knnModel2 <- train(Survived ~ ., data = train.model, method = 'kknn', k = 3, l = 2, trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ treeModel <- train(Survived ~ ., data = train.model, method = 'rpart') }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ bagModel <- train(Survived ~ ., data = train.model, method = 'AdaBag', trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ boostModel <- train(Survived ~ ., data = train.model, method = 'adaboost', trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ netModel <- train(Survived ~ ., data = train.model, method = 'nnet', trace = FALSE, trainControl = tc) }) ## ----results = FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
system.time({ svmModel <- train(Survived ~ ., data = train.model, method = 'svmLinear', trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ rfModel <- train(Survived ~ ., data = train.model, method = 'rf', trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- stopCluster(cluster) registerDoSEQ() elapsedTime <- list( 9.91, 0.61, 684.11, 50.67, 2.72, 2.05, 6.95) ## ----include = FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knn1.mat <- confusionMatrix(train.model$Survived, predict(knnModel1, train.model)) knn2.mat <- confusionMatrix(train.model$Survived, predict(knnModel2, train.model)) rpart.mat <- confusionMatrix(train.model$Survived, predict(treeModel, train.model)) bag.mat <- confusionMatrix(train.model$Survived, predict(bagModel, train.model)) boost.mat <- confusionMatrix(train.model$Survived, predict(boostModel, train.model)) net.mat <- confusionMatrix(train.model$Survived, predict(netModel, train.model)) svm.mat <- confusionMatrix(train.model$Survived, predict(svmModel, train.model)) rf.mat <- confusionMatrix(train.model$Survived, predict(rfModel, train.model)) train.accuracies <- c(knn1.mat$overall[1], knn2.mat$overall[1], rpart.mat$overall[1], bag.mat$overall[1], boost.mat$overall[1], net.mat$overall[1], svm.mat$overall[1], rf.mat$overall[1]) knn1.mat <- confusionMatrix(test.model$Survived, predict(knnModel1, test.model)) knn2.mat <- confusionMatrix(test.model$Survived, predict(knnModel2, test.model)) rpart.mat <- confusionMatrix(test.model$Survived, predict(treeModel, test.model)) bag.mat <- confusionMatrix(test.model$Survived, predict(bagModel, test.model)) boost.mat <- confusionMatrix(test.model$Survived, predict(boostModel, test.model)) net.mat <- confusionMatrix(test.model$Survived, predict(netModel, test.model)) svm.mat <- confusionMatrix(test.model$Survived, predict(svmModel, test.model)) rf.mat <- confusionMatrix(test.model$Survived, predict(rfModel, test.model)) test.accuracies <- c(knn1.mat$overall[1], knn2.mat$overall[1], rpart.mat$overall[1], bag.mat$overall[1], boost.mat$overall[1], net.mat$overall[1], svm.mat$overall[1], rf.mat$overall[1]) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- model.metas <- data.frame(row.names = models.used, train.accuracies, test.accuracies) model.metas plot(model.metas$test.accuracies, main = "Accuracy of the Models", xlab = "Models", ylab = "Accuracy", ylim = c(0.7, 1), xaxt = 'n', type = 'p', pch = 15, col = 'red') points(model.metas$train.accuracies, col = 'blue', pch = 16) axis(1, at = 1:8, labels = models.used) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ##Read in test data with unknown results. 
sub.data <- read.csv('../input/test.csv')
ids <- sub.data$PassengerId

##Subset data for selected columns.
sub.data <- subset(sub.data, select = c('Pclass', 'Sex', 'Age', 'SibSp',
                                        'Parch', 'Fare', 'Embarked'))
sub.dv <- predict(dv, newdata = sub.data)
##pp <- preProcess(sub.data, method = 'knnImpute')
sub.impute <- predict(pp, sub.dv)
sub.data <- data.frame(sub.dv)
sub.data$Age <- sub.impute[,4]
sub.data$Fare <- sub.impute[,7]
sub.data[,8:10] <- sub.impute[,8:10]

preds <- predict(bagModel, sub.data)
submit <- data.frame(PassengerId = ids, Survived = preds)
write.csv(submit, 'submission.csv', row.names = FALSE)
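One caveat worth flagging in the training chunks above: caret::train() takes its resampling settings through the trControl argument, not trainControl, so the tc object defined earlier is silently ignored (the unmatched argument is swallowed by ...). A corrected call would look like the sketch below; kknn is shown as one example, and the same change applies to the other train() calls.

# Pass the repeated-CV settings via 'trControl' so they are actually used.
knnModel1 <- train(Survived ~ ., data = train.model,
                   method = 'kknn',
                   trControl = tc)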
# Source: helenaK/trustworthy-titanic, r/kernels/elimiller-the-caret-package-and-the-titanic/script/the-caret-package-and-the-titanic.R (R, no license specified)
## ----setup, include=FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) ## ----message = FALSE, warning = FALSE-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- set.seed(1) ##Load packages for caret and parallel processing library(caret) library(parallel) library(doParallel) ##Register cores cluster <- makeCluster(detectCores() - 1) registerDoParallel(cluster) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- raw.data <- read.csv("../input/train.csv", na.strings = '') ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- str(raw.data) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ##See if any rows have missing data. sapply(raw.data, anyNA) ##See if any rows are duplicates. any(duplicated(raw.data)) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ##Subset data for selected columns. surv <- as.factor(raw.data$Survived) selected.data <- subset(raw.data, select = c('Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked')) ##Create Dummy Variables to remove factor features. dv <- dummyVars( ~., data = selected.data) ##Read dummy variables into new data frame. dv.data <- predict(dv, newdata = selected.data) ##Make Preprocessed object. pp <- preProcess(dv.data, method = 'knnImpute') ##Create object with preprocessed data impute.data <- predict(pp, dv.data) ##Load imputed data into data frame. imputed.data <- data.frame(dv.data) imputed.data$Age <- impute.data[,4] imputed.data$Survived <- raw.data$Survived ##Add embarked data imputed.data[,8:10] <- impute.data[,8:10] imputed.data$Survived <- surv ##Find correlation of features. cor(imputed.data[1:7]) ## ----include = FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ##Remove unneeded feature. 
imputed.data <- imputed.data[,-imputed.data$Sex.male] ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- featurePlot(x = imputed.data[,2:7], y = as.factor(imputed.data$Survived), plot = 'pairs', auto.key = list(column = 2)) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- train.model.ind <- createDataPartition(imputed.data$Survived, p = 0.8, list = FALSE) train.model <- imputed.data[train.model.ind,] test.model <- imputed.data[-train.model.ind,] ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- tc <- trainControl(allowParallel = TRUE, method = 'repeatedcv', number = 4, repeats = 2) ## ----include = FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- train.model$Survived <- as.factor(train.model$Survived) test.model$Survived <- as.factor(test.model$Survived) models.used <- c('kknn1', 'kknn2', 'rpart', 'bag','boost', 'nnet', 'svmLinear', 'rf') ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ knnModel1 <- train(Survived ~ ., data = train.model, method = 'kknn', trainControl = tc) knnModel2 <- train(Survived ~ ., data = train.model, method = 'kknn', k = 3, l = 2, trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ treeModel <- train(Survived ~ ., data = train.model, method = 'rpart') }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ bagModel <- train(Survived ~ ., data = train.model, method = 'AdaBag', trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ boostModel <- train(Survived ~ ., data = train.model, method = 'adaboost', trainControl = tc) }) ## -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- system.time({ netModel <- train(Survived ~ ., data = train.model, method = 'nnet', trace = FALSE, trainControl = tc) }) ## ----results = FALSE------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 
system.time({
svmModel <- train(Survived ~ ., data = train.model, method = 'svmLinear', trControl = tc)
})

## --------------------------------------------------------------------------------
system.time({
rfModel <- train(Survived ~ ., data = train.model, method = 'rf', trControl = tc)
})

## --------------------------------------------------------------------------------
stopCluster(cluster)
registerDoSEQ()
elapsedTime <- list( 9.91, 0.61, 684.11, 50.67, 2.72, 2.05, 6.95)

## ----include = FALSE---------------------------------------------------------------
knn1.mat <- confusionMatrix(train.model$Survived, predict(knnModel1, train.model))
knn2.mat <- confusionMatrix(train.model$Survived, predict(knnModel2, train.model))
rpart.mat <- confusionMatrix(train.model$Survived, predict(treeModel, train.model))
bag.mat <- confusionMatrix(train.model$Survived, predict(bagModel, train.model))
boost.mat <- confusionMatrix(train.model$Survived, predict(boostModel, train.model))
net.mat <- confusionMatrix(train.model$Survived, predict(netModel, train.model))
svm.mat <- confusionMatrix(train.model$Survived, predict(svmModel, train.model))
rf.mat <- confusionMatrix(train.model$Survived, predict(rfModel, train.model))

train.accuracies <- c(knn1.mat$overall[1], knn2.mat$overall[1], rpart.mat$overall[1], bag.mat$overall[1], boost.mat$overall[1], net.mat$overall[1], svm.mat$overall[1], rf.mat$overall[1])

knn1.mat <- confusionMatrix(test.model$Survived, predict(knnModel1, test.model))
knn2.mat <- confusionMatrix(test.model$Survived, predict(knnModel2, test.model))
rpart.mat <- confusionMatrix(test.model$Survived, predict(treeModel, test.model))
bag.mat <- confusionMatrix(test.model$Survived, predict(bagModel, test.model))
boost.mat <- confusionMatrix(test.model$Survived, predict(boostModel, test.model))
net.mat <- confusionMatrix(test.model$Survived, predict(netModel, test.model))
svm.mat <- confusionMatrix(test.model$Survived, predict(svmModel, test.model))
rf.mat <- confusionMatrix(test.model$Survived, predict(rfModel, test.model))

test.accuracies <- c(knn1.mat$overall[1], knn2.mat$overall[1], rpart.mat$overall[1], bag.mat$overall[1], boost.mat$overall[1], net.mat$overall[1], svm.mat$overall[1], rf.mat$overall[1])

## --------------------------------------------------------------------------------
model.metas <- data.frame(row.names = models.used, train.accuracies, test.accuracies)
model.metas

plot(model.metas$test.accuracies, main = "Accuracy of the Models", xlab = "Models", ylab = "Accuracy", ylim = c(0.7, 1), xaxt = 'n', type = 'p', pch = 15, col = 'red')
points(model.metas$train.accuracies, col = 'blue', pch = 16)
axis(1, at = 1:8, labels = models.used)

## --------------------------------------------------------------------------------
##Read in test data with unknown results.
sub.data <- read.csv('../input/test.csv')
ids <- sub.data$PassengerId

##Subset data for the selected features.
sub.data <- subset(sub.data, select = c('Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'))
sub.dv <- predict(dv, newdata = sub.data)
##pp <- preProcess(sub.data, method = 'knnImpute')
sub.impute <- predict(pp, sub.dv)
sub.data <- data.frame(sub.dv)
sub.data$Age <- sub.impute[,4]
sub.data$Fare <- sub.impute[,7]
##Copy the imputed Embarked columns into the submission data
##(the original line re-assigned the training data by mistake).
sub.data[,8:10] <- sub.impute[,8:10]

preds <- predict(bagModel, sub.data)
submit <- data.frame(PassengerId = ids, Survived = preds)
write.csv(submit, 'submission.csv', row.names = FALSE)
# 04-isolate

library(shiny)

ui <- fluidPage(
  sliderInput(inputId = "num",
    label = "Choose a number",
    value = 25, min = 1, max = 100),
  textInput(inputId = "title",
    label = "Write a title",
    value = "Histogram of Random Normal Values"),
  plotOutput("hist")
)

server <- function(input, output) {
  output$hist <- renderPlot({
    # isolate() reads input$title without creating a reactive dependency on it,
    # so the plot re-renders only when input$num changes.
    hist(rnorm(input$num), main = isolate(input$title))
  })
}

shinyApp(ui = ui, server = server)
/day2/reactivity/04-isolate.R
no_license
wesslen/iviz-rstudio-workshop
R
false
false
483
r
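# Illustrative aside (not part of the workshop file above): for contrast with the
# isolate() version, a hypothetical server in which the histogram also re-renders
# whenever input$title changes, because renderPlot() takes a reactive dependency on
# every input it reads directly.
library(shiny)
server_no_isolate <- function(input, output) {
  output$hist <- renderPlot({
    hist(rnorm(input$num), main = input$title)  # redraws on changes to num OR title
  })
}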
library(ldmap)
library(dplyr)

fam_df <- read_plink_fam(snakemake@input[["famf"]])
ind_df <- read.table(snakemake@input[["grm_id"]], header = FALSE, stringsAsFactors = FALSE) %>%
  rename(fid = V1, iid = V2) %>%
  mutate(fid = as.character(fid), iid = as.character(iid))

# Individuals in the .fam file that are not in the GRM id list, downsampled to 10,000.
rest_df <- anti_join(fam_df, ind_df) %>%
  sample_n(10000, replace = F)

# Write the sampled subset (the original call omitted rest_df; this assumes a
# write_plink_fam(fam_df, file) argument order).
write_plink_fam(rest_df, snakemake@output[["sub_f"]])
/scripts/sample_panel.R
no_license
CreRecombinase/ptb_workflowr
R
false
false
363
r
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885895914463e-142)) result <- do.call(dcurver:::ddc,testlist) str(result)
/dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609868540-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
831
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.5381825217687e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615780484-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
347
r
#Scatter plot
pca <- prcomp(X, scale = TRUE)
summary(pca)
pca_var <- pca$sdev^2
pve <- pca_var/sum(pca_var)
plot(pve, xlab = "PCA", ylab = "Proportion of Variance Explained", ylim = c(0,0.5))
plot(cumsum(pve), xlab = "PCA", ylab = "Cumulative Proportion of Variance Explained", ylim = c(0,1))
abline(h = 0.8, col = "red")

#PCs as pca$x
S <- var(scale(X))
egn_S <- eigen(S)
PCs <- pca$x
apply(PCs, 2, var)
egn_S$values

#Plotting PCs
plot(PCs[, 1], PCs[, 2], xlab = "PC1 (28%)", ylab = "PC2 (18%)")
plot(PCs[, 1], PCs[, 3], xlab = "PC1 (28%)", ylab = "PC3 (14%)")
mn_1 <- which.min(PCs[, 1])
mx_1 <- which.max(PCs[, 1])
mn_1
mx_1

#Trees
dd <- as.dist(matrix(c(0, 3, 2, 5, 3, 0, 4, 1, 2, 4, 0, 7, 5, 1, 7, 0), nrow = 4))
par(mfrow = c(1, 3))
plot(hc_sng <- hclust(dd, method = "single"))
plot(hc_com <- hclust(dd, method = "complete"))
plot(hc_avg <- hclust(dd, method = "average"))
hc_sng$merge
hc_sng$height

#kmeans
out_kmeans <- kmeans(X, centers = cbind(cntrd_0[[1]], cntrd_0[[2]]), algorithm = "MacQueen")

#To create a primary key
row.names((data)) <- paste(data[,1], "", row.names(data))

## Cluster membership.
rect.hclust(hc_com, k = 2)
cluster_membership <- cutree(hc_com, k = 2)

##plot the cut on the dendrogram that produces these four clusters
plot(hc_complete, labels = labs, cex = 0.5)
abline(h = 139, col = "red")

## K-Means clustering, with K = 4.
set.seed(2)
out_kmeans <- kmeans(X, centers = 4, nstart = 20)
clusters_kmeans <- out_kmeans$cluster
table(clusters_kmeans, cluster_membership)

#PCA with color coded by k-means
set.seed(2)
km <- kmeans(dist(nc), centers = 4)   # centers = 4, matching the K used above; kmeans() has no scale/center arguments
pc4 <- prcomp(dist(nc), scale = TRUE, center = TRUE)
ntab <- data.frame(cbind(data.frame(pc4$x), data.frame(km$cluster)))
plot(ntab[, 1], ntab[, 2], col = ntab[, 65])
/HW-4.R
no_license
justherin/Multivariate-Data-Analysis
R
false
false
1,833
r
\name{explain.harmonicMean}
\alias{explain.harmonicMean}
\title{Harmonic Mean Function Explained}
\description{Step-by-step demonstration of the harmonic mean calculation.}
\usage{
explain.harmonicMean(x)
}
\arguments{
  \item{x}{A numeric vector}
}
\details{To calculate the harmonic mean, the user should supply a numeric vector.
The result is the step-by-step process used to calculate the harmonic mean of the
data provided as the argument. The harmonic mean formula can be found in the
harmonicMean_ help document.}
\value{The numeric result, together with an explanation of the calculation process.}
\author{Jose Manuel Gomez Caceres, \email{josemanuel.gomezc@edu.uah.es}
\cr{Juan Jose Cuadrado, \email{jjcg@uah.es}}
\cr{Universidad de Alcala de Henares}
}
\note{A vector is created with c(); for example, c(1,2,3,4,5) creates a vector
containing the numbers 1, 2, 3, 4 and 5.
}
%\seealso{}
\examples{
#data creation
data <- c(10,4,5,7,3,4,1)

explain.harmonicMean(data)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~harmonic }% use one of  RShowDoc("KEYWORDS")
\keyword{ ~armonica }% __ONLY ONE__ keyword per line
\keyword{ ~explain }
\keyword{ ~mean }
\keyword{ ~media }
\keyword{ ~explicada}
/man/explain.harmonicMean.Rd
no_license
cran/LearningRlab
R
false
false
1,303
rd
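# Illustrative aside (not part of the LearningRlab sources above): the \details text
# refers to the harmonic mean formula without showing it. A minimal base-R sketch of
# that calculation, using a hypothetical helper name so it is not confused with the
# package's explain.harmonicMean():
harmonic_mean_demo <- function(x) length(x) / sum(1 / x)
harmonic_mean_demo(c(10, 4, 5, 7, 3, 4, 1))  # same data as the .Rd example above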
# This function recombines prioritized chlorophyll data with nutrient data f_recombine_chla_nuts_data <- function() { # Original data with nutrients chla_nuts_id <- contentid::store("data_model/model_chla_nuts_combined.csv") chla_nuts_file <- contentid::resolve(chla_nuts_id) chla_nuts0 <- readr::read_csv(chla_nuts_file) %>% select(-tide, -field_coords, -depth) %>% distinct() %>% filter(!(doy1998 == 7932 & station_wq_chl == "USGS-11447650" & is.na(diss_orthophos))) # Pascale's prioritized chla chla_new <- read_csv("data_model/chlorophyll_fin_updated.csv") %>% select(-1) %>% distinct() # Join data # There are some stations that were not in original dataset??? chla_join <- left_join(chla_new, chla_nuts0) %>% dplyr::mutate(source = ifelse(station_wq_chl %in% c("USGS-11447650", "USGS-382006121401601", "USGS-11455315", "USGS-11455139"), "USGS", source)) %>% dplyr::mutate( doy1998 = as.numeric(difftime(date, as.Date("1998-01-01"), units = "day")) + 1) # Check data check <- chla_join%>% group_by(date, location) %>% summarize(n = n()) %>% filter(n>1) # Write data to data_model readr::write_csv(chla_join, "data_model/model_chla_nuts_random.csv") }
/scripts/functions/f_recombine_chla_nuts_data.R
no_license
Delta-Stewardship-Council/swg-21-connectivity
R
false
false
1,282
r
#********************************************************************************************* #NEXXUS Process Part-2 #This is the ROI analysis part. It starts from the matched pair created in Part-1 process. #This code will run ANCOVA for product x, competitor products and market Rx and #prepare output for all designed metrics. THe out put tables will be written in excel file #with tabs: # Attrition, Descriptive_Table, Market Penetration, ANCOVA_Test, PreMatch_Test_Ctrl #and PostMatch_Test_Ctrl, Monthly_Rx (For pre-post Trend) #Final excel file will be saved in Out_dir with file name: OutPut_Campaign_&Prog_num..xlsx #Developer : Jie Zhao #*********************************************************************************************; # requeire libraries library(sas7bdat) library(zoo) library(xlsx) library(plyr) library(dplyr) library(lsmeans) library(reshape) library(snow) library(snowfall) # needed file path defination dataPath <- paste("\\\\plyvnas01\\statservices2\\CustomStudies\\Promotion Management", "\\2014\\NOVOLOG\\04 Codes\\Nexxus\\R_Version\\1016", sep='') outPath <- paste("\\\\plyvnas01\\statservices2\\CustomStudies\\Promotion Management", "\\2014\\NOVOLOG\\04 Codes\\Nexxus\\R_Version\\1016\\withoutSpec", sep='') # dataPath <- paste('D:\\jzhao\\Nexxus') # outPath <- paste('D:\\jzhao\\Nexxus\\output') Raw_Xpo_data = 'for_sas_test_0929' # Xponent Raw Rx Data Ext <- '.csv' Ext1 <- '.sas7bdat' part2_function <- function(Campaign_PRD, Pre_wks, Target_RX, Total_wks, CampaignId, Specfile, ZIP2Region, n.cpu){ #@@Parameters: Campaign_PRD = 'VICTOZA' # Campaign Product Name Pre_wks = 26 #Same as defined in part-1 Target_RX = 'NRX' #Match and analysis Rx. Same as used in Part-1 Total_wks = 128 #Total available data weeks. Same as used in Part-1 #Campaign_dat = 139 #Campaign Contact HCP list excel file campId <- '139' Specfile = 'SPEC_Grp' #Excel file for customized specialty groups. Default = All in 1 ZIP2Region = 'ZIP_region' #Excel file for Zip to customized region. Defaulat=Census regions); #@@Returns: #A list contains what the SAS outputs # Campaign_PRD = 'VICTOZA' # Pre_wks=26 # Target_RX='NRX' # Total_wks=128 # Campaign_dat=139 # Specfile='' # ZIP2Region='' #Campaign Contact HCP list excel file #Campaign_dat <- paste('CAMPAIGN', Campaign_dat, sep='_') #read in input data #campId <- # as.vector(read.csv(paste(dataPath, '\\test\\', # Campaign_dat, '_id', # Ext, sep=''))[1,1]) CStart <- min(as.Date(as.character(read.csv(paste(dataPath, '\\Campaign_', campId, Ext, sep=''))$Engagement_Date), format='%Y/%m/%d')) names <- scan(file=paste(dataPath, '\\', Raw_Xpo_data, Ext, sep=''), nlines=1, what="complex", sep=',', skip=0) line1 <- scan(file=paste(dataPath, '\\', Raw_Xpo_data, Ext, sep=''), nlines=1, what="complex", sep=',', skip=1) date_num <- line1[6] from <- Total_wks + as.numeric(strftime(CStart,format="%W")) + (as.numeric(strftime(CStart,format="%Y")) - as.numeric(strftime(line1[names=='datadate'],format="%Y"))) *52 - as.numeric(strftime(line1[names=='datadate'],format="%W")) - 26 to <- Total_wks + as.numeric(strftime(CStart,format="%W")) + (as.numeric(strftime(CStart,format="%Y")) - as.numeric(strftime(line1[names=='datadate'],format="%Y"))) *52 - as.numeric(strftime(line1[names=='datadate'],format="%W")) - 1 IMSDate2 <- line1[names=='datadate'] #read in the sasdatax.HCP_MATCHED_ALL_&Prog_num. 
matched_data <- read.table(paste(dataPath, '\\hcp_matched_all', Ext, sep=''), sep=',', header=T, stringsAsFactors=FALSE) # dim(matched_data) #[1] 15940 534 colnames(matched_data)<- tolower(colnames(matched_data)) var_list <- colnames(matched_data) # check the covariates existence covar_nece <- c("imsdr7", "region", "engagement_date", "cohort", "engage_wk") type <- c("integer", "character", "character", "integer", "integer") if (any(is.na(match(covar_nece, var_list)))){ covar_nece_str <- paste(covar_nece, sep='', collapse=" ") stop(paste("Please make sure that the variables below are all included in your matched data!\n", covar_nece_str, '\n', sep='')) } if (any(sapply(matched_data[1:100, covar_nece], class)!=type)){ type_str <- paste(rbind(paste(covar_nece, ':', sep=''), type), sep='', collapse=' ') stop(paste("the covariates data type is not matched the following expectation!\nt", type_str, '\n', sep='')) } #covar checking end matched_data1 <- matched_data[, match(grep('^(imsdr7|engage|trx|nrx)|cohort$', var_list, value=T), var_list)] var_list1 <- colnames(matched_data1) #rm(list=c('matched_data')) othT <- grep('^trx_oth', var_list1, value=T) othN <- grep('^nrx_oth', var_list1, value=T) nlT <- grep('^trx_wk', var_list1, value=T) nlN <- grep('^nrx_wk', var_list1, value=T) #matched_data1_test <- matched_data1[1:100,] #var_list1[apply(matched_data1_test, 2, is.numeric)] matched_data2_temp <- function(xx){ x <- xx[, -ncol(xx)] r <- 1:nrow(x) group <- ifelse(x[, match('cohort', var_list1)] ==1, "TEST", "CONTROL") temp1 <- lapply(1:Total_wks, function(i){ gap <- i - x$engage_wk - 1 pre_flag <- ifelse(gap > (-1-Pre_wks) & gap < 0, 1, ifelse(gap > 0, 0, -1)) return(list(col_idx = 1:length(r), pre_flag, gap)) }) library(plyr) temp1_1 <- ldply(temp1, quickdf) temp1_1 <- arrange(temp1_1, col_idx, X2) x_t <- data.frame(t(x)) test_fun <- function(x, y) { #row <- rownames(x_t) %in% var tmp <- x[y] return(tmp) } pre_flag <- tapply(temp1_1[, 2], temp1_1[, 1], function(x) x == 1) pre_othT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othT, ], pre_flag) pre_othT_withMiss <- do.call(rbind.data.frame, pre_othT_withMiss) pre_nlT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlT, ], pre_flag) pre_nlT_withMiss <- do.call(rbind.data.frame, pre_nlT_withMiss) pre_othN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othN, ], pre_flag) pre_othN_withMiss <- do.call(rbind.data.frame, pre_othN_withMiss) pre_nlN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlN, ], pre_flag) pre_nlN_withMiss <- do.call(rbind.data.frame, pre_nlN_withMiss) pre_period_othT <- rowSums(pre_othT_withMiss, na.rm = TRUE) pre_period_nlT <- rowSums(pre_nlT_withMiss, na.rm = TRUE) pre_period_othN <- rowSums(pre_othN_withMiss, na.rm = TRUE) pre_period_nlN <- rowSums(pre_nlN_withMiss, na.rm = TRUE) pre_market_T <- pre_period_othT + pre_period_nlT pre_market_N <- pre_period_othN + pre_period_nlN pre_flag1 <- tapply(temp1_1[, 2], temp1_1[, 1], function(x) x == 0) post_othT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othT, ], pre_flag1) post_othT_withMiss <- do.call(rbind.data.frame, post_othT_withMiss) post_nlT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlT, ], pre_flag1) post_nlT_withMiss <- do.call(rbind.data.frame, post_nlT_withMiss) post_othN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othN, ], pre_flag1) post_othN_withMiss <- do.call(rbind.data.frame, post_othN_withMiss) post_nlN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlN, ], pre_flag1) post_nlN_withMiss <- do.call(rbind.data.frame, post_nlN_withMiss) 
post_period_othT <- rowSums(post_othT_withMiss, na.rm = TRUE) post_period_nlT <- rowSums(post_nlT_withMiss, na.rm = TRUE) post_period_othN <- rowSums(post_othN_withMiss, na.rm = TRUE) post_period_nlN <- rowSums(post_nlN_withMiss, na.rm = TRUE) post_market_T <- post_period_othT + post_period_nlT post_market_N <- post_period_othN + post_period_nlN temp2 <- data.frame(pre_period_othT=pre_period_othT, pre_period_nlT=pre_period_nlT, pre_period_othN=pre_period_othN, pre_period_nlN=pre_period_nlN, pre_market_T=pre_market_T, pre_market_N=pre_market_N, post_period_othT=post_period_othT, post_period_nlT=post_period_nlT, post_period_othN=post_period_othN, post_period_nlN=post_period_nlN, post_market_T=post_market_T, post_market_N=post_market_N) temp2_1=cbind(temp2[, 1:6] * Pre_wks/sapply(pre_flag, sum, na.rm=T), temp2[, 7:12] * Pre_wks/sapply(pre_flag1, sum, na.rm=T)) x1 <- x[,-match(c('engage_wk'), var_list1)] # x2 <- as.vector(t(x1)) # names(x2) <- names(x1) gap <- temp1_1[seq(Total_wks, nrow(temp1_1), Total_wks), 3] return(data.frame(x1, group = group, gap = gap, temp2_1, engagement_date=xx$engagement_date)) } start_time<- proc.time() start1 <- proc.time() num_pros <- n.cpu sfInit(parallel=TRUE, cpus=num_pros) sfLibrary("dplyr",character.only = TRUE) sfExport( 'othT' ,'othN' ,'nlT' , 'nlN' , 'var_list1' , 'Total_wks' , 'Pre_wks') sfExport("ldply", namespace = "plyr") #run the function thru the combos #separate the whole matched_data1 into n.cpu parts set.seed(1) dt_flag <- sample(rep(1:n.cpu, length=nrow(matched_data1))) matched_data_sep <- lapply(1:n.cpu, function(i){ matched_data1[dt_flag==i, ] }) #matched_data2_1 <- sfClusterApplyLB(1:nrow(matched_data1), matched_data2_temp) matched_data2_1 <- sfClusterApplyLB(matched_data_sep, matched_data2_temp) sfStop() end_time<- proc.time() matched_data2 <- ldply(matched_data2_1, quickdf) cat('part1 used-', (proc.time()-start1)[3]/60, 'min!\n') #lapply(matched_data2_1, length) matched_data3_1 <- as.data.frame(apply(matched_data2[, -match(c('engagement_date', 'group'), names(matched_data2))], 2, as.numeric)) matched_data3 <- cbind(matched_data3_1, group=matched_data2$group, engagement_date=matched_data2$engagement_date) #Get HCP counts for matched Test and controls after check min post weeks line6 <- length(unique(matched_data3[matched_data3$cohort==1, 'imsdr7'])) line7 <- length(unique(matched_data3[matched_data3$cohort==0, 'imsdr7'])) #Get the adjusted Post RX counts using Proc GLM or MIXED - the ANCOVA analysis options("contrasts") product_list <- c('Product X ', 'Product Other than X ', 'Market (All Products)') anova_time0 <- proc.time() esti_list <- lapply(c('post_period_nlN', 'post_period_othN', 'post_market_N'), function(v){ pre_v <- gsub('post', 'pre', v) eval(parse(text=paste('results = with(matched_data3, lm(', v, ' ~ group + ', pre_v, ' ))', sep='' ))) #summary <- summary.lm(results) esti <- summary(lsmeans(results, "group"))[, 2] matched_hcps <- line6 index <- esti[2]/esti[1] p_value <- 1- pnorm((summary(lsmeans(results, "group"))[2, 2] - summary(lsmeans(results, "group"))[1, 2]) / (sqrt((summary(lsmeans(results, "group"))[2, 3]) ^ 2 + (summary(lsmeans(results, "group"))[1, 3]) ^ 2) )) gains <- esti[2]-esti[1] return(c(product= product_list[match(v, c('post_period_nlN', 'post_period_othN', 'post_market_N'))], matched_hcps=matched_hcps, test=unname(esti[2]), cont=unname(esti[1]), gains=unname(gains), index=unname(index), p_value=unname(p_value))) }) esti_df <- ldply(esti_list, quickdf) cat('anova time used-', (proc.time()-anova_time0)[3], 
'sec!\n') # START: create points for graph # matched_data_forGp get_graph_data_1 <- function(x){ r <- 1:nrow(x) group <- ifelse(x$cohort==1, 'TEST', 'CONTROL') gap_vct <- as.data.frame(sapply(x$engage_wk, function(x)c(1:Total_wks) - x - 1)) #[128, 6] gap_flag <- as.data.frame( sapply(as.data.frame(gap_vct), function(x) ifelse(x >(-1-Pre_wks) & x < (1+Pre_wks), T, F) ) ) breaks <- c(c(-Pre_wks+c(-Inf, -1, 4, 8, 13, 17, 21, 26)), c(4, 8, 13, 17, 21, 26)) Rel_month_vct <- c(-Inf, seq(-6, -1, 1), seq(1, 6, 1)) bucket <- sapply(gap_vct, function(x)cut(x, breaks, right=F, labels=Rel_month_vct)) rel_month <- lapply(r, function(i)bucket[gap_flag[, i], i]) breaks1 <- c(-Inf, -Pre_wks-0.1, -14, -5, -1, Inf) bucket_period <- lapply(r, function(i){ tt <- cut(gap_vct[gap_flag[, i], i] , breaks1 , right=T , labels=c(-999, -3, -2, -1, 999) ) tt <- as.numeric(levels(tt)[tt]) return(tt) }) period <- lapply(r, function(i) ifelse(bucket_period[[i]]==999 | bucket_period[[i]]==-999, rel_month[[i]], bucket_period[[i]]) ) t1 <- proc.time() temp <- lapply(r, function(i){ rel_trx <- as.numeric(x[i, match(nlT, var_list)][, gap_flag[, i]]) rel_nrx <- as.numeric(x[i, match(nlN, var_list)][, gap_flag[, i]]) rel_trx_other <- as.numeric(x[i, match(othT, var_list)][, gap_flag[, i]]) rel_nrx_other <- as.numeric(x[i, match(othN, var_list)][, gap_flag[, i]]) mkt_trx <- rel_trx + rel_trx_other mkt_nrx <- rel_nrx + rel_nrx_other # temp0 <- cbind(imsdr7=x$imsdr7[i], # group=group[i], rel_wk=gap_vct[gap_flag[, i], i], # rel_month=rel_month[[i]], rel_trx=rel_trx, # rel_nrx=rel_nrx, rel_trx_other=rel_trx_other, # rel_nrx_other=rel_nrx_other, mkt_trx=mkt_trx, # mkt_nrx=mkt_nrx, period=period[[i]], # spec=x$spec[i], zipcode=x$zipcode[i] # ) temp0 <- list(imsdr7=rep(x$imsdr7[i], length(rel_trx)), group=rep(group[i], length(rel_trx)), rel_wk=gap_vct[gap_flag[, i], i], rel_month=rel_month[[i]], rel_trx=rel_trx, rel_nrx=rel_nrx, rel_trx_other=rel_trx_other, rel_nrx_other=rel_nrx_other, mkt_trx=mkt_trx, mkt_nrx=mkt_nrx, period=period[[i]], spec=rep(x$spec[i], length(rel_trx)), zipcode=rep(x$zipcode[i], length(rel_trx)) ) return(temp0) }) cat((proc.time()-t1)[3]/60, 'min!\n') # temp1 <- do.call(rbind.data.frame, temp) temp1 <- do.call(rbind, lapply(temp, data.frame, stringsAsFactors=F)) # temp1 <- as_data_frame(temp) # temp1 <- ldply(temp, quickdf) #rel_trx <- mapply(function(a, b)a[b], as.matrix(x[, match(nlT, var_list)]), t(gap_flag)) return(temp1) } start_time<- proc.time() start2 <- proc.time() num_pros <- n.cpu sfInit(parallel=TRUE, cpus=num_pros) sfExport('othT','othN','nlT', 'nlN' , 'var_list', 'Total_wks', 'Pre_wks' , 'Specfile', 'ZIP2Region', 'dataPath' , 'Ext') sfClusterEval(library("plyr")) sfClusterEval(library("dplyr")) #run the function thru the combos #separate the whole matched_data into n.cpu parts dt_flag <- sample(rep(1:n.cpu, length=nrow(matched_data))) matched_data_sep <- lapply(1:n.cpu, function(i){ matched_data[dt_flag==i, ] }) graph_data2_temp <- sfClusterApplyLB(matched_data_sep, get_graph_data_1) #graph_data2_temp <- sfClusterApplyLB(1:2000, get_graph_data_1) sfStop() end_time<- proc.time() check_dim <- ldply(lapply(graph_data2_temp, function(i){ return(dim(i)) }), quickdf) graph_data_2 <- ldply(graph_data2_temp, quickdf) #create 'speciatly' and 'region' if 'Specfile' and 'ZIP2Region' files are given. 
if(Specfile != ''){ spec_dic <- read.table(paste(dataPath, '\\', Specfile, Ext, sep=''), sep=',', header=T , stringsAsFactors=F) idx_spec <- match(graph_data_2$spec, spec_dic$SPEC_CD) graph_data_2$specialty <- ifelse(!is.na(idx_spec), as.character(spec_dic[idx_spec, 'CLASS_DESC']), 'ALL OTHER') }else{ graph_data_2$specialty <- "ALL SPECIALTY" } if(ZIP2Region != ''){ reg_dic <- read.table(paste(dataPath, '\\', ZIP2Region, Ext, sep=''), sep=',', header=T , stringsAsFactors=F) graph_data_2$region <- ifelse(!is.na(match(graph_data_2$zipcode, reg_dic$ZIPCODE)), as.character(reg_dic[match(graph_data_2$zipcode, reg_dic$ZIPCODE), "REGION"]), 'OTHER') }else{ graph_data_2$region <- 'ALL REGION' } graph_data_2$spec <- NULL graph_data_2$zipcode <- NULL var_list2 <- names(graph_data_2) cat('part2 used-', (proc.time()-start2)[3]/60, 'min!\n') # *********** Get Monthly summary ****************; target_trx <- grep('trx|nrx', var_list2, value=T) graph_data_summary <- graph_data_2[, c(target_trx, 'group', 'rel_month')] %>% group_by(group, rel_month) %>% summarise_each(funs(sum)) hcp_cnt <- graph_data_2[, c('imsdr7', 'group', 'rel_month')] %>% group_by(group, rel_month) %>% summarise_each(funs(length(unique(.)))) graph_data_summary_1 <- left_join(graph_data_summary[order(graph_data_summary$group), ], hcp_cnt, by=c("group" = 'group', "rel_month" = "rel_month")) names(graph_data_summary_1)[length(graph_data_summary_1)] <- 'hcp_cnt' #******************* Get Post Match test/control comparison ****************; monthly_data <- cbind(graph_data_2[, c(target_trx , 'specialty' , 'region' , 'group' , 'imsdr7' , 'period')] , freq=rep(1, nrow(graph_data_2))) %>% group_by(specialty, region, group, imsdr7, period) %>% summarise_each(funs(sum(., na.rm=T))) %>% filter(as.numeric(period) < 0) monthly_mean <- monthly_data[, c(target_trx , 'specialty' , 'region' , 'group' # , 'imsdr7' , 'period')] %>% group_by(specialty, region, group, period) %>% summarise_each(funs(mean(., na.rm=T))) monthly_std <- monthly_data[, c(target_trx , 'specialty' , 'region' , 'group' # , 'imsdr7' , 'period')] %>% group_by(specialty, region, group, period) %>% summarise_each(funs(sd(., na.rm=T))) names(monthly_std)[match(target_trx, names(monthly_std))] <- paste(names(monthly_std)[match(target_trx, names(monthly_std))], '_std', sep='') monthly_mean_std <- left_join(monthly_mean, monthly_std, by=c('specialty', 'region','group', 'period')) monthly_mean_std$freq <- aggregate(rep(1, nrow(monthly_data)), by=list(specialty=monthly_data$specialty, region=monthly_data$region, group=monthly_data$group, period=monthly_data$period), function(x){sum(x, na.rm=T)})[,5] monthly_all_region_mean <- monthly_data[, c(target_trx , 'specialty' , 'group' , 'period' )] %>% group_by(specialty, group, period) %>% summarise_each(funs(mean(., na.rm=T))) monthly_all_region_std <- monthly_data[, c(target_trx , 'specialty' , 'group' , 'period' )] %>% group_by(specialty, group, period) %>% summarise_each(funs(sd(., na.rm=T))) names(monthly_all_region_std)[match(target_trx, names(monthly_all_region_std))] <- paste(target_trx, '_std', sep='') monthly_all_region_freq <- aggregate(rep(1, nrow(monthly_data)), by=list(specialty=monthly_data$specialty, group=monthly_data$group, period=monthly_data$period), function(x){sum(x, na.rm=T)}) monthly_all_region <- left_join(left_join(monthly_all_region_mean, monthly_all_region_std, by=c('specialty', 'group', 'period')), monthly_all_region_freq, by=c('specialty', 'group', 'period')) monthly_all_region$region <- 'all region' 
names(monthly_all_region)[match('x', names(monthly_all_region))] <- 'freq' monthly_mean_std_all <- rbind(monthly_all_region, monthly_mean_std) reshape <- function(input){ levels <- levels(as.factor(input$group)) stack <- function(i){ data <- input[input$group==i, ] transp_list <- lapply(1:nrow(data), function(r){ x <- data[r,] temp1 <- x[c(1, 3, ncol(monthly_mean_std_all))] temp2 <- t(x[-c(1, 2, 3, ncol(monthly_mean_std_all))]) new_names <- rownames(temp2) temp3 <- cbind(temp1, new_names,temp2) colnames(temp3)[ncol(temp3)] <- i return(temp3) }) stacked_data <- ldply(transp_list, rbind) return(stacked_data) } cont <- stack(levels[levels=='CONTROL']) test <- stack(levels[levels=='TEST']) cont_test <- left_join(cont, test, by=c('specialty', 'region', 'period', 'new_names')) return(cont_test) } pre_compare <- reshape(monthly_mean_std_all) var_list3 <- names(pre_compare) for_compare <- function(input, var){ temp <- pre_compare[pre_compare$new_names==var, ] temp_ord <- temp[order(temp[, match('specialty',var_list3)], temp[, match('region', var_list3)], temp[, match('period', var_list3)]), match(c('specialty', 'region', "period", 'CONTROL', 'TEST'), var_list3)] } nov_rx <- for_compare(pre_compare, 'rel_nrx') nov_std <- for_compare(pre_compare, 'rel_nrx_std') freq <- for_compare(pre_compare, 'freq') levels <- grep('^CONTROL|^TEST', names(nov_std), ignore.case=T, value=T) names(nov_std)[match(levels, names(nov_std))] <- paste(names(nov_std)[match(levels, names(nov_std))], '_std', sep='') names(freq)[match(levels, names(freq))] <- paste(names(freq)[match(levels, names(freq))], '_cnt', sep='') post_match_compare <- left_join(left_join(nov_rx, nov_std, by=c('specialty', 'region', 'period')), freq, by=c('specialty', 'region', 'period')) post_match_compare$p_value <- unlist(lapply(1:nrow(post_match_compare), function(r){ x <- post_match_compare[r, ] for_p_value <- ifelse(x$TEST_std > 0, abs(x$TEST-x$CONTROL)/sqrt(x$TEST_std^2/x$TEST_cnt+x$CONTROL_std^2/x$CONTROL_cnt), NA) p_value <- ifelse(!is.na(for_p_value), 2*(1-pnorm(for_p_value)), NA) return(p_value) })) return(list(esti_df = esti_df, pre_compare = pre_compare, post_match_compare = post_match_compare, graph_data_summary_1 = graph_data_summary_1)) } system.time(part2_result <- part2_function(Campaign_PRD = 'VICTOZA', Pre_wks=26, Target_RX='NRX', Total_wks=128, CampaignId=139, Specfile='spec_grp', ZIP2Region='ZIP_region', n.cpu=8)) #esti_df <- part2_result$esti_df pre_compare <- part2_result$pre_compare post_match_compare <- part2_result$post_match_compare graph_data_summary_1 <- part2_result$graph_data_summary_1 #write the result of part1 write.xlsx(part1$descriptive, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='descriptive', append=T, row.names=F) write.xlsx(part1$pre_period_summary, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='pre_match_compare', append=T, row.names=F) write.xlsx(part1$penetration, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='penetration', append=T, row.names=F) write.xlsx(part1$attrition, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='attrition', append=T, row.names=F) #write the result of part2 write.xlsx(esti_df, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='anova_test', append=T, row.names=F) write.xlsx(pre_compare, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='pre_compare', append=T, row.names=F) write.xlsx(post_match_compare, paste(outPath, 
'\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='post_match_compare', append=T, row.names=F) write.xlsx(graph_data_summary_1, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='graph_data', append=T, row.names=F)
/part2_0115.R
no_license
jzhao0802/Nuxus_sas_to_R
R
false
false
29,916
r
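# Illustrative aside (not part of the original NEXXUS script above): a minimal,
# self-contained sketch of the ANCOVA lift estimate that part2_function() computes.
# It uses base R (predicting each group at the overall mean of the pre-period
# covariate) rather than lsmeans, and the toy data below are hypothetical.
set.seed(42)
demo <- data.frame(
  group = rep(c("CONTROL", "TEST"), each = 50),
  pre   = rpois(100, lambda = 10)
)
demo$post <- demo$pre + ifelse(demo$group == "TEST", 2, 0) + rnorm(100)

# ANCOVA: post-period volume on cohort plus pre-period volume.
fit <- lm(post ~ group + pre, data = demo)

# Covariate-adjusted group means, evaluated at the mean of 'pre'.
newd <- data.frame(group = c("CONTROL", "TEST"), pre = mean(demo$pre))
adj_means <- predict(fit, newdata = newd)

adj_means[2] - adj_means[1]   # lift, analogous to 'gains' in esti_df
adj_means[2] / adj_means[1]   # ratio, analogous to 'index' in esti_df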
#********************************************************************************************* #NEXXUS Process Part-2 #This is the ROI analysis part. It starts from the matched pair created in Part-1 process. #This code will run ANCOVA for product x, competitor products and market Rx and #prepare output for all designed metrics. THe out put tables will be written in excel file #with tabs: # Attrition, Descriptive_Table, Market Penetration, ANCOVA_Test, PreMatch_Test_Ctrl #and PostMatch_Test_Ctrl, Monthly_Rx (For pre-post Trend) #Final excel file will be saved in Out_dir with file name: OutPut_Campaign_&Prog_num..xlsx #Developer : Jie Zhao #*********************************************************************************************; # requeire libraries library(sas7bdat) library(zoo) library(xlsx) library(plyr) library(dplyr) library(lsmeans) library(reshape) library(snow) library(snowfall) # needed file path defination dataPath <- paste("\\\\plyvnas01\\statservices2\\CustomStudies\\Promotion Management", "\\2014\\NOVOLOG\\04 Codes\\Nexxus\\R_Version\\1016", sep='') outPath <- paste("\\\\plyvnas01\\statservices2\\CustomStudies\\Promotion Management", "\\2014\\NOVOLOG\\04 Codes\\Nexxus\\R_Version\\1016\\withoutSpec", sep='') # dataPath <- paste('D:\\jzhao\\Nexxus') # outPath <- paste('D:\\jzhao\\Nexxus\\output') Raw_Xpo_data = 'for_sas_test_0929' # Xponent Raw Rx Data Ext <- '.csv' Ext1 <- '.sas7bdat' part2_function <- function(Campaign_PRD, Pre_wks, Target_RX, Total_wks, CampaignId, Specfile, ZIP2Region, n.cpu){ #@@Parameters: Campaign_PRD = 'VICTOZA' # Campaign Product Name Pre_wks = 26 #Same as defined in part-1 Target_RX = 'NRX' #Match and analysis Rx. Same as used in Part-1 Total_wks = 128 #Total available data weeks. Same as used in Part-1 #Campaign_dat = 139 #Campaign Contact HCP list excel file campId <- '139' Specfile = 'SPEC_Grp' #Excel file for customized specialty groups. Default = All in 1 ZIP2Region = 'ZIP_region' #Excel file for Zip to customized region. Defaulat=Census regions); #@@Returns: #A list contains what the SAS outputs # Campaign_PRD = 'VICTOZA' # Pre_wks=26 # Target_RX='NRX' # Total_wks=128 # Campaign_dat=139 # Specfile='' # ZIP2Region='' #Campaign Contact HCP list excel file #Campaign_dat <- paste('CAMPAIGN', Campaign_dat, sep='_') #read in input data #campId <- # as.vector(read.csv(paste(dataPath, '\\test\\', # Campaign_dat, '_id', # Ext, sep=''))[1,1]) CStart <- min(as.Date(as.character(read.csv(paste(dataPath, '\\Campaign_', campId, Ext, sep=''))$Engagement_Date), format='%Y/%m/%d')) names <- scan(file=paste(dataPath, '\\', Raw_Xpo_data, Ext, sep=''), nlines=1, what="complex", sep=',', skip=0) line1 <- scan(file=paste(dataPath, '\\', Raw_Xpo_data, Ext, sep=''), nlines=1, what="complex", sep=',', skip=1) date_num <- line1[6] from <- Total_wks + as.numeric(strftime(CStart,format="%W")) + (as.numeric(strftime(CStart,format="%Y")) - as.numeric(strftime(line1[names=='datadate'],format="%Y"))) *52 - as.numeric(strftime(line1[names=='datadate'],format="%W")) - 26 to <- Total_wks + as.numeric(strftime(CStart,format="%W")) + (as.numeric(strftime(CStart,format="%Y")) - as.numeric(strftime(line1[names=='datadate'],format="%Y"))) *52 - as.numeric(strftime(line1[names=='datadate'],format="%W")) - 1 IMSDate2 <- line1[names=='datadate'] #read in the sasdatax.HCP_MATCHED_ALL_&Prog_num. 
matched_data <- read.table(paste(dataPath, '\\hcp_matched_all', Ext, sep=''), sep=',', header=T, stringsAsFactors=FALSE) # dim(matched_data) #[1] 15940 534 colnames(matched_data)<- tolower(colnames(matched_data)) var_list <- colnames(matched_data) # check the covariates existence covar_nece <- c("imsdr7", "region", "engagement_date", "cohort", "engage_wk") type <- c("integer", "character", "character", "integer", "integer") if (any(is.na(match(covar_nece, var_list)))){ covar_nece_str <- paste(covar_nece, sep='', collapse=" ") stop(paste("Please make sure that the variables below are all included in your matched data!\n", covar_nece_str, '\n', sep='')) } if (any(sapply(matched_data[1:100, covar_nece], class)!=type)){ type_str <- paste(rbind(paste(covar_nece, ':', sep=''), type), sep='', collapse=' ') stop(paste("the covariates data type is not matched the following expectation!\nt", type_str, '\n', sep='')) } #covar checking end matched_data1 <- matched_data[, match(grep('^(imsdr7|engage|trx|nrx)|cohort$', var_list, value=T), var_list)] var_list1 <- colnames(matched_data1) #rm(list=c('matched_data')) othT <- grep('^trx_oth', var_list1, value=T) othN <- grep('^nrx_oth', var_list1, value=T) nlT <- grep('^trx_wk', var_list1, value=T) nlN <- grep('^nrx_wk', var_list1, value=T) #matched_data1_test <- matched_data1[1:100,] #var_list1[apply(matched_data1_test, 2, is.numeric)] matched_data2_temp <- function(xx){ x <- xx[, -ncol(xx)] r <- 1:nrow(x) group <- ifelse(x[, match('cohort', var_list1)] ==1, "TEST", "CONTROL") temp1 <- lapply(1:Total_wks, function(i){ gap <- i - x$engage_wk - 1 pre_flag <- ifelse(gap > (-1-Pre_wks) & gap < 0, 1, ifelse(gap > 0, 0, -1)) return(list(col_idx = 1:length(r), pre_flag, gap)) }) library(plyr) temp1_1 <- ldply(temp1, quickdf) temp1_1 <- arrange(temp1_1, col_idx, X2) x_t <- data.frame(t(x)) test_fun <- function(x, y) { #row <- rownames(x_t) %in% var tmp <- x[y] return(tmp) } pre_flag <- tapply(temp1_1[, 2], temp1_1[, 1], function(x) x == 1) pre_othT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othT, ], pre_flag) pre_othT_withMiss <- do.call(rbind.data.frame, pre_othT_withMiss) pre_nlT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlT, ], pre_flag) pre_nlT_withMiss <- do.call(rbind.data.frame, pre_nlT_withMiss) pre_othN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othN, ], pre_flag) pre_othN_withMiss <- do.call(rbind.data.frame, pre_othN_withMiss) pre_nlN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlN, ], pre_flag) pre_nlN_withMiss <- do.call(rbind.data.frame, pre_nlN_withMiss) pre_period_othT <- rowSums(pre_othT_withMiss, na.rm = TRUE) pre_period_nlT <- rowSums(pre_nlT_withMiss, na.rm = TRUE) pre_period_othN <- rowSums(pre_othN_withMiss, na.rm = TRUE) pre_period_nlN <- rowSums(pre_nlN_withMiss, na.rm = TRUE) pre_market_T <- pre_period_othT + pre_period_nlT pre_market_N <- pre_period_othN + pre_period_nlN pre_flag1 <- tapply(temp1_1[, 2], temp1_1[, 1], function(x) x == 0) post_othT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othT, ], pre_flag1) post_othT_withMiss <- do.call(rbind.data.frame, post_othT_withMiss) post_nlT_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlT, ], pre_flag1) post_nlT_withMiss <- do.call(rbind.data.frame, post_nlT_withMiss) post_othN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% othN, ], pre_flag1) post_othN_withMiss <- do.call(rbind.data.frame, post_othN_withMiss) post_nlN_withMiss <- Map(test_fun, x_t[rownames(x_t) %in% nlN, ], pre_flag1) post_nlN_withMiss <- do.call(rbind.data.frame, post_nlN_withMiss) 
post_period_othT <- rowSums(post_othT_withMiss, na.rm = TRUE) post_period_nlT <- rowSums(post_nlT_withMiss, na.rm = TRUE) post_period_othN <- rowSums(post_othN_withMiss, na.rm = TRUE) post_period_nlN <- rowSums(post_nlN_withMiss, na.rm = TRUE) post_market_T <- post_period_othT + post_period_nlT post_market_N <- post_period_othN + post_period_nlN temp2 <- data.frame(pre_period_othT=pre_period_othT, pre_period_nlT=pre_period_nlT, pre_period_othN=pre_period_othN, pre_period_nlN=pre_period_nlN, pre_market_T=pre_market_T, pre_market_N=pre_market_N, post_period_othT=post_period_othT, post_period_nlT=post_period_nlT, post_period_othN=post_period_othN, post_period_nlN=post_period_nlN, post_market_T=post_market_T, post_market_N=post_market_N) temp2_1=cbind(temp2[, 1:6] * Pre_wks/sapply(pre_flag, sum, na.rm=T), temp2[, 7:12] * Pre_wks/sapply(pre_flag1, sum, na.rm=T)) x1 <- x[,-match(c('engage_wk'), var_list1)] # x2 <- as.vector(t(x1)) # names(x2) <- names(x1) gap <- temp1_1[seq(Total_wks, nrow(temp1_1), Total_wks), 3] return(data.frame(x1, group = group, gap = gap, temp2_1, engagement_date=xx$engagement_date)) } start_time<- proc.time() start1 <- proc.time() num_pros <- n.cpu sfInit(parallel=TRUE, cpus=num_pros) sfLibrary("dplyr",character.only = TRUE) sfExport( 'othT' ,'othN' ,'nlT' , 'nlN' , 'var_list1' , 'Total_wks' , 'Pre_wks') sfExport("ldply", namespace = "plyr") #run the function thru the combos #separate the whole matched_data1 into n.cpu parts set.seed(1) dt_flag <- sample(rep(1:n.cpu, length=nrow(matched_data1))) matched_data_sep <- lapply(1:n.cpu, function(i){ matched_data1[dt_flag==i, ] }) #matched_data2_1 <- sfClusterApplyLB(1:nrow(matched_data1), matched_data2_temp) matched_data2_1 <- sfClusterApplyLB(matched_data_sep, matched_data2_temp) sfStop() end_time<- proc.time() matched_data2 <- ldply(matched_data2_1, quickdf) cat('part1 used-', (proc.time()-start1)[3]/60, 'min!\n') #lapply(matched_data2_1, length) matched_data3_1 <- as.data.frame(apply(matched_data2[, -match(c('engagement_date', 'group'), names(matched_data2))], 2, as.numeric)) matched_data3 <- cbind(matched_data3_1, group=matched_data2$group, engagement_date=matched_data2$engagement_date) #Get HCP counts for matched Test and controls after check min post weeks line6 <- length(unique(matched_data3[matched_data3$cohort==1, 'imsdr7'])) line7 <- length(unique(matched_data3[matched_data3$cohort==0, 'imsdr7'])) #Get the adjusted Post RX counts using Proc GLM or MIXED - the ANCOVA analysis options("contrasts") product_list <- c('Product X ', 'Product Other than X ', 'Market (All Products)') anova_time0 <- proc.time() esti_list <- lapply(c('post_period_nlN', 'post_period_othN', 'post_market_N'), function(v){ pre_v <- gsub('post', 'pre', v) eval(parse(text=paste('results = with(matched_data3, lm(', v, ' ~ group + ', pre_v, ' ))', sep='' ))) #summary <- summary.lm(results) esti <- summary(lsmeans(results, "group"))[, 2] matched_hcps <- line6 index <- esti[2]/esti[1] p_value <- 1- pnorm((summary(lsmeans(results, "group"))[2, 2] - summary(lsmeans(results, "group"))[1, 2]) / (sqrt((summary(lsmeans(results, "group"))[2, 3]) ^ 2 + (summary(lsmeans(results, "group"))[1, 3]) ^ 2) )) gains <- esti[2]-esti[1] return(c(product= product_list[match(v, c('post_period_nlN', 'post_period_othN', 'post_market_N'))], matched_hcps=matched_hcps, test=unname(esti[2]), cont=unname(esti[1]), gains=unname(gains), index=unname(index), p_value=unname(p_value))) }) esti_df <- ldply(esti_list, quickdf) cat('anova time used-', (proc.time()-anova_time0)[3], 
'sec!\n') # START: create points for graph # matched_data_forGp get_graph_data_1 <- function(x){ r <- 1:nrow(x) group <- ifelse(x$cohort==1, 'TEST', 'CONTROL') gap_vct <- as.data.frame(sapply(x$engage_wk, function(x)c(1:Total_wks) - x - 1)) #[128, 6] gap_flag <- as.data.frame( sapply(as.data.frame(gap_vct), function(x) ifelse(x >(-1-Pre_wks) & x < (1+Pre_wks), T, F) ) ) breaks <- c(c(-Pre_wks+c(-Inf, -1, 4, 8, 13, 17, 21, 26)), c(4, 8, 13, 17, 21, 26)) Rel_month_vct <- c(-Inf, seq(-6, -1, 1), seq(1, 6, 1)) bucket <- sapply(gap_vct, function(x)cut(x, breaks, right=F, labels=Rel_month_vct)) rel_month <- lapply(r, function(i)bucket[gap_flag[, i], i]) breaks1 <- c(-Inf, -Pre_wks-0.1, -14, -5, -1, Inf) bucket_period <- lapply(r, function(i){ tt <- cut(gap_vct[gap_flag[, i], i] , breaks1 , right=T , labels=c(-999, -3, -2, -1, 999) ) tt <- as.numeric(levels(tt)[tt]) return(tt) }) period <- lapply(r, function(i) ifelse(bucket_period[[i]]==999 | bucket_period[[i]]==-999, rel_month[[i]], bucket_period[[i]]) ) t1 <- proc.time() temp <- lapply(r, function(i){ rel_trx <- as.numeric(x[i, match(nlT, var_list)][, gap_flag[, i]]) rel_nrx <- as.numeric(x[i, match(nlN, var_list)][, gap_flag[, i]]) rel_trx_other <- as.numeric(x[i, match(othT, var_list)][, gap_flag[, i]]) rel_nrx_other <- as.numeric(x[i, match(othN, var_list)][, gap_flag[, i]]) mkt_trx <- rel_trx + rel_trx_other mkt_nrx <- rel_nrx + rel_nrx_other # temp0 <- cbind(imsdr7=x$imsdr7[i], # group=group[i], rel_wk=gap_vct[gap_flag[, i], i], # rel_month=rel_month[[i]], rel_trx=rel_trx, # rel_nrx=rel_nrx, rel_trx_other=rel_trx_other, # rel_nrx_other=rel_nrx_other, mkt_trx=mkt_trx, # mkt_nrx=mkt_nrx, period=period[[i]], # spec=x$spec[i], zipcode=x$zipcode[i] # ) temp0 <- list(imsdr7=rep(x$imsdr7[i], length(rel_trx)), group=rep(group[i], length(rel_trx)), rel_wk=gap_vct[gap_flag[, i], i], rel_month=rel_month[[i]], rel_trx=rel_trx, rel_nrx=rel_nrx, rel_trx_other=rel_trx_other, rel_nrx_other=rel_nrx_other, mkt_trx=mkt_trx, mkt_nrx=mkt_nrx, period=period[[i]], spec=rep(x$spec[i], length(rel_trx)), zipcode=rep(x$zipcode[i], length(rel_trx)) ) return(temp0) }) cat((proc.time()-t1)[3]/60, 'min!\n') # temp1 <- do.call(rbind.data.frame, temp) temp1 <- do.call(rbind, lapply(temp, data.frame, stringsAsFactors=F)) # temp1 <- as_data_frame(temp) # temp1 <- ldply(temp, quickdf) #rel_trx <- mapply(function(a, b)a[b], as.matrix(x[, match(nlT, var_list)]), t(gap_flag)) return(temp1) } start_time<- proc.time() start2 <- proc.time() num_pros <- n.cpu sfInit(parallel=TRUE, cpus=num_pros) sfExport('othT','othN','nlT', 'nlN' , 'var_list', 'Total_wks', 'Pre_wks' , 'Specfile', 'ZIP2Region', 'dataPath' , 'Ext') sfClusterEval(library("plyr")) sfClusterEval(library("dplyr")) #run the function thru the combos #separate the whole matched_data into n.cpu parts dt_flag <- sample(rep(1:n.cpu, length=nrow(matched_data))) matched_data_sep <- lapply(1:n.cpu, function(i){ matched_data[dt_flag==i, ] }) graph_data2_temp <- sfClusterApplyLB(matched_data_sep, get_graph_data_1) #graph_data2_temp <- sfClusterApplyLB(1:2000, get_graph_data_1) sfStop() end_time<- proc.time() check_dim <- ldply(lapply(graph_data2_temp, function(i){ return(dim(i)) }), quickdf) graph_data_2 <- ldply(graph_data2_temp, quickdf) #create 'speciatly' and 'region' if 'Specfile' and 'ZIP2Region' files are given. 
if(Specfile != ''){ spec_dic <- read.table(paste(dataPath, '\\', Specfile, Ext, sep=''), sep=',', header=T , stringsAsFactors=F) idx_spec <- match(graph_data_2$spec, spec_dic$SPEC_CD) graph_data_2$specialty <- ifelse(!is.na(idx_spec), as.character(spec_dic[idx_spec, 'CLASS_DESC']), 'ALL OTHER') }else{ graph_data_2$specialty <- "ALL SPECIALTY" } if(ZIP2Region != ''){ reg_dic <- read.table(paste(dataPath, '\\', ZIP2Region, Ext, sep=''), sep=',', header=T , stringsAsFactors=F) graph_data_2$region <- ifelse(!is.na(match(graph_data_2$zipcode, reg_dic$ZIPCODE)), as.character(reg_dic[match(graph_data_2$zipcode, reg_dic$ZIPCODE), "REGION"]), 'OTHER') }else{ graph_data_2$region <- 'ALL REGION' } graph_data_2$spec <- NULL graph_data_2$zipcode <- NULL var_list2 <- names(graph_data_2) cat('part2 used-', (proc.time()-start2)[3]/60, 'min!\n') # *********** Get Monthly summary ****************; target_trx <- grep('trx|nrx', var_list2, value=T) graph_data_summary <- graph_data_2[, c(target_trx, 'group', 'rel_month')] %>% group_by(group, rel_month) %>% summarise_each(funs(sum)) hcp_cnt <- graph_data_2[, c('imsdr7', 'group', 'rel_month')] %>% group_by(group, rel_month) %>% summarise_each(funs(length(unique(.)))) graph_data_summary_1 <- left_join(graph_data_summary[order(graph_data_summary$group), ], hcp_cnt, by=c("group" = 'group', "rel_month" = "rel_month")) names(graph_data_summary_1)[length(graph_data_summary_1)] <- 'hcp_cnt' #******************* Get Post Match test/control comparison ****************; monthly_data <- cbind(graph_data_2[, c(target_trx , 'specialty' , 'region' , 'group' , 'imsdr7' , 'period')] , freq=rep(1, nrow(graph_data_2))) %>% group_by(specialty, region, group, imsdr7, period) %>% summarise_each(funs(sum(., na.rm=T))) %>% filter(as.numeric(period) < 0) monthly_mean <- monthly_data[, c(target_trx , 'specialty' , 'region' , 'group' # , 'imsdr7' , 'period')] %>% group_by(specialty, region, group, period) %>% summarise_each(funs(mean(., na.rm=T))) monthly_std <- monthly_data[, c(target_trx , 'specialty' , 'region' , 'group' # , 'imsdr7' , 'period')] %>% group_by(specialty, region, group, period) %>% summarise_each(funs(sd(., na.rm=T))) names(monthly_std)[match(target_trx, names(monthly_std))] <- paste(names(monthly_std)[match(target_trx, names(monthly_std))], '_std', sep='') monthly_mean_std <- left_join(monthly_mean, monthly_std, by=c('specialty', 'region','group', 'period')) monthly_mean_std$freq <- aggregate(rep(1, nrow(monthly_data)), by=list(specialty=monthly_data$specialty, region=monthly_data$region, group=monthly_data$group, period=monthly_data$period), function(x){sum(x, na.rm=T)})[,5] monthly_all_region_mean <- monthly_data[, c(target_trx , 'specialty' , 'group' , 'period' )] %>% group_by(specialty, group, period) %>% summarise_each(funs(mean(., na.rm=T))) monthly_all_region_std <- monthly_data[, c(target_trx , 'specialty' , 'group' , 'period' )] %>% group_by(specialty, group, period) %>% summarise_each(funs(sd(., na.rm=T))) names(monthly_all_region_std)[match(target_trx, names(monthly_all_region_std))] <- paste(target_trx, '_std', sep='') monthly_all_region_freq <- aggregate(rep(1, nrow(monthly_data)), by=list(specialty=monthly_data$specialty, group=monthly_data$group, period=monthly_data$period), function(x){sum(x, na.rm=T)}) monthly_all_region <- left_join(left_join(monthly_all_region_mean, monthly_all_region_std, by=c('specialty', 'group', 'period')), monthly_all_region_freq, by=c('specialty', 'group', 'period')) monthly_all_region$region <- 'all region' 
names(monthly_all_region)[match('x', names(monthly_all_region))] <- 'freq' monthly_mean_std_all <- rbind(monthly_all_region, monthly_mean_std) reshape <- function(input){ levels <- levels(as.factor(input$group)) stack <- function(i){ data <- input[input$group==i, ] transp_list <- lapply(1:nrow(data), function(r){ x <- data[r,] temp1 <- x[c(1, 3, ncol(monthly_mean_std_all))] temp2 <- t(x[-c(1, 2, 3, ncol(monthly_mean_std_all))]) new_names <- rownames(temp2) temp3 <- cbind(temp1, new_names,temp2) colnames(temp3)[ncol(temp3)] <- i return(temp3) }) stacked_data <- ldply(transp_list, rbind) return(stacked_data) } cont <- stack(levels[levels=='CONTROL']) test <- stack(levels[levels=='TEST']) cont_test <- left_join(cont, test, by=c('specialty', 'region', 'period', 'new_names')) return(cont_test) } pre_compare <- reshape(monthly_mean_std_all) var_list3 <- names(pre_compare) for_compare <- function(input, var){ temp <- pre_compare[pre_compare$new_names==var, ] temp_ord <- temp[order(temp[, match('specialty',var_list3)], temp[, match('region', var_list3)], temp[, match('period', var_list3)]), match(c('specialty', 'region', "period", 'CONTROL', 'TEST'), var_list3)] } nov_rx <- for_compare(pre_compare, 'rel_nrx') nov_std <- for_compare(pre_compare, 'rel_nrx_std') freq <- for_compare(pre_compare, 'freq') levels <- grep('^CONTROL|^TEST', names(nov_std), ignore.case=T, value=T) names(nov_std)[match(levels, names(nov_std))] <- paste(names(nov_std)[match(levels, names(nov_std))], '_std', sep='') names(freq)[match(levels, names(freq))] <- paste(names(freq)[match(levels, names(freq))], '_cnt', sep='') post_match_compare <- left_join(left_join(nov_rx, nov_std, by=c('specialty', 'region', 'period')), freq, by=c('specialty', 'region', 'period')) post_match_compare$p_value <- unlist(lapply(1:nrow(post_match_compare), function(r){ x <- post_match_compare[r, ] for_p_value <- ifelse(x$TEST_std > 0, abs(x$TEST-x$CONTROL)/sqrt(x$TEST_std^2/x$TEST_cnt+x$CONTROL_std^2/x$CONTROL_cnt), NA) p_value <- ifelse(!is.na(for_p_value), 2*(1-pnorm(for_p_value)), NA) return(p_value) })) return(list(esti_df = esti_df, pre_compare = pre_compare, post_match_compare = post_match_compare, graph_data_summary_1 = graph_data_summary_1)) } system.time(part2_result <- part2_function(Campaign_PRD = 'VICTOZA', Pre_wks=26, Target_RX='NRX', Total_wks=128, CampaignId=139, Specfile='spec_grp', ZIP2Region='ZIP_region', n.cpu=8)) #esti_df <- part2_result$esti_df pre_compare <- part2_result$pre_compare post_match_compare <- part2_result$post_match_compare graph_data_summary_1 <- part2_result$graph_data_summary_1 #write the result of part1 write.xlsx(part1$descriptive, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='descriptive', append=T, row.names=F) write.xlsx(part1$pre_period_summary, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='pre_match_compare', append=T, row.names=F) write.xlsx(part1$penetration, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='penetration', append=T, row.names=F) write.xlsx(part1$attrition, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='attrition', append=T, row.names=F) #write the result of part2 write.xlsx(esti_df, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='anova_test', append=T, row.names=F) write.xlsx(pre_compare, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='pre_compare', append=T, row.names=F) write.xlsx(post_match_compare, paste(outPath, 
'\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='post_match_compare', append=T, row.names=F) write.xlsx(graph_data_summary_1, paste(outPath, '\\OutPut_Campaign_', campId, '.xlsx', sep=''), sheetName='graph_data', append=T, row.names=F)
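## Cross-check (not part of the original script): the row-wise lapply() above that
## fills post_match_compare$p_value implements a two-sample z statistic; the same
## calculation vectorised, assuming the TEST/CONTROL mean, *_std and *_cnt columns
## created earlier in part2_function(), would be:
z_stat <- with(post_match_compare,
               abs(TEST - CONTROL) / sqrt(TEST_std^2 / TEST_cnt + CONTROL_std^2 / CONTROL_cnt))
p_value_check <- ifelse(post_match_compare$TEST_std > 0, 2 * (1 - pnorm(z_stat)), NA)
# p_value_check should match the p_value column computed inside part2_function()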
#This code directly computes the final RMSE using the validation set.
#It could take 20 minutes or more to finish the computation of this code.
#Please refer to the movielens.rmd or movielens.pdf for the code used to reach the model conclusion.

#Load libraries
library(tidyverse)
library(data.table)
library(caret)
library(lubridate)
library(stringr)
library(recosystem)

#Download data
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)

#Extract key variables from data files
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                 col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>%
  mutate(movieId = as.numeric(levels(movieId))[movieId],
         title = as.character(title),
         genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")

#Generate edx and validation sets; the validation set is 10% of the movielens data
set.seed(1, sample.kind="Rounding") # if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]

# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")

# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)

#Split edx into train set and test set
#Test set will be 10% of edx set
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.1, list = FALSE)
train_set <- edx[-test_index,]
test_set <- edx[test_index,]

#Make sure userId and movieId in test set are also in train set
test_set <- test_set %>%
  semi_join(train_set, by = "movieId") %>%
  semi_join(train_set, by = "userId")

#Average rating
mu <- mean(train_set$rating)

#Loss Function
RMSE <- function(true, predicted){
  sqrt(mean((true - predicted)^2, na.rm = TRUE))
}

#Movie effect b_i
movie_avgs <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = mean(rating - mu))

#User effect b_u
user_avgs <- train_set %>%
  left_join(movie_avgs, by = "movieId") %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu - b_i))

#Regularized using lambda = 5 (refer to movielens.rmd or movielens.pdf for tuning process)
l = 5
b_ir <- train_set %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu)/(n() + l))
b_ur <- train_set %>%
  left_join(b_ir, by = "movieId") %>%
  group_by(userId) %>%
  summarize(b_u = sum(rating - mu - b_i)/(n() + l))

#Matrix factorisation
#Compute model residual
model_residual <- train_set %>%
  left_join(b_ir, by = "movieId") %>%
  left_join(b_ur, by = "userId") %>%
  mutate(residual = rating - mu - b_i - b_u) %>%
  select(userId, movieId, residual)

#Base prediction on validation set
y_hat_v <- validation %>%
  left_join(b_ir, by = "movieId") %>%
  left_join(b_ur, by = "userId") %>%
  mutate(pred = mu + b_i + b_u) %>%
  pull(pred)

#Convert data to matrices then save to disk
mf_train <- as.matrix(model_residual)
mf_validation <- validation %>% select(userId, movieId, rating)
mf_validation <- as.matrix(mf_validation)
write.table(mf_train, file = "mf_train.txt", sep = " ", row.names = FALSE, col.names = FALSE)
write.table(mf_validation, file = "mf_validation.txt", sep = " ", row.names = FALSE, col.names = FALSE)
set.seed(2019, sample.kind = "Rounding") # if using R 3.5 or earlier, use `set.seed(2019)` instead

#Load the saved matrices as recosystem data files
mftrain_set <- data_file("mf_train.txt")
mfvalidation_set <- data_file("mf_validation.txt")

#Create the recosystem model object
r <- Reco()

#Tune hyperparameters on the training residuals, then train the model
opts <- r$tune(mftrain_set, opts = list(dim = c(10, 20, 30), lrate = c(0.1, 0.2),
                                        costp_l1 = 0, costq_l1 = 0,
                                        nthread = 1, niter = 10))
r$train(mftrain_set, opts = c(opts$min, nthread = 1, niter = 20))

#Save predictions into a temp file
predict_file_validation <- tempfile()
r$predict(mfvalidation_set, out_file(predict_file_validation))
residuals_hat_mf_validation <- scan(predict_file_validation)

#Add predicted residuals back to base predictions
y_hat_mf_validation <- y_hat_v + residuals_hat_mf_validation

#Calculate RMSE
rmse_mf_validation <- RMSE(validation$rating, y_hat_mf_validation)

#Final RMSE value
rmse_mf_validation
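# For context (not in the original script): the baseline RMSE without the
# matrix-factorisation correction can be computed from objects already defined
# above, which quantifies how much the recosystem step improves the fit:
rmse_baseline_validation <- RMSE(validation$rating, y_hat_v)
rmse_baseline_validation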
## source: /movielens.R (repo: limhongwei/movielens, no_license)
#' Cluster Single Cell
#'
#' This will perform clustering on your single cell data.
#'
#' @param input the input ex_sc
#' @param dimension either "Comp" or "2d"
#' @param method can either be "spectral" or "density"
#' @param num_clust the number of clusters
#' @export
#' @details
#' This will perform clustering on either the high-dimensional PCA / ICA components if dimension = "Comp",
#' or the 2d tSNE result if dimension = "2d". Typically spectral clustering works much better on
#' higher-dimensional data, while density-based clustering works better on 2d data.
#' @examples
#' ex_sc_example <- cluster_sc(input = ex_sc_example, dimension = "Comp", method = "spectral", num_clust = 6)

cluster_sc <- function(input, dimension, method, num_clust){
  if(dimension == "Comp"){
    if(method == "spectral"){
      spec <- kknn::specClust(pData(input)[, grep("Comp", colnames(pData(input)))],
                              centers = num_clust, method = 'random-walk')
      cluster <- spec$cluster
      cluster <- paste0("Cluster", cluster)
      pData(input)$Cluster <- cluster
    }
    if(method == "density"){
    }
  }
  if(dimension == "2d"){
    if(method == "spectral"){
      spec <- kknn::specClust(pData(input)[, c("x", "y")],
                              centers = num_clust, method = 'random-walk')
      cluster <- spec$cluster
      cluster <- paste0("Cluster", cluster)
      pData(input)$Cluster <- cluster
    }
    if(method == "density"){
    }
  }
  return(input)
}
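# The "density" branches above are left empty. A minimal sketch of what they might
# contain, assuming the dbscan package were used (eps and minPts are illustrative
# placeholders that would need tuning; DBSCAN does not take num_clust directly,
# and it labels noise points as cluster 0):
cluster_density <- function(coords, eps = 0.5, minPts = 5){
  db <- dbscan::dbscan(coords, eps = eps, minPts = minPts)
  paste0("Cluster", db$cluster)
}
# e.g., inside the "2d" branch: pData(input)$Cluster <- cluster_density(pData(input)[, c("x", "y")])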
## source: /R/cluster_sc.R (repo: garber-lab/scRNASeq, no_license)
library(librarysnapshot)

### Name: library_snapshot
### Title: library_snapshot
### Aliases: library_snapshot

### ** Examples

## Not run: 
##D 
##D 
##D library_snapshot()
##D 
##D library_snapshot(path = tempdir())
##D 
##D 
## End(Not run)
## source: /data/genthat_extracted_code/librarysnapshot/examples/library_snapshot.Rd.R (repo: surayaaramli/typeRrh, no_license)
# Note: data_i is the data set requested in part i of the Project
# Note: requires "dplyr" package

# begin by reading data
testSUB <- read.table("./test/subject_test.txt")
testX <- read.table("./test/X_test.txt")
testY <- read.table("./test/Y_test.txt")
trainSUB <- read.table("./train/subject_train.txt")
trainX <- read.table("./train/X_train.txt")
trainY <- read.table("./train/Y_train.txt")
features <- read.table("./features.txt")
act_lab <- read.table("./activity_labels.txt")

## 1
# create data sets for both the "test" data and the "train" data
# the first column is the subject, the middle columns are the feature data, and the last column is the activity label
test <- cbind(testSUB, testX, testY)
train <- cbind(trainSUB, trainX, trainY)
# merge the training and the test sets to create one data set
data1 <- rbind(test, train)

## 2
# create vector with column numbers of variables that include "mean()" or "std()"
mean_or_sum <- as.numeric(c(grep("mean()", features$V2, fixed=TRUE), grep("std()", features$V2, fixed=TRUE)))
# put these column numbers in numerical order, and add 1 (because the subject column shifts all of these column numbers by 1)
columns_with_mean_or_sum <- sort(mean_or_sum + 1)
# extract only the measurements on the mean and standard deviation for each measurement
data2 <- data1[, c(1, columns_with_mean_or_sum, 563)]

## 3
# Use descriptive activity names to name the activities in the data set
mergeData = merge(data2, act_lab, by.x="V1.2", by.y="V1", all=TRUE)
# remove first column, it is the index for activity names, thus it is redundant
data3 <- mergeData[, 2:69]

## 4
# clean up column names
data3 <- rename(data3, ID=V1, V1=V1.1, V2=V2.x, Activity_Label=V2.y)
# create vector of column names from feature data
DecVarNames <- features[mean_or_sum, 2]
# the following code is like "find" and "replace" in a text editor
from <- c("tBody","tGravity","fBody","-","mean()","std()","Acc","BodyBody")
to <- c("TimeBody","TimeGravity","FrequencyBody","","Mean","StandardDeviation","Accelerometer","Body")
gsub2 <- function(pattern, replacement, x, ...) {
  for(i in 1:length(pattern)) x <- gsub(pattern[i], replacement[i], x, ...)
  x
}
DecVarNames <- gsub2(from, to, DecVarNames, fixed=TRUE)
# make character vector syntactically valid to be column names
DecVarNames <- make.names(DecVarNames, unique=TRUE)
# redefine the names of the columns in data3
colnames(data3) <- c("ID", DecVarNames, "Activity_Label")
# Appropriately label the data set with descriptive variable names.
data4 <- data3

## 5
# create a second, independent tidy data set with the average of each variable for each activity and each subject
data5 <- data4 %>%
  group_by(Activity_Label) %>%
  summarise_each(funs(mean))

# output tidy data set requested for project
print(data5)
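# Small illustration (not part of the assignment): gsub2() applies its
# find/replace pairs sequentially, e.g. with a made-up feature name:
gsub2(c("tBody", "-", "mean()"), c("TimeBody", "", "Mean"),
      "tBody-mean()", fixed = TRUE)   # returns "TimeBodyMean"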
## source: /run_analysis.R (repo: SLaw7/GCD_Project, no_license)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gen-namespace-docs.R, % R/gen-namespace-examples.R \name{torch_conv_transpose1d} \alias{torch_conv_transpose1d} \title{Conv_transpose1d} \arguments{ \item{input}{NA input tensor of shape \eqn{(\text{minibatch} , \text{in\_channels} , iW)}} \item{weight}{NA filters of shape \eqn{(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)}} \item{bias}{NA optional bias of shape \eqn{(\text{out\_channels})}. Default: None} \item{stride}{NA the stride of the convolving kernel. Can be a single number or a tuple \verb{(sW,)}. Default: 1} \item{padding}{NA \code{dilation * (kernel_size - 1) - padding} zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple \verb{(padW,)}. Default: 0} \item{output_padding}{NA additional size added to one side of each dimension in the output shape. Can be a single number or a tuple \code{(out_padW)}. Default: 0} \item{groups}{NA split input into groups, \eqn{\text{in\_channels}} should be divisible by the number of groups. Default: 1} \item{dilation}{NA the spacing between kernel elements. Can be a single number or a tuple \verb{(dW,)}. Default: 1} } \description{ Conv_transpose1d } \section{conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor }{ Applies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called "deconvolution". See \code{~torch.nn.ConvTranspose1d} for details and output shape. .. include:: cudnn_deterministic.rst } \examples{ }
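% A hypothetical usage sketch (not produced by the generator); it assumes the
% torch package's torch_randn() for creating random tensors, with shapes taken
% from the argument descriptions above:
%   inputs  <- torch_randn(20, 16, 50)   # (minibatch, in_channels, iW)
%   weights <- torch_randn(16, 33, 5)    # (in_channels, out_channels, kW)
%   out <- torch_conv_transpose1d(inputs, weights)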
% source: /man/torch_conv_transpose1d.Rd (repo: qykong/torch, permissive)
#' @param wt (character) One of "json" or "xml". Required.
#' @param raw (logical) Return raw JSON or XML as character string. Required.
#' Default: `FALSE`
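# For context (an illustration, not part of the package): templates kept under
# man-roxygen/ are pulled into a function's documentation with roxygen2's
# @template tag. A hypothetical wrapper using this template might look like:
# #' Hypothetical ITIS query helper
# #' @template common
# #' @export
# my_itis_call <- function(wt = "json", raw = FALSE) list(wt = wt, raw = raw)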
## source: /man-roxygen/common.R (repo: ropensci/ritis, permissive)
# Auto-generated fuzz-test input: A is a 7 x 9 matrix of zeros, B a 1 x 1 matrix
testlist <- list(A = structure(rep(0, 63), .Dim = c(7L, 9L)),
                 B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows, testlist)
str(result)
## source: /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613112724-test.R (repo: akhikolla/updatedatatype-list3, no_license)
source("\\\\helix.klient.uib.no\\biohome\\aey022\\rdata\\Mabira vegetation survey data\\matrix veg load data.r") ls() #stuff not necessary for ms but possibly useful in future or for exploratory is commented out # richard made this - it makes a data frame with the species richnesses in, with the different plant functional groups along the top and transect number down the side rich<-sapply(list(spores=spore.all, seeds=seed.all, trees=trees, trueherbs=trueherbs, ferns=ferns, subcanopywoody=subcanopywoody), function(x){ rowSums(x>0) }) rich<-as.data.frame(rich) rich # then that data frame is used to make scatterplots of the species richnesses of different groups, and round gives the correlation coefficinet 'r' #pairs(rich) #savePlot("pairs.pdf", type="pdf") #round(cor(rich, use="pair"),2) #this makes histograms of the frequencies of different species richness values among sampls #x11();par(mfrow=c(3,2)) #mapply(hist, x=rich, main=names(rich)) #savePlot("rich_hists.pdf", type="pdf") rich #new analysis: standardise richness and sample number, then make histograms rich$standardferns<- rich$ferns/max(rich$ferns, na.rm=T) rich$standardtrueherbs<- rich$trueherbs/max(rich$trueherbs, na.rm=T) rich$standardsubcanopywoody<- rich$subcanopywoody/max(rich$subcanopywoody, na.rm=T) rich$standardtrees<- rich$trees/max(rich$trees, na.rm=T) rich x11();par(mfrow=c(2,2)) plot(density(rich$standardtrueherbs, na.rm=T), main="herbs") #plot(density(rich$standardsubcanopywoody, na.rm=T), main="scw") #plot(density(rich$standardtrees, na.rm=T), main="trees") #plot(density(rich$standardferns, na.rm=T), main="(ferns)") ## next step is to make histograms for rich by species group and forest type. First I made a data frame of species richnesses of the 'new' groups (ie combining shrubs and tree seedlings into subcanopywoody). I did a set of scatterplots of just these groups without the seeds or spores rich2<-sapply(list(trees=trees, trueherbs=trueherbs, ferns=ferns, subcanopywoody=subcanopywoody), function(x){ rowSums(x>0) }) rich2<-as.data.frame(rich2) rich2 pairs(rich2) # then I added the environment data, to get the forest type ###########Warning MAGIC NUMBER HERE for forest age envrich <- merge(rich2,env,by.x=0, by.y=0) head(envrich) envrich<-envrich[,c(2:5,26)] str(envrich) # then I changed the data frame to one with three variables: species richness, plant functional group, and forest type, so that I could make a grouped boxplot library(reshape) meltedrich <- melt(envrich, id="newage") str(meltedrich) meltedrich<-meltedrich[(!is.na(meltedrich$value)),] xtabs(value~variable+newage, data=meltedrich) meltedrich$variable<-factor(meltedrich$variable,levels=c("ferns", "trueherbs", "subcanopywoody", "trees")) #Then I do the same as for lines 19-43 but for abundances instead of richnesses, to make the second boxplot abun2<-sapply(list(ferns=fernabun, trueherbs=trueherbs, subcanopywoody=subcanopywoody, trees=trees), rowSums) abun2<-as.data.frame(abun2) abun2 envabun <- merge(abun2,env,by.x=0, by.y=0) head(envabun) envabun<-envabun[,c(2:5,26)] ####MAGIC NUMBER!!!### str(envabun) library(reshape) meltedabun <- melt(envabun, id="newage") str(meltedabun) meltedabun<-meltedabun[(!is.na(meltedabun$value)),] meltedabun #these must be bullshit for cover variables... 
meltedabun$valuem2<-NA meltedabun$valuem2[(meltedabun$variable=="ferns")]<-meltedabun$value[(meltedabun$variable=="ferns")]/300#300 m2 is plot area meltedabun$valuem2[(meltedabun$variable=="trees")]<-meltedabun$value[(meltedabun$variable=="trees")]/300 meltedabun$trans<-sqrt(meltedabun$valuem2) # here is the code for the grouped boxplot, 'par' just sets the margins to make extra room for making the axis labels sideways. 'At' is the bit that organises the boxplots into groups. x11();par(mar=c(3,4,2,1),mfrow=c(2,1), mgp=c(3,0.3,0), tcl=-0.2, bty="n") boxplot(meltedrich$value~meltedrich$newage*meltedrich$variable, at=(c(15,14,13,11,10,9,7,6,5,3,2,1)), xlim=c(1,15), las=2, xaxt="n", cex.axis=0.75, ylim=c(0,25)) #mess with at to make the ys os og order axis(side=1, at=(c(15,14,13,11,10,9,7,6,5,3,2,1)), labels=rep(c("OG","MS","YS"),4), cex.axis=0.85, lwd=0) mtext("Forest type", side=1, line=1.5) mtext ("Trees > 10cm dbh* Sub-canopy woody (ns) Herbs (ns) Ferns (ns)", side=3, line=0, cex=0.85, adj=0) mtext("a", side=3, line=1, adj=0) mtext("Species richness",side=2,line=2) boxplot(meltedabun$valuem2[meltedabun$variable=="trees"]~meltedabun$newage[meltedabun$variable=="trees"]*meltedabun$variable[meltedabun$variable=="trees"], at=(c(15,14,13,11,10,9,7,6,5,3,2,1)), xlim=c(1,15), ylim=c(0,0.1), cex.axis=0.75, las=2, xaxt="n") # this puts the tree abundance data in boxplot(meltedabun$valuem2[meltedabun$variable=="ferns"]/10~meltedabun$newage[meltedabun$variable=="ferns"]*meltedabun$variable[meltedabun$variable=="ferns"], at=(c(15,14,13,11,10,9,7,6,5,3,2,1)), xlim=c(1,15), ylim=c(0,0.1), cex.axis=0.75, las=2, xaxt="n", add=TRUE) #then the ferns, scaled to match (div10) boxplot(meltedabun$value[is.na(meltedabun$valuem2)]/1000~meltedabun$newage[is.na(meltedabun$valuem2)]*meltedabun$variable[is.na(meltedabun$valuem2)], at=(c(15,14,13,11,10,9,7,6,5,3,2,1)), xlim=c(1,15), cex.axis=0.75, las=2, xaxt="n", add=TRUE) #this puts the cover data in scaled to fit the plot nicely. It's a dirty fix axis(side=2, at=c(0,0.02, 0.04,0.06,0.08,0.1), labels=c(0,20,40,60,80,100), las=2, pos=4.5, cex.axis=0.75) axis(side=2, at=c(0,0.02, 0.04,0.06,0.08,0.1), labels=c(0,20,40,60,80,100), las=2, pos=8.5, cex.axis=0.75) axis(side=2, at=c(0,0.02, 0.04,0.06,0.08,0.1), labels=c(0,0.2,0.4,0.6,0.8,1.0), las=2, pos=12.5, cex.axis=0.75) axis(side=1, at=(c(15,14,13,11,10,9,7,6,5,3,2,1)), labels=rep(c("OG","MS","YS"),4), cex.axis=0.85, lwd=0) mtext("Forest type", side=1, line=1.5) mtext ("Trees > 10cm dbh (ns) Sub-canopy woody** Herbs (ns) Ferns (ns)", side=3, line=0, cex=0.85, adj=0) mtext("b", side=3, line=1, adj=0) mtext(expression(paste("Total cover or abundance per ",~m^2)),side=2,line=2) savePlot("Figure 2 grouped rich and abundance boxplots NEW AGE m2.emf", type="emf") savePlot("Figure 2ab Eycott et al boxplots.eps", type="eps") savePlot("Figure 2ab Eycott et al boxplots.pdf", type="pdf") library(car) #has Levene test by(meltedrich,meltedrich$variable, function(x)leveneTest(value~newage, data=x))#ns by(meltedrich,meltedrich$variable, function(x)summary(aov(value~newage, data=x))) by(meltedabun,meltedabun$variable, function(x)leveneTest(valuem2~newage, data=x))#m2 only relevant for ferns ns for ferns, trees. sig for cover vars! hist(sqrt(meltedabun$valuem2[meltedabun$variable=="trueherbs"]), breaks=10) hist(sqrt(meltedabun$valuem2[meltedabun$variable=="subcanopywoody"]), breaks=10) by(meltedabun,meltedabun$variable, function(x)summary(aov(valuem2~newage, data=x))) #Applies only to ferns and trees. 
ns for ferns and trees but approaches sig for trees (checked them separately, yes the order is as presumed - ferns, trueherbs, scw, trees) meltedabun$trans<-sqrt(meltedabun$value) by(meltedabun,meltedabun$variable, function(x)summary(aov(sqrt(value)~newage, data=x))) #Applies only to scw and herbs - that is, % cover ones. Sig for scw and also trees... tapply(meltedabun$valuem2[meltedabun$variable=="trueherbs"], meltedabun$newage[meltedabun$variable=="trueherbs"], mean) tapply(meltedabun$trans[meltedabun$variable=="subcanopywoody"], meltedabun$newage[meltedabun$variable=="subcanopywoody"], median) tapply(meltedabun$trans[meltedabun$variable=="subcanopywoody"], meltedabun$newage[meltedabun$variable=="subcanopywoody"], mean) blank #colour plots of spprich for herbs, seeds and trees against forest age, not currently in ms #boxplot(rowSums(trueherbs>0)~env$newage, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Herb Spp rich by Forest age", xlab="Forest Age", ylab="Spprich") #boxplot(rowSums(seed.all>0)~env$newage, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Seed Spp rich by Forest age", xlab="Forest Age", ylab="Spprich") #boxplot(rowSums(trees>0)~env$newage, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Tree Spp rich by Forest age", xlab="Forest Age", ylab="Spprich") #plot herbspprich against env vars, with points coloured by forest age #boxplot(rowSums(trueherbs>0)~env$Stumps, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Herb Spp rich by n. stumps", xlab="Stumps", ylab="Spprich") #savePlot("herb rich by stumps.emf") #boxplot(rowSums(trueherbs>0)~env$m.from.stream, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Herb Spp rich by m.from.stream", xlab="m.from.stream", ylab="Spprich") #savePlot("herb rich by mfromstream.emf") #plot (rowSums(trueherbs>0)~env$Slope, col=as.factor(env$newage), xlab="Slope", ylab="n herb species") #savePlot("herb rich by Slope.emf") #plot (rowSums(trueherbs>0)~env$BA, col=as.factor(env$newage), xlab="BA", ylab="n herb species") #savePlot("herb rich by BA.emf") #plot (rowSums(trueherbs>0)~env$Deadwood, col=as.factor(env$newage), xlab="Deadwood", ylab="n herb species") #savePlot("herb rich by Deadwood.emf") #plot (rowSums(trueherbs>0)~env$LLCov, col=as.factor(env$newage), xlab="log(Leaf Litter percent Cover)", ylab="n herb species") #savePlot("herb rich by LLCov.emf") #plot (rowSums(trueherbs>0)~env$LLDepth, col=as.factor(env$newage), xlab="LLDepth", ylab="n herb species") #savePlot("herb rich by LLDepth.emf") #plot (rowSums(trueherbs>0)~env$CanO, col=as.factor(env$newage), xlab="CanO", ylab="n herb species") #savePlot("herb rich by CanO.emf") #plot (rowSums(trueherbs>0)~env$pH, col=as.factor(env$newage), xlab="Soil pH", ylab="n herb species") #savePlot("herb rich by soil pH.emf") #plot (rowSums(trueherbs>0)~env$N., col=as.factor(env$newage), xlab="Soil nitrogen", ylab="n herb species") #savePlot("herb rich by soil Nitrogen.emf") #plot (rowSums(trueherbs>0)~env$C., col=as.factor(env$newage), xlab="Soil carbon", ylab="n herb species") #savePlot("herb rich by soil carbon.emf") #plot (rowSums(trueherbs>0)~env$TCat, col=as.factor(env$newage), xlab="total cations", ylab="n herb species") #savePlot("herb rich by soil total cations.emf") #add a legend. 
Note that R will wait for you to click the bit of the graph where you want the legend> #legend(locator(1),c("og","os","ys"),pch=c(1,1,1),col=c(1,2,3)) #plot cover/total seeds for herbs, seeds and trees against forest age #boxplot(rowSums(trueherbs)~env$newage, col=(c("gold","darkgreen","red")), notch=TRUE, main="Ground veg angiosperm cover by Forest age", xlab="Forest Age", ylab="Total cover") #boxplot(rowSums(seed.all)~env$newage, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Seed Spp rich by Forest age", xlab="Forest Age", ylab="Total herb cover") #boxplot(rowSums(trees)~env$newage, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Tree Spp rich by Forest age", xlab="Forest Age", ylab="Total herb cover") #plot herb cover against env vars, with points coloured by forest age #boxplot(rowSums(trueherbs)~env$Stumps, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Herb Spp rich by n. stumps", xlab="Stumps", ylab="Spprich") #savePlot("herb rich by stumps.emf") #boxplot(rowSums(trueherbs)~env$m.from.stream, col=(c("gold","darkgreen","red")), notch=TRUE, main=" Herb Spp rich by m.from.stream", xlab="m.from.stream", ylab="Spprich") #savePlot("herb rich by mfromstream.emf") #plot (rowSums(trueherbs)~env$Slope, col=as.factor(env$newage), xlab="Slope", ylab="Total herb cover") #savePlot("herb rich by Slope.emf") #plot (rowSums(trueherbs)~env$BA, col=as.factor(env$newage), xlab="BA", ylab="Total herb cover") #savePlot("herb rich by BA.emf") #plot (rowSums(trueherbs)~env$Deadwood, col=as.factor(env$newage), xlab="Deadwood", ylab="Total herb cover") #savePlot("herb rich by Deadwood.emf") #plot (rowSums(trueherbs)~env$LLCov, col=as.factor(env$newage), xlab="LLCov", ylab="Total herb cover") #savePlot("herb rich by LLCov.emf") #plot (rowSums(trueherbs)~env$LLDepth, col=as.factor(env$newage), xlab="LLDepth", ylab="Total herb cover") #savePlot("herb rich by LLDepth.emf") #plot (rowSums(trueherbs)~env$CanO, col=as.factor(env$newage), xlab="CanO", ylab="Total herb cover") #savePlot("herb rich by CanO.emf") #plot (rowSums(trueherbs)~env$pH, col=as.factor(env$newage), xlab="Soil pH", ylab="Total herb cover") #savePlot("herb rich by soil pH.emf") #plot (rowSums(trueherbs)~env$N., col=as.factor(env$newage), xlab="Soil nitrogen", ylab="Total herb cover") #savePlot("herb rich by soil Nitrogen.emf") #plot (rowSums(trueherbs)~env$C., col=as.factor(env$newage), xlab="Soil carbon", ylab="Total herb cover") #savePlot("herb rich by soil carbon.emf") #plot (rowSums(trueherbs)~env$TCat, col=as.factor(env$newage), xlab="total cations", ylab="Total herb cover") #savePlot("herb rich by soil total cations.emf") ##SPECIES ACCUMULATION CURVES## #species accumulation curves by functional groups. the species accumulation function hates NA values, so I have to tell it to use data only for those plots where we know the species richness and the age. 
ls() levels(env$newage) keep2c<-!is.na(trueherbs[,1]) keep3c<-!is.na(ferns[,1]) keep5c<-!is.na(trees[,1]) keep6c<-!is.na(subcanopywoody[,1]) keep2c spaccherb <- specaccum(trueherbs[keep2c,], "exact") spaccfern <- specaccum(ferns[keep3c,], "exact") spaccbigtrees <- specaccum(trees[keep5c,], "exact") spaccsubcanopywoody <- specaccum(subcanopywoody[keep6c,], "exact") plot(spaccfern, ci.type="line", col="blue", lwd=2, ci.lty=1, ci.col="lightblue", xlim=c(1,70), xaxs="i", ylim=c(0,100), yaxs="i", bty="l") plot(spaccherb, add=T, ci.type="line", col="green", lwd=2, ci.lty=1, ci.col="lightgreen") plot(spaccbigtrees, add=T, ci.type="line", col="red", lwd=2, ci.lty=1, ci.col="pink") plot(spaccsubcanopywoody, add=T, ci.type="line", col="gold", lwd=2, ci.lty=1, ci.col="palegoldenrod") legend("topright",c("Ferns","Herbs","Sub-canopy woody", "Trees >10cm diameter"),pch=16,col=c("blue","green","gold", "red")) savePlot("species accumulation curves_bright colours.emf", type="emf") ## species accumulation curves, by forest age. x11();par(mfrow=c(2,2)) sa_fernog <- specaccum(ferns[keep3c&env$newage=="og",], "exact") sa_fernos <- specaccum(ferns[keep3c&env$newage=="os",], "exact") sa_fernys <- specaccum(ferns[keep3c&env$newage=="ys",], "exact") plot(sa_fernos, ci.type="line", col="blue", lwd=2, ci.lty=1, ci.col="lightblue", xlim=c(0,25), ylim=c(0,50), yaxs="i", main="Ferns", bty="l", xaxs="i") plot(sa_fernys, add=T, ci.type="line", col="green", lwd=2, ci.lty=1, ci.col="lightgreen") plot(sa_fernog, add=T, ci.type="line", col="red", lwd=2, ci.lty=1, ci.col="pink") legend("topleft",c("og","os","ys"),pch=16,col=c("red","blue","green")) savePlot("species accumulation curves_ferns_bright colours.emf") sa_herbog <- specaccum(trueherbs[keep2c&env$newage=="og",], "exact") sa_herbos <- specaccum(trueherbs[keep2c&env$newage=="os",], "exact") sa_herbys <- specaccum(trueherbs[keep2c&env$newage=="ys",], "exact") plot(sa_herbos, ci.type="line", col="blue", lwd=2, ci.lty=1, ci.col="lightblue", xlim=c(0,25), ylim=c(0,50), yaxs="i", main="Herbs", bty="l", xaxs="i") plot(sa_herbys, add=T, ci.type="line", col="green", lwd=2, ci.lty=1, ci.col="lightgreen") plot(sa_herbog, add=T, ci.type="line", col="red", lwd=2, ci.lty=1, ci.col="pink") legend("topleft",c("og","os","ys"),pch=16,col=c("red","blue","green")) savePlot("species accumulation curves_herbs_bright colours.emf") sa_scw_og <- specaccum(subcanopywoody[keep6c&env$newage=="og",], "exact") sa_scw_os <- specaccum(subcanopywoody[keep6c&env$newage=="os",], "exact") sa_scw_ys <- specaccum(subcanopywoody[keep6c&env$newage=="ys",], "exact") plot(sa_scw_os, ci.type="line", col="blue", lwd=2, ci.lty=1, ci.col="lightblue", xlim=c(0,25), ylim=c(0,50), yaxs="i", main="Sub-canopy woody plants", bty="l", xaxs="i") plot(sa_scw_ys, add=T, ci.type="line", col="green", lwd=2, ci.lty=1, ci.col="lightgreen") plot(sa_scw_og, add=T, ci.type="line", col="red", lwd=2, ci.lty=1, ci.col="pink") legend("topleft",c("og","os","ys"),pch=16,col=c("red","blue","green")) savePlot("species accumulation curves_scw_bright colours.emf") sa_trees_og <- specaccum(trees[keep5c&env$newage=="og",], "exact") sa_trees_os <- specaccum(trees[keep5c&env$newage=="os",], "exact") sa_trees_ys <- specaccum(trees[keep5c&env$newage=="ys",], "exact") plot(sa_trees_os, ci.type="line", col="blue", lwd=2, ci.lty=1, ci.col="lightblue", xlim=c(0,25), ylim=c(0,50), yaxs="i", main="Trees", bty="l", xaxs="i") plot(sa_trees_ys, add=T, ci.type="line", col="green", lwd=2, ci.lty=1, ci.col="lightgreen") plot(sa_trees_og, add=T, 
ci.type="line", col="red", lwd=2, ci.lty=1, ci.col="pink") legend("topright",c("og","os","ys"),pch=16,col=c("red","blue","green")) savePlot("species accumulation curves_fourplots_bright colours.emf", type="emf") ###VENN DIAGRAMS## #area1 The size of the first set ALL OG #area2 The size of the second set ALL OS #area3 The size of the third set ALL YS #n12 The size of the intersection between the first and the second set OG & OS #n23 The size of the intersection between the second and the third set OS & YS #n13 The size of the intersection between the first and the third set OG & YS #n123 The size of the intersection between all three sets #category A vector (length 3) of strings giving the category names of the sets #I've tried to eulerise them but it's not happening so I guess it doesn't meet the requirements library(VennDiagram) x11(); draw.triple.venn( area1 = length(which(colSums(trees[keep5c&(env$newage=="og"),])>0)), area2 = length(which(colSums(trees[keep5c&(env$newage=="os"),])>0)), area3 = length(which(colSums(trees[keep5c&(env$newage=="ys"),])>0)), n12 = length(intersect (names(which(colSums(trees[keep5c&(env$newage=="og"),])>0)),names(which(colSums(trees[keep5c&(env$newage=="os"),])>0)))), n23 = length(intersect (names(which(colSums(trees[keep5c&(env$newage=="os"),])>0)),names(which(colSums(trees[keep5c&(env$newage=="ys"),])>0)))), n13 = length(intersect (names(which(colSums(trees[keep5c&(env$newage=="og"),])>0)),names(which(colSums(trees[keep5c&(env$newage=="ys"),])>0)))), n123 =length(intersect(names(which(colSums(trees[keep5c&(env$newage=="og"),])>0)),(intersect(names(which(colSums(trees[keep5c&(env$newage=="os"),])>0)),names(which(colSums(trees[keep5c&(env$newage=="ys"),])>0)))))) , category = c("OG", "OS", "YS"), cex=2, fontfamily="sans", cat.cex=2, cat.fontfamily="sans" ); savePlot("Venntree.emf", type="emf") x11(); draw.triple.venn( area1 = length(which(colSums(trueherbs[keep2c&(env$newage=="og"),])>0)), area2 = length(which(colSums(trueherbs[keep2c&(env$newage=="os"),])>0)), area3 = length(which(colSums(trueherbs[keep2c&(env$newage=="ys"),])>0)), n12 = length(intersect (names(which(colSums(trueherbs[keep2c&(env$newage=="og"),])>0)),names(which(colSums(trueherbs[keep2c&(env$newage=="os"),])>0)))), n23 = length(intersect (names(which(colSums(trueherbs[keep2c&(env$newage=="os"),])>0)),names(which(colSums(trueherbs[keep2c&(env$newage=="ys"),])>0)))), n13 = length(intersect (names(which(colSums(trueherbs[keep2c&(env$newage=="og"),])>0)),names(which(colSums(trueherbs[keep2c&(env$newage=="ys"),])>0)))), n123 =length(intersect(names(which(colSums(trueherbs[keep2c&(env$newage=="og"),])>0)),(intersect(names(which(colSums(trueherbs[keep2c&(env$newage=="os"),])>0)),names(which(colSums(trueherbs[keep2c&(env$newage=="ys"),])>0)))))) , category = c("OG", "OS", "YS"), cex=2, fontfamily="sans", cat.cex=2, cat.fontfamily="sans" ); savePlot("Venntrueherb.emf", type="emf") x11(); draw.triple.venn( area1 = length(which(colSums(ferns[keep3c&(env$newage=="og"),])>0)), area2 = length(which(colSums(ferns[keep3c&(env$newage=="os"),])>0)), area3 = length(which(colSums(ferns[keep3c&(env$newage=="ys"),])>0)), n12 = length(intersect (names(which(colSums(ferns[keep3c&(env$newage=="og"),])>0)),names(which(colSums(ferns[keep3c&(env$newage=="os"),])>0)))), n23 = length(intersect (names(which(colSums(ferns[keep3c&(env$newage=="os"),])>0)),names(which(colSums(ferns[keep3c&(env$newage=="ys"),])>0)))), n13 = length(intersect 
(names(which(colSums(ferns[keep3c&(env$newage=="og"),])>0)),names(which(colSums(ferns[keep3c&(env$newage=="ys"),])>0)))), n123 =length(intersect(names(which(colSums(ferns[keep3c&(env$newage=="og"),])>0)),(intersect(names(which(colSums(ferns[keep3c&(env$newage=="os"),])>0)),names(which(colSums(ferns[keep3c&(env$newage=="ys"),])>0)))))) , category = c("OG", "OS", "YS"), cex=2, fontfamily="sans", cat.cex=2, cat.fontfamily="sans" ); savePlot("Vennfern.emf", type="emf") x11(); draw.triple.venn( area1 = length(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)), area2 = length(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)), area3 = length(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)), n12 = length(intersect (names(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)))), n23 = length(intersect (names(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)))), n13 = length(intersect (names(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)))), n123 =length(intersect(names(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)),(intersect(names(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)))))) , category = c("OG", "OS", "YS"), cex=2, fontfamily="sans", cat.cex=2, cat.fontfamily="sans" ); savePlot("Vennsubcanopywoody.emf", type="emf") #older versions: #nicked from data loading file: #bothsetdiff<-function(x,y)list(unique.to.x=setdiff(x,y),unique.to.y=setdiff(y,x), in.common=intersect(x,y)) #trying to do it somehow: prep these lists then open them in notepad, remove the first row, and resave into a folder just for these files #oglist<-list(colnames(treesog[,colSums(treesog)>0])) #oslist<-list(colnames(treesos[,colSums(treesos)>0])) #yslist<-list(colnames(treesys[,colSums(treesys)>0])) #write.table(oglist,"ogtrees.txt") #write.table(oslist,"ostrees.txt") #write.table(yslist,"ystrees.txt") #write.table(oglist,"ogtrees.txt", sep="\t") # this next bit seems to miss the last few off the list #write.table((rbind(oglist,oslist,yslist)), "eVenn.txt", sep="\t", quote=F) #write.table(trees,"trees.txt", sep="\t", quote=F) #write.table(ferns,"ferns.txt", sep="\t", quote=F) #write.table(subcanopywoody,"subcanopywoody.txt", sep="\t", quote=F) #write.table(trueherbs,"herbs.txt", sep="\t", quote=F) ####start of making matching sets? 
#treeslist<-lapply(levels(env$Forest), function(n){ # z<-trees[env$Forest==n,] # colnames(z[,colSums(z)>0]) #}) ##or the long way #treesog<-trees[keep5c&(env$Forest=="og"),] #treesos<-trees[keep5c&(env$Forest=="os"),] #treesys<-trees[keep5c&(env$Forest=="ys"),] #str(treesog) #scwog<-subcanopywoody[keep6c&(env$Forest=="og"),] #scwos<-subcanopywoody[keep6c&(env$Forest=="os"),] #scwys<-subcanopywoody[keep6c&(env$Forest=="ys"),] #this is no use because it misses out the ones in the middle of the venn #bothsetdiff(colnames(treesog[,colSums(treesog)>0]),colnames(treesos[,colSums(treesos)>0])) #bothsetdiff(colnames(treesog[,colSums(treesog)>0]),colnames(treesys[,colSums(treesys)>0])) #bothsetdiff(colnames(treesys[,colSums(treesys)>0]),colnames(treesos[,colSums(treesos)>0])) #looking at compartments to try to fix age class mystery coloury<-unique(cbind(env$newage,env$Compartment)) coloury<-coloury[order(coloury[,2]),] boxplot(Stumps~Compartment, data=env, col=coloury[,1]) boxplot(CanO~Compartment, data=env, col=coloury[,1]) boxplot(BA~Compartment, data=env, col=coloury[,1])
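# Possible refactor (sketch only, not used above): the Venn blocks repeat the same
# colSums/intersect logic; a helper that returns the species list present in each
# forest age class would shorten them considerably:
spp_by_age <- function(comm, keep){
  setNames(lapply(levels(env$newage), function(a){
    names(which(colSums(comm[keep & env$newage == a, ]) > 0))
  }), levels(env$newage))
}
# e.g. tree_lists <- spp_by_age(trees, keep5c); then lengths(tree_lists) and
# length(intersect(tree_lists$og, tree_lists$os)) give the Venn areas.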
## source: /Mabira vegetation survey data/mabira species richness_newage.r (repo: amyeycott/Matrix, no_license)
(names(which(colSums(ferns[keep3c&(env$newage=="og"),])>0)),names(which(colSums(ferns[keep3c&(env$newage=="ys"),])>0)))), n123 =length(intersect(names(which(colSums(ferns[keep3c&(env$newage=="og"),])>0)),(intersect(names(which(colSums(ferns[keep3c&(env$newage=="os"),])>0)),names(which(colSums(ferns[keep3c&(env$newage=="ys"),])>0)))))) , category = c("OG", "OS", "YS"), cex=2, fontfamily="sans", cat.cex=2, cat.fontfamily="sans" ); savePlot("Vennfern.emf", type="emf") x11(); draw.triple.venn( area1 = length(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)), area2 = length(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)), area3 = length(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)), n12 = length(intersect (names(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)))), n23 = length(intersect (names(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)))), n13 = length(intersect (names(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)))), n123 =length(intersect(names(which(colSums(subcanopywoody[keep6c&(env$newage=="og"),])>0)),(intersect(names(which(colSums(subcanopywoody[keep6c&(env$newage=="os"),])>0)),names(which(colSums(subcanopywoody[keep6c&(env$newage=="ys"),])>0)))))) , category = c("OG", "OS", "YS"), cex=2, fontfamily="sans", cat.cex=2, cat.fontfamily="sans" ); savePlot("Vennsubcanopywoody.emf", type="emf") #older versions: #nicked from data loading file: #bothsetdiff<-function(x,y)list(unique.to.x=setdiff(x,y),unique.to.y=setdiff(y,x), in.common=intersect(x,y)) #trying to do it somehow: prep these lists then open them in notepad, remove the first row, and resave into a folder just for these files #oglist<-list(colnames(treesog[,colSums(treesog)>0])) #oslist<-list(colnames(treesos[,colSums(treesos)>0])) #yslist<-list(colnames(treesys[,colSums(treesys)>0])) #write.table(oglist,"ogtrees.txt") #write.table(oslist,"ostrees.txt") #write.table(yslist,"ystrees.txt") #write.table(oglist,"ogtrees.txt", sep="\t") # this next bit seems to miss the last few off the list #write.table((rbind(oglist,oslist,yslist)), "eVenn.txt", sep="\t", quote=F) #write.table(trees,"trees.txt", sep="\t", quote=F) #write.table(ferns,"ferns.txt", sep="\t", quote=F) #write.table(subcanopywoody,"subcanopywoody.txt", sep="\t", quote=F) #write.table(trueherbs,"herbs.txt", sep="\t", quote=F) ####start of making matching sets? 
#treeslist<-lapply(levels(env$Forest), function(n){ # z<-trees[env$Forest==n,] # colnames(z[,colSums(z)>0]) #}) ##or the long way #treesog<-trees[keep5c&(env$Forest=="og"),] #treesos<-trees[keep5c&(env$Forest=="os"),] #treesys<-trees[keep5c&(env$Forest=="ys"),] #str(treesog) #scwog<-subcanopywoody[keep6c&(env$Forest=="og"),] #scwos<-subcanopywoody[keep6c&(env$Forest=="os"),] #scwys<-subcanopywoody[keep6c&(env$Forest=="ys"),] #this is no use because it misses out the ones in the middle of the venn #bothsetdiff(colnames(treesog[,colSums(treesog)>0]),colnames(treesos[,colSums(treesos)>0])) #bothsetdiff(colnames(treesog[,colSums(treesog)>0]),colnames(treesys[,colSums(treesys)>0])) #bothsetdiff(colnames(treesys[,colSums(treesys)>0]),colnames(treesos[,colSums(treesos)>0])) #looking at compartments to try to fix age class mystery coloury<-unique(cbind(env$newage,env$Compartment)) coloury<-coloury[order(coloury[,2]),] boxplot(Stumps~Compartment, data=env, col=coloury[,1]) boxplot(CanO~Compartment, data=env, col=coloury[,1]) boxplot(BA~Compartment, data=env, col=coloury[,1])
################ # TPRdesignQTL # ################ #' TPR for MPP design x QTL combination #' #' Computes and organises the TPR results for each combination of MPP design #' and type of QTL. The TPR is averaged over the repetition and model. #' The function returns the percentage of time a specific type #' of QTL effect was detected by in a specific MPP design over a number of #' repetitions and QTL detection models. #' #' @param QTL_true list of true QTL positions. #' #' @param QTL_detected list results of detected QTLs. #' #' @param d_QTL distance to the QTL. #' #' @param n_des Number of MPP design. Default = 9. #' #' @param n_mod Number of QTL detection models. Default = 4. #' #' @param n_QTL Number of QTL. Default = 8. #' #' @param MPP_names MPP design names. #' #' @return A matrix with the precentage of detection over the whole simulation #' for each type of QTL and MPP design combination. #' #' @author Vincent Garin #' #' @export # QTL_true = Q_sel[1:20] # QTL_detected = Q_res # d_QTL = 10 # n_des = 9 # n_mod = 4 # n_QTL = 8 # MPP_names = MPP_names TPRdesignQTL <- function(QTL_true, QTL_detected, d_QTL, n_des = 9, n_mod = 4, n_QTL = 8, MPP_names){ n_rep <- length(QTL_detected) TPR_res <- vector(mode = "list", length = n_rep) for(r in 1:n_rep){ QTL_true_r <- QTL_true[[r]] # fix the true QTL QTL_det_r <- QTL_detected[[r]] # create a list to store the results for each MPP design TPR_MPP <- vector(mode = "list", length = n_des) for(i in 1:n_des){ QTL_det_i <- QTL_det_r[[i]] res <- matrix(0, n_mod, n_QTL) for(j in 1:n_QTL){ QTL_true_rj <- QTL_true_r[QTL_true_r$Qeff == j, ] # iterate over the different models TPR_j <- rep(0, n_mod) for(k in 1:n_mod){ if(!is.data.frame(QTL_det_i[[k]])){ if(QTL_det_i[[k]] == "no_QTL"){ TPR_j[k] <- 0 } else if (QTL_det_i[[k]] == "error") { TPR_j[k] <- NA } } else { TPR_j[k] <- TPR(QTL.true = QTL_true_rj, QTL.detected = QTL_det_i[[k]], distance = d_QTL) } } res[, j] <- TPR_j } res <- colSums(res, na.rm = TRUE) names(res) <- paste0("Q", 1:n_QTL) TPR_MPP[[i]] <- res } names(TPR_MPP) <- MPP_names TPR_res[[r]] <- TPR_MPP } # sum over the Replication and MPP design glb_res <- matrix(0, n_des, n_QTL) TPR_res_mat <- lapply(X = TPR_res, FUN = function(x, n_des) matrix(unlist(x), nrow = n_des, byrow = TRUE), n_des = n_des) for(y in 1:n_rep){ glb_res <- glb_res + TPR_res_mat[[y]] } rownames(glb_res) <- MPP_names colnames(glb_res) <- paste0("Q", 1:n_QTL) N_tot <- n_rep * n_mod av_res <- (glb_res/N_tot) * 100 return(av_res) }
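# The commented objects above (Q_sel, Q_res, MPP_names) come from the author's
# simulation workspace and are not defined in this file, so the following is only
# a hedged sketch of the expected call shape, assuming those objects plus the
# package's TPR() helper are available -- not a runnable standalone example.
# tpr_by_design <- TPRdesignQTL(QTL_true     = Q_sel[1:20],
#                               QTL_detected = Q_res,
#                               d_QTL        = 10,
#                               n_des = 9, n_mod = 4, n_QTL = 8,
#                               MPP_names    = MPP_names)
# round(tpr_by_design, 1)  # rows = MPP designs, columns = Q1..Q8, values = % detection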
/R/TPRdesignQTL.R
no_license
vincentgarin/mppSim
R
false
false
2,981
r
library(tidyverse) summaries = read_csv('eng/summaries.csv') summaries probs = read_csv('eng/probabilities.csv') probs swings = probs %>% group_by(season, matchid) %>% mutate( lagt = abs(probtie - lag(probtie)), lagh = abs(prob.h - lag(prob.h)), laga = abs(prob.a - lag(prob.a)), ) %>% # group_by(season, matchid) %>% summarise( swings = sum(lagt, na.rm = TRUE) + sum(lagh, na.rm = TRUE) + sum(laga, na.rm = TRUE) ) swings tension = probs %>% group_by(season, matchid) %>% mutate( tenst = abs(probtie - 0.33), tensh = abs(prob.h - 0.33), tensa = abs(prob.a - 0.33) ) %>% summarise( tension = sum(tenst) + sum(tensa) + sum(tensh) ) %>% arrange(tension) summaries %>% left_join(swings) %>% left_join(tension) %>% mutate( swingsp = cut_number(swings, 100), swingsp = as.numeric(swingsp), tensionp = cut_number(tension, 100), tensionp = 101 - as.numeric(tensionp), ) %>% arrange(tension) %>% view()
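# A toy check of what the "swings" metric above measures: for one match, the swing is
# the summed absolute change in each outcome probability between successive rows.
# The numbers below are made up for illustration; tidyverse is assumed loaded as above.
toy <- tibble(
  season  = 2020, matchid = 1,
  prob.h  = c(0.40, 0.70, 0.55),
  prob.a  = c(0.35, 0.15, 0.25),
  probtie = c(0.25, 0.15, 0.20)
)
toy %>%
  group_by(season, matchid) %>%
  summarise(
    swings = sum(abs(prob.h  - lag(prob.h)),  na.rm = TRUE) +
             sum(abs(prob.a  - lag(prob.a)),  na.rm = TRUE) +
             sum(abs(probtie - lag(probtie)), na.rm = TRUE)
  )
# home: 0.30 + 0.15, away: 0.20 + 0.10, tie: 0.10 + 0.05 -> swings = 0.90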
/model/rate.R
no_license
ryanvmenezes/fbwatch
R
false
false
997
r
#HardCode
# dplyr provides select(), filter() and %>%; melt() is assumed here to come from reshape2
library(dplyr)
library(reshape2)

bad_drivers <- read.csv(file = "data/bad-drivers.csv", quote = '')

# Selects State, Car Insurance Premiums, and Losses incurred by insurance companies for collisions per insured driver ($) from the CSV
Insurance_Information <- select(bad_drivers, State, Car.Insurance.Premiums...., Losses.incurred.by.insurance.companies.for.collisions.per.insured.driver....)

Alabama9 <- Insurance_Information %>% filter(State == "Alabama")
California9 <- Insurance_Information %>% filter(State == "California")
Delaware9 <- Insurance_Information %>% filter(State == "Delaware")
Florida9 <- Insurance_Information %>% filter(State == "Florida")
Georgia9 <- Insurance_Information %>% filter(State == "Georgia")
Hawaii9 <- Insurance_Information %>% filter(State == "Hawaii")
Idaho9 <- Insurance_Information %>% filter(State == "Idaho")
Kansas9 <- Insurance_Information %>% filter(State == "Kansas")
Louisiana9 <- Insurance_Information %>% filter(State == "Louisiana")
Maine9 <- Insurance_Information %>% filter(State == "Maine")
Nebraska9 <- Insurance_Information %>% filter(State == "Nebraska")
Ohio9 <- Insurance_Information %>% filter(State == "Ohio")
Pennsylvania9 <- Insurance_Information %>% filter(State == "Pennsylvania")
Tennessee9 <- Insurance_Information %>% filter(State == "Tennessee")
Utah9 <- Insurance_Information %>% filter(State == "Utah")
Vermont9 <- Insurance_Information %>% filter(State == "Vermont")
Washington9 <- Insurance_Information %>% filter(State == "Washington")

Specific_States <- rbind(Alabama9, California9, Delaware9, Florida9, Georgia9, Hawaii9, Idaho9, Kansas9, Louisiana9, Maine9, Nebraska9, Ohio9, Pennsylvania9, Tennessee9, Utah9, Vermont9, Washington9)

Specific_States_melt <- melt(Specific_States)
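# The seventeen single-state filters and the rbind() above can be collapsed into one
# %in% filter; a minimal sketch of the equivalent result, using the Insurance_Information
# data frame built above (states_of_interest is a helper vector introduced here).
states_of_interest <- c("Alabama", "California", "Delaware", "Florida", "Georgia",
                        "Hawaii", "Idaho", "Kansas", "Louisiana", "Maine", "Nebraska",
                        "Ohio", "Pennsylvania", "Tennessee", "Utah", "Vermont", "Washington")
Specific_States2 <- Insurance_Information %>%
  filter(State %in% states_of_interest)
# Should match the hardcoded Specific_States result up to row order
identical(sort(Specific_States2$State), sort(Specific_States$State))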
/EmanHardcode.R
no_license
nehay100/SHENFinalProject
R
false
false
1,759
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 2.07611487262023e-289, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615771645-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
362
r
\name{hlpm} \alias{hlpm} \docType{package} \title{ Hierarchical local pivotal method } \description{ Hierarchical local pivotal method (hlpm) selects an initial sample using the local pivotal method and then splits the sample into subsamples of given sizes using a successive (hierarchical) selection with the local pivotal method. Can be used with any prescribed inclusion probabilities that sum to an integer n. The sizes of the subsamples must also sum to n. It is used to select several subsamples such that each subsample is spatially balanced and the combined sample (the union of the subsamples) is also spatially balanced. Licence (GPL >=2). } \usage{ hlpm(p,X,sizes) } \arguments{ \item{p}{vector of inclusion probabilities for initial sample.} \item{X}{matrix of auxiliary variables.} \item{sizes}{vector of sizes of subsamples whose sum must match the sum of the initial inclusion probabilities.} } \value{Returns a list with population indexes of initial sample S and a vector sampleNumber indicating the number of the subsample of each unit.} \examples{ \dontrun{ ############ ## Example with two subsamples ############ N = 100; # population size X = cbind(runif(N),runif(N)); # auxiliary variables n = 10; # size of initial sample p = rep(n/N,N); # inclusion probabilities of initial sample sizes = c(7,3); # sizes of the two subsamples hlpm(p,X,sizes) # selection of samples using hierarchical local pivotal method } }
/issuestests/BalancedSampling/man/hlpm.Rd
no_license
akhikolla/RcppDeepStateTest
R
false
false
1,444
rd
# Load packages library('ggplot2') library('ggthemes') library('scales') library('dplyr') library('mice') library('randomForest') library('plyr') #install.packages("corrplot") library(corrplot) #setwd("~/Desktop/MSA/msa_mh8111_as1_kaggle_project/") Train <- read.csv("train.csv", header=TRUE,stringsAsFactors = FALSE, na.strings=c("", "NA")) Test <- read.csv("test.csv", header=TRUE,stringsAsFactors = FALSE, na.strings=c("", "NA")) #raw data exploration for missing data summary(is.na(Train)) summary(is.na(Test)) myfunction <- function(my_data) { feature_names = colnames(my_data) for (col_i in feature_names) { is_na = is.na(my_data[col_i]) if (sum(is_na)) { print(summary(is_na)) } } } myfunction(Train) myfunction(Test) #new feature Per Passenger Fare Train$PerPassengerFare = Train$Fare / (Train$SibSp + Train$Parch + 1) Train <- subset(Train, select = -c(Fare)) summary(Train) #new feature NoFare print(subset(Test, Test$Fare == 0)) print(subset(Test, Test$Ticket == "LINE")) print(subset(Train, Train$PerPassengerFare == 0)) print(subset(Train, Train$Ticket == "LINE")) Train$NoFare <- (Train$PerPassengerFare < 0.01) print(sum(Train$NoFare == TRUE)) summary(Train) #new feature FamilySize Train$FamilySize = Train$SibSp + Train$Parch + 1 Train <- subset(Train, select = -c(SibSp, Parch)) summary(Train) #new feature Social Status by Ming Xiu [TODO: to copy over] Train$Title <- gsub('(.*, )|(\\..*)', '', Train$Name) commoner_title <- c('Dona', 'Don', 'Miss','Mlle','Mme','Mr','Mrs','Ms') royalty_title <- c('Lady', 'the Countess', 'Sir', 'Jonkheer', 'Master') rank_title <- c('Capt', 'Col', 'Dr', 'Major', 'Rev') Train$SocialClass[Train$Title %in% commoner_title] <- 'commoner' Train$SocialClass[Train$Title %in% royalty_title] <- 'royal' Train$SocialClass[Train$Title %in% rank_title] <- 'rank' #Handling Cabin missing data #####dropping feature completely, see arguments in slides Train <- subset(Train, select = -c(Cabin)) summary(Train) #Per Passenger Fare grouped by Pclass p1AvgFare = mean(subset(Train$PerPassengerFare, Train$Pclass == 1)) p1AvgFare p2AvgFare = mean(subset(Train$PerPassengerFare, Train$Pclass == 2)) p2AvgFare p3AvgFare = mean(subset(Train$PerPassengerFare, Train$Pclass == 3)) p3AvgFare #Handling missing data Fare in Test #filling the missing Fare in Test, [TODO] need compute avgFare by class combining both Train and Test later print(subset(Test, is.na(Test$Fare))) #argument: age > 60, class = 3, highly unlikely to be Crew Labor, assigning mean of p3AvgFare to him #Handling missing data in Embarked with the most frequent value ##argument, only 2 are missing, there is no great value digging into it. 
tableEmbarkedReq = count(Train$Embarked) mostFreqValue = tableEmbarkedReq$x[which(tableEmbarkedReq$freq == max(tableEmbarkedReq$freq))] Train$Embarked[is.na(Train$Embarked)] <- mostFreqValue summary(is.na(Train)) #partial optimization Train$Embarked <- as.factor(Train$Embarked) Train$Sex <- as.factor(Train$Sex) Train$Pclass <- as.factor(Train$Pclass) Train$Title <- as.factor(Train$Title) Train$SocialClass <- as.factor(Train$SocialClass) summary(is.na(Train)) #Handling missing data in Age Approach 1: init = mice(Train, maxit=0) predM = init$predictorMatrix #remove PassengerId, Ticket column from the predicate predM[, c("PassengerId", "Ticket")]=0 imp<-mice(Train, m=5, predictorMatrix = predM) print(imp) summary(imp) Train1 <- complete(imp) summary(Train1) summary(is.na(Train1)) summary(Train1) summary(Train1$Age) dim(Train1) #Handling missing data in Age Approach 2: #linear regression Age as function of most correlated features summary(is.na(Train)) #split Train into NA and non-NA set AgeTrainToPredict <-Train[is.na(Train$Age), ] AgeTrainLRSet <- Train[!is.na(Train$Age), ] #compute the correlation matrix to get top 5 features that most correlated to Age AgeTrain_LR <- AgeTrainLRSet[, c("Pclass","Sex","Age","FamilySize","PerPassengerFare","Embarked", "NoFare", "Title", "SocialClass")] summary(AgeTrain_LR) AgeTrain_LR <- AgeTrain_LR %>% mutate_if(is.factor, as.numeric) corVal <- round(abs(cor(AgeTrain_LR, method = c("pearson"))), 2) print(corVal) #find the top 5 features correlated with Age in the non-NA set top_5_cor_features <- sort(corVal["Age",], decreasing = TRUE)[2:6] print(top_5_cor_features) #finding a multi linear regression model with the five features #note the strong correlation between SocialClass and Title, so we drop SocialClass in the LM AgeTrainLRSet$Title ageFit <- lm(Age ~ Pclass + Title + FamilySize + PerPassengerFare, data = AgeTrainLRSet) coeffs = coefficients(ageFit) print(coeffs) #check out this page for the metric explaination #http://r-statistics.co/Linear-Regression.html summary(ageFit) predRes <- predict(ageFit, AgeTrainToPredict) summary(predRes) print(predRes) #[Comments on MultiVariable linear regression] ###the age value we get from this model has negative values. ###he R-squared & Ajusted R-squared value is about 40%, the prediction accuracy is not promising, thus dropped. #[TODO] based on predicted age, categrize it into age groups.. seems a valid feature. #non-intuitive feature important # 1. feature important using random forest set.seed(100) rf_model <- randomForest(factor(Survived) ~ Pclass + Sex + Age + FamilySize + PerPassengerFare + Embarked + NoFare + Title + SocialClass, data = Train1, importance = TRUE) importance(rf_model) rf_model$confusion featureImportance <- varImpPlot(rf_model, sort = T, n.var = 9, main = "Top 9 - Variable Importance") plot(featureImportance) # 2. feature importance using other approaches #Train1_1 <- Train1[, c("Pclass","Sex","Age","FamilySize","PerPassengerFare","Embarked", "NoFare", "Title", "SocialClass")] Train1_1 <- Train1[, c("Pclass","Sex","Age","FamilySize","PerPassengerFare", "NoFare", "Title", "SocialClass")] Train1_1 <- Train1_1 %>% mutate_if(is.factor, as.numeric) summary(is.na(Train1_1)) print(Train1_1$Embarked) summary(is.na(Train1)) print(as.factor(Train1$Embarked)) corVal <- round(cor(Train1_1, method = c("pearson", "kendall", "spearman")), 2) print(corVal) corrplot(corVal, type = "upper", order = "hclust", tl.col = "black", tl.srt = 45) # #[TODO] Why this one has individual categories from each Feature????? 
set.seed(7)
library(mlbench)
library(caret)

Train1_2 <- Train1[, c("Survived", "Pclass","Sex","Age","FamilySize","PerPassengerFare", "NoFare", "Title", "SocialClass")]

#control <- trainControl(method="repeatedcv", number=10, repeats=3)
control <- trainControl(method="cv", number=5)
model <- train(Survived~., data=Train1_2, trControl=control, importance=TRUE)
featureImportance <- varImp(model)
print(featureImportance)
plot(featureImportance)

#use Boruta
requiredPackages <- c("Boruta", "mlbench")
if (length(setdiff(requiredPackages, rownames(installed.packages()))) > 0) {
  install.packages(setdiff(requiredPackages, rownames(installed.packages())))
}

# Libraries
library(Boruta) # mythological god of the forest; it is based on random forest
library(mlbench)
library(caret)
library(randomForest)

##################################################
# Feature Selection with the Boruta algorithm
##################################################
set.seed(111)
boruta <- Boruta(Survived ~ ., data = Train1, doTrace = 2, maxRuns = 500)
print(boruta)
plot(boruta)
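# Earlier in this script there is a TODO about turning the (imputed) ages into age-group
# categories as an extra feature. A minimal sketch of one way to do that with cut(),
# using the imputed Train1 data frame from above; the breakpoints and labels are
# illustrative choices, not taken from the original analysis.
Train1$AgeGroup <- cut(Train1$Age,
                       breaks = c(0, 12, 18, 40, 60, Inf),
                       labels = c("child", "teen", "adult", "middle_age", "senior"),
                       include.lowest = TRUE)
table(Train1$AgeGroup, Train1$Survived)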
/msa_mh8111_as1_kaggle_project/MH8111_Titanic.R
no_license
lifengzhi/msa_mh8101_or1_lp
R
false
false
7,434
r
library(PopGenome) ### Name: sweeps.stats-methods ### Title: Selective Sweeps ### Aliases: sweeps.stats,GENOME-method sweeps.stats-methods ### get.sweeps,GENOME-method get.sweeps-methods ### Keywords: methods ### ** Examples # Reading one alignment stored in the folder Aln # GENOME.class <- readData("\home\Aln") # # CL # GENOME.class <- sweeps.stats(GENOME.class) # GENOME.class@CL # # CLR # create global set # GENOME.class <- detail.stats(GENOME.class) # freq <- GENOME.class@region.stats@minor.allele.freqs[[1]] # freq.table <- list() # freq.table[[1]] <- table(freq) # define the region of interest # GENOME.class.split <- splitting.data(GENOME.class, positions= ...) # calculate CLR # GENOME.class.split <- sweeps.stats(GENOME.class.split, freq.table=freq.table) # GENOME.class@CLR
/data/genthat_extracted_code/PopGenome/examples/sweeps.stats-methods.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
803
r
# Table
HPC <- read.table("household_power_consumption.txt", head=T, sep = ";", na.strings = "?")

# Filter data set from Feb. 1, 2007 to Feb. 2, 2007
HPC <- subset(HPC, Date == "1/2/2007" | Date == "2/2/2007")

# Combine Date and Time column
HPC$DateTime <- paste(HPC$Date, HPC$Time)

# Format date and time
HPC$DateTime <- strptime(HPC$DateTime, "%d/%m/%Y %H:%M:%S")

#PLOT 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(HPC, {
  plot(HPC$DateTime, HPC$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
  plot(HPC$DateTime, HPC$Voltage, type="l", ylab="Voltage (volt)", xlab="")
  plot(HPC$DateTime, HPC$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
  lines(HPC$DateTime, HPC$Sub_metering_2, col="red")
  lines(HPC$DateTime, HPC$Sub_metering_3, col="blue")
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(HPC$DateTime, HPC$Global_reactive_power, type="l", ylab="Global Reactive Power (kilowatts)", xlab="")
})

#PNG
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
/plot4.R
no_license
lucianegoes/ExData_Plotting1
R
false
false
1,328
r
## This file contains functions:
## 1) to create a matrix object that can cache its inverse, and
## 2) to compute the inverse of the matrix returned by the first function
##
## create matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  minv <- NULL
  set <- function(y){
    x <<- y
    minv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) minv <<- inverse
  getinverse <- function() minv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## computes the inverse of matrix using cached value if it exists
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  minv <- x$getinverse()
  if(!is.null(minv)) {
    message("getting cached data")
    return(minv)
  }
  data <- x$get()
  minv <- solve(data)
  x$setinverse(minv)
  minv
}
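# A minimal usage sketch of the two functions above; the 2 x 2 matrix is an arbitrary
# invertible example, and the second cacheSolve() call should print "getting cached data".
m <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2))
cacheSolve(m)                            # computes, caches, and returns the inverse
cacheSolve(m)                            # returns the cached inverse
m$set(matrix(c(1, 2, 3, 4), nrow = 2))   # set() replaces the matrix and clears the cache
cacheSolve(m)                            # recomputes for the new matrix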
/cachematrix.R
no_license
Rbrown567/ProgrammingAssignment2
R
false
false
875
r
require(data.table) #fread in the dataset initially setting all columns to characters allowing all data to be read in #and avoiding any issues with data conversions on the load. Any required conversions are done subsequently DT<-fread("household_power_consumption.txt",colClasses='character') setkey(DT,Date) #subset the dataset to keep only the 2 required dates DT<-DT[c("1/2/2007","2/2/2007")] require(lubridate) #convert Sub_meterings from char to numeric and Date&Time to datetime DT[,datetime:=dmy_hms(paste(Date, Time))] for (col in c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) set(DT, j=col, value=as.numeric(DT[[col]])) #set graphing device to png - note width = 480, height = 480 are the default values png(file = "plot3.png",bg = "transparent") #first calculate the ranges for X and Y so the graph is properly scaled yrange<-range(c(DT$Sub_metering_1,DT$Sub_metering_2,DT$Sub_metering_3)) xrange<-range(DT$datetime) #plot graph with option "n" so multiple lines could be added later plot(xrange,yrange,type="n", xlab="",ylab="Energy sub metering" ,pch=20) lines(DT$datetime, DT$Sub_metering_1, type="l", col="black") lines(DT$datetime, DT$Sub_metering_2, type="l", col="red") lines(DT$datetime, DT$Sub_metering_3, type="l", col="blue") legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),lty=c(1,1,1)) dev.off()
/plot3.R
no_license
lamakaha/ExData_Plotting1
R
false
false
1,386
r
piecewise <- function(enter, exit, event, cutpoints){ n <- length(cutpoints) + 1 ## No. of time intervals. d <- numeric(n) ## Events tt <- numeric(n) ## Risk times ## assume 0 <= enter < exit < \infty. nn <- length(enter) ## Check length(exit), length(event), etc. ## First interval: d[1] <- sum( event[( (exit <= cutpoints[1]) & (exit > 0) )] ) left <- pmin( enter, cutpoints[1] ) right <- pmin( exit, cutpoints[1] ) tt[1] <- sum(right - left) ## Intervals 2, ..., (n - 1): for ( j in 2:(n-1) ){ d[j] <- sum( event[( (exit <= cutpoints[j]) & (exit > cutpoints[j-1]) )] ) left <- pmin( pmax(enter, cutpoints[j-1]), cutpoints[j]) right <- pmax( pmin(exit, cutpoints[j]), cutpoints[j-1] ) tt[j] <- sum(right - left) } ## Last interval: d[n] <- sum( event[ (exit > cutpoints[n - 1]) ] ) left <- pmax( enter, cutpoints[n-1] ) right <- pmax( exit, cutpoints[n-1] ) tt[n] <- sum(right - left) intensity <- ifelse(tt > 0, d / tt, NA) list(events = d, exposure = tt, intensity = intensity) }
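# A small self-contained sketch of calling piecewise() above on simulated survival data;
# the entry/exit times, event indicator, and cutpoints are made up for illustration.
set.seed(1)
n <- 500
enter <- rep(0, n)                 # everyone enters follow-up at time 0
exit  <- rexp(n, rate = 0.1)       # follow-up (exit) times
event <- rbinom(n, 1, 0.7)         # 1 = event observed, 0 = censored at exit
piecewise(enter, exit, event, cutpoints = c(5, 10))
# $intensity is events / exposure within the intervals (0, 5], (5, 10], and (10, Inf)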
/eha/R/piecewise.R
no_license
ingted/R-Examples
R
false
false
1,154
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docx_ASAP.r
\name{docxASAP}
\alias{docxASAP}
\title{docxASAP}
\usage{
docxASAP(
  wd,
  asap.name,
  od = NULL,
  docx.name = "docxASAP.docx",
  control.file = "docxASAP.csv",
  first.figure.number = 1,
  figure.prefix = "",
  plotf = "png",
  use.group = NULL,
  append.asap.name.caption = TRUE
)
}
\arguments{
\item{wd}{directory where ASAP run is located}

\item{asap.name}{Base name of original dat file (without the .dat extension)}

\item{od}{output directory for plots and csv files (default = NULL means wd\\plots\\)}

\item{docx.name}{name of Word document to create (default = docxASAP.docx)}

\item{control.file}{csv file of parameters defining plots to add to docx (default = docxASAP.csv)}

\item{first.figure.number}{starting value for figures, successive figures based on Order}

\item{figure.prefix}{text to add before figure number, e.g., "B" becomes Figure B1. (default = "")}

\item{plotf}{type of plot to save (default = 'png')}

\item{use.group}{only add figures from this Group to docx (default = NULL means all groups added)}

\item{append.asap.name.caption}{flag to add (TRUE = default) asap.name to end of figure caption text}
}
\description{
Create a Word document of the figures from ASAPplots with predefined captions.
}
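# A hedged call sketch using only the arguments documented above; the directory and run
# name are placeholders, and the ASAP run is assumed to already contain ASAPplots output
# plus a docxASAP.csv control file (with the Order and Group fields the arguments refer to).
# docxASAP(wd = "C:/ASAP/runs/my_run",        # placeholder ASAP run directory
#          asap.name = "my_run",              # placeholder base name of the .dat file
#          docx.name = "my_run_figures.docx",
#          first.figure.number = 1,
#          figure.prefix = "B",               # captions numbered Figure B1, B2, ...
#          plotf = "png",
#          use.group = NULL)                  # NULL = include all Groups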
/man/docxASAP.Rd
permissive
cmlegault/ASAPplots
R
false
true
1,369
rd
# ----------------------------------------- # # Create aim 2 data set prior to haplotypes # # Mozzie Phase 1 # # September 19, 2019 # # K. Sumner # # ----------------------------------------- # #### --------- load packages ----------------- #### library(readr) library(dplyr) library(tidyr) library(lubridate) library(data.table) library(tableone) library(stringr) #### ----- AMA ------- #### #### ---------- load in the data sets ---------- #### # read in the merged anpopheles mosquito data set anoph_merged_data = read_rds("/Users/kelseysumner/Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/Final Cohort data June 2017 to July 2018/Mosquito data/clean data/merged_data/spat21_mosquito_anopheles_merged_data_18JAN2019.RDS") # read in the full human data set final_data = read_rds("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/Final Cohort data June 2017 to July 2018/Human data/spat21_clean_human_files/merged_files/final merged data/spat21_human_final_censored_data_for_dissertation_with_exposure_outcome_1OCT2019.rds") # read in the ama simplified edgelist ama_edgelist = read_csv("Desktop/clean_ids_haplotype_results/AMA/AMA_haplotypes_edgelist_simplified_number_haps_shared.csv") #### ------ clean up the edgelists to be in the proper format --------- #### # first look at the columns colnames(ama_edgelist) # remove the X1 column ama_edgelist = ama_edgelist %>% dplyr::select(-"X1") # look at how many unique observations length(unique(ama_edgelist$from)) # 1115 length(unique(ama_edgelist$to)) # 1115 # looks correct # subset the data set to just have heads in left column and mosquitoes in right column table(nchar(ama_edgelist$from)) table(nchar(ama_edgelist$to)) # remove the rows where both the to and from columns are human or mosquito samples ama_edgelist = ama_edgelist %>% filter(!(str_detect(from,"-") & str_detect(to,"-"))) %>% filter(!(str_detect(from," ") & str_detect(to," "))) # make the first column human samples and second column mosquito samples # create a for loop that checks to see if each sample is sharing with a mosquito or not # switch the from column first new_from = rep(NA,nrow(ama_edgelist)) new_to = rep(NA,nrow(ama_edgelist)) for (i in 1:nrow(ama_edgelist)){ if (str_detect(ama_edgelist$from[i]," ")){ new_from[i] = ama_edgelist$to[i] new_to[i] = ama_edgelist$from[i] } else { new_from[i] = ama_edgelist$from[i] new_to[i] = ama_edgelist$to[i] } } ama_edgelist$from = new_from ama_edgelist$to = new_to # rename the column headers in the ama edgelist ama_edgelist = ama_edgelist %>% rename("sample_name_dbs"="from","sample_id_mosquito"="to","haps_shared"="weight") # split up the edgelist into the shared mosquito heads and abdomens ama_edgelist_head = ama_edgelist %>% filter(str_detect(sample_id_mosquito,"H")) %>% rename("sample_id_head"="sample_id_mosquito") ama_edgelist_abdomen = ama_edgelist %>% filter(str_detect(sample_id_mosquito,"A")) %>% rename("sample_id_abdomen"="sample_id_mosquito") #### --------- subset the human and mosquito data sets to just the variables of interest ---------- #### # make sure main exposure and main outcome for primary case definition are factors final_data$main_exposure_primary_case_def = as.factor(final_data$main_exposure_primary_case_def) final_data$main_outcome_primary_case_def = as.factor(final_data$main_outcome_primary_case_def) # select variables you need for human data colnames(final_data) human_data = final_data %>% filter(main_exposure_primary_case_def == 
"asymptomatic infection" | main_outcome_primary_case_def == "symptomatic infection") %>% dplyr::select(visit_type,sample_id_date,sample_name_final,sample_name_dbs,age_cat_baseline,unq_memID,village_name,HH_ID,main_exposure_primary_case_def,main_outcome_primary_case_def,pfr364Q_std_combined,age_all_baseline) %>% mutate(aim2_exposure = ifelse(is.na(main_exposure_primary_case_def),as.character(main_outcome_primary_case_def),as.character(main_exposure_primary_case_def))) %>% dplyr::select(-main_exposure_primary_case_def,-main_outcome_primary_case_def,-visit_type) # select variables you need for mosquito data colnames(anoph_merged_data) mosquito_data = anoph_merged_data %>% filter(!(is.na(sample_id_head) & is.na(sample_id_abdomen)) | sample_id_mosquito == "K01 00030" | sample_id_mosquito == "K01 00047") %>% dplyr::select(HH_ID,collection_date,total_num_mosq_in_hh,sample_id_abdomen,sample_id_head,sample_id_mosquito) # note: there are 15 entries where the lab didn't have mosquitoes so didn't have separate head and abdomen ids, removed these entries # K01 00030 and K01 00047 were sequenced and pf positive but were original test samples so weren't in the normal qpcr data set # add their information here for the data set ids mosquito_data$sample_id_abdomen[which(mosquito_data$sample_id_mosquito=="K01 00030")] = "K01 A00030" mosquito_data$sample_id_head[which(mosquito_data$sample_id_mosquito=="K01 00030")] = "K01 H00030" mosquito_data$sample_id_abdomen[which(mosquito_data$sample_id_mosquito=="K01 00047")] = "K01 A00047" mosquito_data$sample_id_head[which(mosquito_data$sample_id_mosquito=="K01 00047")] = "K01 H00047" ## --- write code to work with the mosquito heads outcome # check how the samples would merge with the full data set before asymptomatic/symptomatic criteria is enforced # check this observation final_data = final_data %>% dplyr::select(sample_name_dbs,sample_name_final) merge_check = left_join(ama_edgelist_head,final_data,by="sample_name_dbs") merge_check %>% filter(is.na(sample_name_final)) %>% View() # add the human and mosquito data sets' variables to the edgelist # the edgelist will be the level of analysis # merge the human info first ama_edgelist_head = left_join(ama_edgelist_head,human_data,by="sample_name_dbs") # then merge the mosquito info ama_edgelist_head = left_join(ama_edgelist_head,mosquito_data,by="sample_id_head") # check the merge ama_edgelist_head %>% filter(is.na(sample_name_final)) %>% View() ama_edgelist_head %>% filter(is.na(collection_date)) %>% View() # the samples that didn't merge did not meet the case definition for an asymptomatic or symptomatic infection # this observation has been shown in the code chunk above around line 110 # rename some of the variables in the data set for clarity colnames(ama_edgelist_head) ama_edgelist_head = ama_edgelist_head %>% rename("sample_id_human" = "sample_name_dbs","HH_ID_human"="HH_ID.x","HH_ID_mosquito"="HH_ID.y","human_date"="sample_id_date","mosquito_date"="collection_date") %>% dplyr::select(-sample_id_abdomen,-sample_id_mosquito) colnames(ama_edgelist_head) # first create a variable that is the time diff between human and mosquito samples subtract human time from mosquito time # if time date difference is positive than the mosquito was collected after the human sample ama_edgelist_head = ama_edgelist_head %>% mutate(date_difference = mosquito_date - human_date) # now restrict the merged data set to only shared haplotypes with the same HH_ID length(which(ama_edgelist_head$HH_ID_human==ama_edgelist_head$HH_ID_mosquito)) # 
# 3 obs are in same HH
ama_edgelist_head = ama_edgelist_head %>%
  filter(HH_ID_human==HH_ID_mosquito)
# now restrict the merged data set to only shared haplotypes in the correct time frame
ama_edgelist_head = ama_edgelist_head %>%
  filter(date_difference >= 10 & date_difference < 32) # between 10 and 31 days
# clean up the final merged data set for the mosquito heads
colnames(ama_edgelist_head)
ama_edgelist_head = ama_edgelist_head %>%
  rename(HH_ID = HH_ID_human) %>%
  dplyr::select(-HH_ID_mosquito,-date_difference)
# count how many haplotypes were shared between mosquito heads and humans
length(which(ama_edgelist_head$haps_shared >0)) # 79 heads
# write out the edgelist
write_rds(ama_edgelist_head,"Desktop/spat21_ama_edgelist_head_29OCT2019.rds")
write_csv(ama_edgelist_head,"Desktop/spat21_ama_edgelist_head_29OCT2019.csv")

## --- write code to work with the mosquito abdomens outcome
# add the human and mosquito data sets' variables to the edgelist
# the edgelist will be the level of analysis
ama_edgelist_abdomen = left_join(ama_edgelist_abdomen,human_data,by="sample_name_dbs")
ama_edgelist_abdomen = left_join(ama_edgelist_abdomen,mosquito_data,by="sample_id_abdomen")
# check the merge
ama_edgelist_abdomen %>% filter(is.na(sample_name_final)) %>% View()
ama_edgelist_abdomen %>% filter(is.na(collection_date)) %>% View()
# the samples that didn't merge did not meet the case definition for an asymptomatic or symptomatic infection
# this observation has been shown in the code chunk above around line 110
# rename some of the variables in the data set for clarity
colnames(ama_edgelist_abdomen)
ama_edgelist_abdomen = ama_edgelist_abdomen %>%
  rename("sample_id_human" = "sample_name_dbs","HH_ID_human"="HH_ID.x","HH_ID_mosquito"="HH_ID.y","human_date"="sample_id_date","mosquito_date"="collection_date") %>%
  dplyr::select(-sample_id_head,-sample_id_mosquito)
colnames(ama_edgelist_abdomen)
# first create a variable that is the time difference between human and mosquito samples: subtract human time from mosquito time
# if the date difference is positive, then the mosquito was collected after the human sample
ama_edgelist_abdomen = ama_edgelist_abdomen %>%
  mutate(date_difference = mosquito_date - human_date)
# now restrict the merged data set to only shared haplotypes with the same HH_ID
length(which(ama_edgelist_abdomen$HH_ID_human==ama_edgelist_abdomen$HH_ID_mosquito)) # 3 obs are in same HH
ama_edgelist_abdomen = ama_edgelist_abdomen %>%
  filter(HH_ID_human==HH_ID_mosquito)
# now restrict the merged data set to only shared haplotypes in the correct time frame
ama_edgelist_abdomen = ama_edgelist_abdomen %>%
  filter(date_difference >= 0 & date_difference < 15) # between 0 and 14 days
# clean up the final merged data set for the mosquito abdomens
colnames(ama_edgelist_abdomen)
ama_edgelist_abdomen = ama_edgelist_abdomen %>%
  rename(HH_ID = HH_ID_human) %>%
  dplyr::select(-HH_ID_mosquito,-date_difference)
# count how many haplotypes were shared between mosquito abdomens and humans
length(which(ama_edgelist_abdomen$haps_shared >0)) # 106 abdomens
# write out the edgelist
write_rds(ama_edgelist_abdomen,"Desktop/spat21_ama_edgelist_abdomen_29OCT2019.rds")
write_csv(ama_edgelist_abdomen,"Desktop/spat21_ama_edgelist_abdomen_29OCT2019.csv")


#### ----- CSP ------- ####

#### ---------- load in the data sets ---------- ####
# read in the merged anopheles mosquito data set
anoph_merged_data = read_rds("/Users/kelseysumner/Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/Final Cohort data June 2017 to July 2018/Mosquito data/clean data/merged_data/spat21_mosquito_anopheles_merged_data_18JAN2019.RDS")
# read in the full human data set
final_data = read_rds("Desktop/Dissertation Materials/SpatialR21 Grant/Final Dissertation Materials/Final Data Sets/Final Cohort data June 2017 to July 2018/Human data/spat21_clean_human_files/merged_files/final merged data/spat21_human_final_censored_data_for_dissertation_with_exposure_outcome_1OCT2019.rds")
# read in the csp simplified edgelist
csp_edgelist = read_csv("Desktop/clean_ids_haplotype_results/CSP/CSP_haplotypes_edgelist_simplified_number_haps_shared.csv")

#### ------ clean up the edgelists to be in the proper format --------- ####
# first look at the columns
colnames(csp_edgelist)
# remove the X1 column
csp_edgelist = csp_edgelist %>%
  dplyr::select(-"X1")
# look at how many unique observations
length(unique(csp_edgelist$from)) # 1280
length(unique(csp_edgelist$to)) # 1280
# looks correct
# subset the data set to just have humans in left column and mosquitoes in right column
table(nchar(csp_edgelist$from))
table(nchar(csp_edgelist$to))
# remove the rows where both the to and from columns are human samples
csp_edgelist = csp_edgelist %>%
  filter(!(str_detect(from,"-") & str_detect(to,"-"))) %>%
  filter(!(str_detect(from," ") & str_detect(to," ")))
# make the first column human samples and second column mosquito samples
# create a for loop that checks to see if each sample is sharing with a mosquito or not
# switch the from column first
new_from = rep(NA,nrow(csp_edgelist))
new_to = rep(NA,nrow(csp_edgelist))
for (i in 1:nrow(csp_edgelist)){
  if (str_detect(csp_edgelist$from[i]," ")){
    new_from[i] = csp_edgelist$to[i]
    new_to[i] = csp_edgelist$from[i]
  } else {
    new_from[i] = csp_edgelist$from[i]
    new_to[i] = csp_edgelist$to[i]
  }
}
csp_edgelist$from = new_from
csp_edgelist$to = new_to
# rename the column headers in the csp edgelist
csp_edgelist = csp_edgelist %>%
  rename("sample_name_dbs"="from","sample_id_mosquito"="to","haps_shared"="weight")
# split up the edgelist into the shared mosquito heads and abdomens
csp_edgelist_head = csp_edgelist %>%
  filter(str_detect(sample_id_mosquito,"H")) %>%
  rename("sample_id_head"="sample_id_mosquito")
csp_edgelist_abdomen = csp_edgelist %>%
  filter(str_detect(sample_id_mosquito,"A")) %>%
  rename("sample_id_abdomen"="sample_id_mosquito")

#### --------- subset the human and mosquito data sets to just the variables of interest ---------- ####
# make sure main exposure and main outcome for primary case definition are factors
final_data$main_exposure_primary_case_def = as.factor(final_data$main_exposure_primary_case_def)
final_data$main_outcome_primary_case_def = as.factor(final_data$main_outcome_primary_case_def)
# select variables you need for human data
colnames(final_data)
human_data = final_data %>%
  filter(main_exposure_primary_case_def == "asymptomatic infection" | main_outcome_primary_case_def == "symptomatic infection") %>%
  dplyr::select(visit_type,sample_id_date,sample_name_final,sample_name_dbs,age_cat_baseline,unq_memID,village_name,HH_ID,main_exposure_primary_case_def,main_outcome_primary_case_def,pfr364Q_std_combined,age_all_baseline) %>%
  mutate(aim2_exposure = ifelse(is.na(main_exposure_primary_case_def),as.character(main_outcome_primary_case_def),as.character(main_exposure_primary_case_def))) %>%
  dplyr::select(-main_exposure_primary_case_def,-main_outcome_primary_case_def,-visit_type)
# select variables you need for mosquito data
colnames(anoph_merged_data)
mosquito_data = anoph_merged_data %>%
  filter(!(is.na(sample_id_head) & is.na(sample_id_abdomen)) | sample_id_mosquito == "K01 00030" | sample_id_mosquito == "K01 00047") %>%
  dplyr::select(HH_ID,collection_date,total_num_mosq_in_hh,sample_id_abdomen,sample_id_head,sample_id_mosquito)
# note: there are 15 entries where the lab didn't have mosquitoes so didn't have separate head and abdomen ids, removed these entries
# K01 00030 and K01 00047 were sequenced and pf positive but were original test samples so weren't in the normal qpcr data set
# add their information here for the data set ids
mosquito_data$sample_id_abdomen[which(mosquito_data$sample_id_mosquito=="K01 00030")] = "K01 A00030"
mosquito_data$sample_id_head[which(mosquito_data$sample_id_mosquito=="K01 00030")] = "K01 H00030"
mosquito_data$sample_id_abdomen[which(mosquito_data$sample_id_mosquito=="K01 00047")] = "K01 A00047"
mosquito_data$sample_id_head[which(mosquito_data$sample_id_mosquito=="K01 00047")] = "K01 H00047"

## --- write code to work with the mosquito heads outcome
# check how the samples would merge with the full data set before the asymptomatic/symptomatic criteria are enforced
# check this observation
final_data = final_data %>%
  dplyr::select(sample_name_dbs,sample_name_final)
merge_check = left_join(csp_edgelist_head,final_data,by="sample_name_dbs")
merge_check %>% filter(is.na(sample_name_final)) %>% View()
# add the human and mosquito data sets' variables to the edgelist
# the edgelist will be the level of analysis
# merge the human info first
csp_edgelist_head = left_join(csp_edgelist_head,human_data,by="sample_name_dbs")
# then merge the mosquito info
csp_edgelist_head = left_join(csp_edgelist_head,mosquito_data,by="sample_id_head")
# check the merge
csp_edgelist_head %>% filter(is.na(sample_name_final)) %>% View()
csp_edgelist_head %>% filter(is.na(collection_date)) %>% View()
# the samples that didn't merge did not meet the case definition for an asymptomatic or symptomatic infection
# this observation has been shown in the code chunk above around line 110
# rename some of the variables in the data set for clarity
colnames(csp_edgelist_head)
csp_edgelist_head = csp_edgelist_head %>%
  rename("sample_id_human" = "sample_name_dbs","HH_ID_human"="HH_ID.x","HH_ID_mosquito"="HH_ID.y","human_date"="sample_id_date","mosquito_date"="collection_date") %>%
  dplyr::select(-sample_id_abdomen,-sample_id_mosquito)
colnames(csp_edgelist_head)
# first create a variable that is the time difference between human and mosquito samples: subtract human time from mosquito time
# if the date difference is positive, then the mosquito was collected after the human sample
csp_edgelist_head = csp_edgelist_head %>%
  mutate(date_difference = mosquito_date - human_date)
# now restrict the merged data set to only shared haplotypes with the same HH_ID
length(which(csp_edgelist_head$HH_ID_human==csp_edgelist_head$HH_ID_mosquito)) # 3 obs are in same HH
csp_edgelist_head = csp_edgelist_head %>%
  filter(HH_ID_human==HH_ID_mosquito)
# now restrict the merged data set to only shared haplotypes in the correct time frame
csp_edgelist_head = csp_edgelist_head %>%
  filter(date_difference >= 10 & date_difference < 32) # between 10 and 31 days
# clean up the final merged data set for the mosquito heads
colnames(csp_edgelist_head)
csp_edgelist_head = csp_edgelist_head %>%
  rename(HH_ID = HH_ID_human) %>%
  dplyr::select(-HH_ID_mosquito,-date_difference)
# count how many haplotypes were shared between mosquito heads and humans
length(which(csp_edgelist_head$haps_shared >0)) # 166 heads
# write out the edgelist
write_rds(csp_edgelist_head,"Desktop/spat21_csp_edgelist_head_29OCT2019.rds")
write_csv(csp_edgelist_head,"Desktop/spat21_csp_edgelist_head_29OCT2019.csv")

## --- write code to work with the mosquito abdomens outcome
# add the human and mosquito data sets' variables to the edgelist
# the edgelist will be the level of analysis
csp_edgelist_abdomen = left_join(csp_edgelist_abdomen,human_data,by="sample_name_dbs")
csp_edgelist_abdomen = left_join(csp_edgelist_abdomen,mosquito_data,by="sample_id_abdomen")
# check the merge
csp_edgelist_abdomen %>% filter(is.na(sample_name_final)) %>% View()
csp_edgelist_abdomen %>% filter(is.na(collection_date)) %>% View()
# the samples that didn't merge did not meet the case definition for an asymptomatic or symptomatic infection
# this observation has been shown in the code chunk above around line 110
# rename some of the variables in the data set for clarity
colnames(csp_edgelist_abdomen)
csp_edgelist_abdomen = csp_edgelist_abdomen %>%
  rename("sample_id_human" = "sample_name_dbs","HH_ID_human"="HH_ID.x","HH_ID_mosquito"="HH_ID.y","human_date"="sample_id_date","mosquito_date"="collection_date") %>%
  dplyr::select(-sample_id_head,-sample_id_mosquito)
colnames(csp_edgelist_abdomen)
# first create a variable that is the time difference between human and mosquito samples: subtract human time from mosquito time
# if the date difference is positive, then the mosquito was collected after the human sample
csp_edgelist_abdomen = csp_edgelist_abdomen %>%
  mutate(date_difference = mosquito_date - human_date)
# now restrict the merged data set to only shared haplotypes with the same HH_ID
length(which(csp_edgelist_abdomen$HH_ID_human==csp_edgelist_abdomen$HH_ID_mosquito)) # 3 obs are in same HH
csp_edgelist_abdomen = csp_edgelist_abdomen %>%
  filter(HH_ID_human==HH_ID_mosquito)
# now restrict the merged data set to only shared haplotypes in the correct time frame
csp_edgelist_abdomen = csp_edgelist_abdomen %>%
  filter(date_difference >= 0 & date_difference < 15) # between 0 and 14 days
# clean up the final merged data set for the mosquito abdomens
colnames(csp_edgelist_abdomen)
csp_edgelist_abdomen = csp_edgelist_abdomen %>%
  rename(HH_ID = HH_ID_human) %>%
  dplyr::select(-HH_ID_mosquito,-date_difference)
# count how many haplotypes were shared between mosquito abdomens and humans
length(which(csp_edgelist_abdomen$haps_shared >0)) # 234 abdomens
# write out the edgelist
write_rds(csp_edgelist_abdomen,"Desktop/spat21_csp_edgelist_abdomen_29OCT2019.rds")
write_csv(csp_edgelist_abdomen,"Desktop/spat21_csp_edgelist_abdomen_29OCT2019.csv")
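# Aside (a sketch, not part of the original script): the for loop used above to put human
# samples in the "from" column and mosquito samples in the "to" column can also be written
# vectorized with ifelse(). A minimal, self-contained illustration on toy data; the sample
# names below are made up, but follow the script's convention that mosquito IDs contain a
# space and human DBS IDs contain a dash.
swap_demo = data.frame(from = c("K01 H00030", "R-123456-X"),
                       to = c("R-654321-Y", "K01 A00047"),
                       weight = c(2, 1),
                       stringsAsFactors = FALSE)
mosq_first = str_detect(swap_demo$from, " ")
swap_demo = swap_demo %>%
  mutate(sample_name_dbs = ifelse(mosq_first, to, from),
         sample_id_mosquito = ifelse(mosq_first, from, to)) %>%
  dplyr::select(sample_name_dbs, sample_id_mosquito, weight)
# swap_demo now has the human sample first and the mosquito sample second,
# matching what the original loop produces for the real edgelist.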
/SpatialR21_project/code/aim2_analyses/spat21_create_aim2_dataset_prior_to_haplotypes.R
no_license
kelseysumner/taylorlab
R
false
false
20,730
r
library(raster)
library(sp)
library(tidyverse)

#### Climate Projections Summary ####
## GDD from May to October (GDD > 5 & 0)
## GDD from April to September (GDD > 5 & 0)
## precipitation in September NEED TO UPDATE SCRIPT
## precipitation in October
## max temp in June
## max temp in July
## max temp in August
## max temp in September
## min temperature in October
## min temperature in September
## min temp in April
## min temp in March
## min temp of December to March

#### Projection Variation ####
## SD for temperature projections
## Plot histograms for each 20 year period
## Precipitation most likely non-normal, check plots (maybe use IQR or CV) (no precipitation in this file yet)
## For nw, mw, and hw: take SD for each MEMBER (5 counts) then average them

#### Periods of Years ####
## 2040 - 2059 (moderate warming)

#### Initial Condition Member ####
## r1i1p1

#############################################
# file upload; file names will be the same for every member since
# this script does not describe the member within the filename,
# so make sure to keep files stored in different folders for each member
# for functionality these variables can update easily:
# 1. file path in same syntax as written in code
# 2. initial condition (note: output from ClimateBC is r11i1p1 but the "r1i1p1" seems to be the more likely name)
# 3. start and end Year

## loop to read all the files and assign to corresponding variable names
startYear <- 2040
endYear <- 2059
member <- "r11i1p1"

# outputs mean annual temperature as mat_YEAR
# YEAR <- startYear:endYear
for (i in startYear:endYear) {
  assign(paste("mat_", i, sep = ""),
         raster(
           paste("final_climate_proj/bcvin_raster/CanESM2_RCP85_", member, "_", i, "MSY/MAT.asc", sep = "")
         )
  )
}

#############################################
## tmax assigning variables
## outputs as tmax_XX_YEAR
## XX <- startMonth:endMonth
## YEAR <- startYear:endYear
startMonth <- 1
endMonth <- 12
for (i in startYear:endYear) {
  for (j in startMonth:endMonth) {
    if(j < 10){
      assign(paste("tmax_0", j, "_", i, sep = ""),
             raster(
               paste("final_climate_proj/bcvin_raster/CanESM2_RCP85_", member, "_", i, "MSY/Tmax", "0", j, ".asc", sep = "")
             )
      )
    } else{
      assign(paste("tmax_", j, "_", i, sep = ""),
             raster(
               paste("final_climate_proj/bcvin_raster/CanESM2_RCP85_", member, "_", i, "MSY/Tmax", j, ".asc", sep = "")
             )
      )
    }
  }
}

#############################################
# tmin assigning variables
## outputs as tmin_XX_YEAR
## XX <- startMonth:endMonth
## YEAR <- startYear:endYear
startMonth <- 1
endMonth <- 12
for (i in startYear:endYear) {
  for (j in startMonth:endMonth) {
    if(j < 10){
      assign(
        paste("tmin_0", j, "_", i, sep = ""),
        raster(
          paste("final_climate_proj/bcvin_raster/CanESM2_RCP85_", member, "_", i, "MSY/Tmin", "0", j, ".asc", sep = "")
        )
      ) #end assign
    } else{
      assign(
        paste("tmin_", j, "_", i, sep = ""),
        raster(
          paste("final_climate_proj/bcvin_raster/CanESM2_RCP85_", member, "_", i, "MSY/Tmin", j, ".asc", sep = "")
        )
      ) #end assign
    }
  }
}

#############################################
##########################################################################################
## data manipulation and writing derived files
# converting to average between startYear and endYear
# mw == moderate warming
##########################################################################################
# tmax mean and sd over the 20 year period; patience is key, this took me ~5 min to load
# assigning new variables
for(i in 1:12) {
  if(i < 10) {
    assign(
      paste("tmax_0", i, "_", "mw", sep = ""),
      lapply(
        paste("tmax_0", i,"_", startYear:endYear, sep = ""),
        get) %>%
        stack() %>%
        calc(fun = mean, na.rm = TRUE) %>%
        calc(fun = function(x){x/10})
    ) #end assign
    assign(
      paste("tmax_0", i, "_", "mw_sd", sep = ""),
      lapply(
        paste("tmax_0", i,"_", startYear:endYear, sep = ""),
        get) %>%
        stack() %>%
        calc(fun = sd, na.rm = TRUE) %>%
        calc(fun = function(x){x/10})
    ) #end assign
  }
  if(i >= 10) {
    assign(
      paste("tmax_", i, "_", "mw", sep = ""),
      lapply(
        paste("tmax_", i,"_", startYear:endYear, sep = ""),
        get) %>%
        stack() %>%
        calc(fun = mean, na.rm = TRUE) %>%
        calc(fun = function(x){x/10})
    ) #end assign
    assign(
      paste("tmax_", i, "_", "mw_sd", sep = ""),
      lapply(
        paste("tmax_", i,"_", startYear:endYear, sep = ""),
        get) %>%
        stack() %>%
        calc(fun = sd, na.rm = TRUE) %>%
        calc(fun = function(x){x/10})
    )
  }
}

# writing corresponding files
for(i in 1:12) {
  if(i < 10) {
    writeRaster(
      x = get(paste("tmax_0", i, "_mw", sep = "")),
      filename = paste("tmax_0", i, "_mw.asc", sep = "")
    )
    writeRaster(
      x = get(paste("tmax_0", i, "_mw_sd", sep = "")),
      filename = paste("tmax_0", i, "_mw_sd.asc", sep = "")
    )
  }
  if(i >= 10) {
    writeRaster(
      x = get(paste("tmax_", i, "_mw", sep = "")),
      filename = paste("tmax_", i, "_mw.asc", sep = "")
    )
    writeRaster(
      x = get(paste("tmax_", i, "_mw_sd", sep = "")),
      filename = paste("tmax_", i, "_mw_sd.asc", sep = "")
    )
  }
}

#############################################
# tmin mean over the 20 year period
for(i in 1:12) {
  if(i < 10) {
    assign(
      paste("tmin_0", i, "_", "mw", sep = ""),
      lapply(
        paste("tmin_0", i,"_", startYear:endYear, sep = ""),
        get) %>%
        stack() %>%
        calc(fun = mean, na.rm = TRUE) %>%
        calc(fun = function(x){x/10})
    )
  }
  if(i >= 10) {
    assign(
      paste("tmin_", i, "_", "mw", sep = ""),
      lapply(
        paste("tmin_", i,"_", startYear:endYear, sep = ""),
        get) %>%
        stack() %>%
        calc(fun = mean, na.rm = TRUE) %>%
        calc(fun = function(x){x/10})
    )
  }
}

# writing corresponding files
for(i in 1:12) {
  if(i < 10) {
    writeRaster(
      x = get(paste("tmin_0", i, "_mw", sep = "")),
      filename = paste("tmin_0", i, "_mw.asc", sep = "")
    )
    writeRaster(
      x = get(paste("tmin_0", i, "_mw_sd", sep = "")),
      filename = paste("tmin_0", i, "_mw_sd.asc", sep = "")
    )
  }
  if(i >= 10) {
    writeRaster(
      x = get(paste("tmin_", i, "_mw", sep = "")),
      filename = paste("tmin_", i, "_mw.asc", sep = "")
    )
    writeRaster(
      x = get(paste("tmin_", i, "_mw_sd", sep = "")),
      filename = paste("tmin_", i, "_mw_sd.asc", sep = "")
    )
  }
}

#############################################
mat_mw <- lapply(
  paste("mat_", startYear:endYear, sep = ""),
  get
) %>%
  stack() %>%
  calc(fun = mean, na.rm = TRUE) %>%
  calc(fun = function(x){x/10})
mat_mw_sd <- lapply(
  paste("mat_", startYear:endYear, sep = ""),
  get
) %>%
  stack() %>%
  calc(fun = sd, na.rm = TRUE) %>%
  calc(fun = function(x){x/10})
writeRaster(mat_mw, filename = "mat_mw.asc")
writeRaster(mat_mw_sd, filename = "mat_mw_sd.asc")

#############################################
# December through March
stack(tmin_12_mw, tmin_01_mw, tmin_02_mw, tmin_03_mw) %>%
  calc(fun = mean) %>%
  writeRaster(filename = "tmin_12_03_mw.asc")

#############################################
# Growing Degree Days
# formula is the summation of ((Tmax + Tmin)/2 - 10) * days in the month
# negative values are converted to zero. Reference: Parker et al

# September
GDD_09_mw <- overlay(tmax_09_mw, tmin_09_mw, fun = function(r1, r2){((r1 + r2) / 2 - 10) * 30})
GDD_09_mw[GDD_09_mw < 0] <- 0
writeRaster(GDD_09_mw, "GDD_09_mw.asc")

# October
GDD_10_mw <- overlay(tmax_10_mw, tmin_10_mw, fun = function(r1, r2){((r1 + r2) / 2 - 10) * 31})
GDD_10_mw[GDD_10_mw < 0] <- 0
writeRaster(GDD_10_mw, "GDD_10_mw.asc")

#############################################
# May to October
# 31 + 30 + 31 + 31 + 30 + 31 = 184 days in May through October
tmax_05_10_mw <- stack(tmax_05_mw, tmax_06_mw, tmax_07_mw, tmax_08_mw, tmax_09_mw, tmax_10_mw) %>%
  calc(fun = mean)
tmin_05_10_mw <- stack(tmin_05_mw, tmin_06_mw, tmin_07_mw, tmin_08_mw, tmin_09_mw, tmin_10_mw) %>%
  calc(fun = mean)
GDD_05_10_mw <- overlay(tmin_05_10_mw, tmax_05_10_mw, fun = function(r1, r2){((r1 + r2) / 2 - 10) * 184})
GDD_05_10_mw[GDD_05_10_mw < 0] <- 0
writeRaster(GDD_05_10_mw, "GDD_05_10_mw.asc")

#############################################
# April to September
# 30 + 31 + 30 + 31 + 31 + 30 = 183 days in April through September
tmax_04_09_mw <- stack(tmax_04_mw, tmax_05_mw, tmax_06_mw, tmax_07_mw, tmax_08_mw, tmax_09_mw) %>%
  calc(fun = mean)
tmin_04_09_mw <- stack(tmin_04_mw, tmin_05_mw, tmin_06_mw, tmin_07_mw, tmin_08_mw, tmin_09_mw) %>%
  calc(fun = mean)
GDD_04_09_mw <- overlay(tmin_04_09_mw, tmax_04_09_mw, fun = function(r1, r2){((r1 + r2) / 2 - 10) * 183})
GDD_04_09_mw[GDD_04_09_mw < 0] <- 0
writeRaster(GDD_04_09_mw, "GDD_04_09_mw.asc")
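# Aside (a sketch, not part of the original script): a quick arithmetic check of the GDD
# formula used above, with made-up temperatures. For a September cell whose mean Tmax is
# 25 and mean Tmin is 11 (degrees C), ((25 + 11) / 2 - 10) * 30 = 240 growing degree days;
# monthly means below the base of 10 give a negative result and are floored at zero,
# which is what GDD_09_mw[GDD_09_mw < 0] <- 0 does on the raster.
gdd_example <- ((25 + 11) / 2 - 10) * 30
stopifnot(gdd_example == 240)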
/analyses/climate_projections/derivedfiles.R
no_license
sandyie/cloned-bcvin
R
false
false
10,067
r
##########################################
#                                        #
###         ADM PRACTICAL CA          ###
#                                        #
##########################################

# The key to success in any organization is attracting and retaining top talent.
# You are an HR analyst at my company, and one of your tasks is to determine which factors
# keep employees at my company and which prompt others to leave. We need to know what
# factors we can change to prevent the loss of good people.
# You have data about past and current employees in a spreadsheet. It has various data
# points on our employees, but we're most interested in whether they’re still with the
# company or whether they’ve gone to work somewhere else. And we want to understand how
# this relates to workforce attrition.

# Attributes:
# Age: in years
# Attrition: Y/N the dependent variable -- have they left the company?
# BusinessTravel: Non-Travel; Travel_Frequently; Travel_Rarely
# DailyRate: Consultancy Charge per Day
# Department: Human Resources; Research & Development; Sales
# DistanceFromHome: How far the employee lives from work
# Education: 1 'Below College'; 2 'College'; 3 'Bachelor'; 4 'Master'; 5 'Doctor'
# EducationField: Human Resources; Life Sciences; Marketing; Medical; Other; Technical Degree
# EmployeeCount: No. of employees in this record
# EmployeeNumber: Employee ID
# EnvironmentSatisfaction: 4 point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# Gender: Male / Female
# HourlyRate: Consultancy Charge per Hour
# JobInvolvement: 4 point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# JobLevel: Metadata not available -- make an assumption
# JobRole: Healthcare Representative; Human Resources; Laboratory Technician; Manager; Manufacturing Director; Research Director; Research Scientist; Sales Executive; Sales Representative
# JobSatisfaction: 4 point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# MaritalStatus: Divorced; Married; Single
# MonthlyIncome: monthly salary
# MonthlyRate: Consultancy Charge per Day
# NumCompaniesWorked: No. of previous employers
# Over18: Y/N
# OverTime: Yes/No
# PercentSalaryHike: Last Year's Increment
# PerformanceRating: 4 point Likert scale: 1 'Low'; 2 'Good'; 3 'Excellent'; 4 'Outstanding'
# RelationshipSatisfaction: 4 point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# StandardHours: Contract hours
# StockOptionLevel: No available metadata -- make an assumption :)
# TotalWorkingYears: Career Age
# TrainingTimesLastYear: No. of training courses attended last year
# WorkLifeBalance: 4 Point Likert Scale: 1 'Bad'; 2 'Good'; 3 'Better'; 4 'Best'
# YearsAtCompany: Time spent with company
# YearsInCurrentRole: Time in current role
# YearsSinceLastPromotion: No. of years since last promoted
# YearsWithCurrManager: Years spent with current manager

setwd("/Users/simoncaton/Documents/OneDriveBusiness/Teaching/ADM/Project and CA/") # change this to where you downloaded the .csv
hrdata <- read.csv("ADM CA 1 Data.csv", stringsAsFactors = T) # will autoencode the text attributes to factors

# ok, now we need to make a dataset unique to you
set.seed(1337) # <-- put your student number here WITHOUT the x!! Leave off a starting zero if you have one
# e.g.: set.seed(62345678)
my_dataset <- hrdata[order(runif(600)), ]

# let's remove ID, we probably don't want that:
my_dataset <- my_dataset[-10]

# Unfortunately, due to a technical error, 3 columns of the data were lost :(
# HR blamed IT, IT blamed HR, your manager will blame you, so let's just hope those columns weren't important!
col1 <- round(runif(1)*32)+2 # the +2 protects the Age and Attrition variables
col2 <- round(runif(1)*31)+2
col3 <- round(runif(1)*30)+2
cols <- names(my_dataset)
print(paste("I lost: ", cols[col1], ",", cols[col2], ",", cols[col3]))
# "I lost: StandardHours , OverTime , Gender"
my_dataset <- my_dataset[-col1]
my_dataset <- my_dataset[-col2]
my_dataset <- my_dataset[-col3]

# if you want to use something other than R save your dataset:
write.csv(file="mydata.csv", my_dataset, row.names = F)

# Now please begin, and good luck!
# Because you lost 3 columns, some models may/may not work as well,
# don't worry about this, I will control for it in the grading!
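# Aside (a sketch, not part of the starter code): one minimal way to begin the attrition
# analysis described above -- hold out a test set and fit a simple logistic regression.
# The 70/30 split and the particular predictors are arbitrary illustrative choices,
# not course requirements; swap predictors if one of them is among the columns you lost.
set.seed(123) # arbitrary, just so the illustration is reproducible
train_rows <- sample(seq_len(nrow(my_dataset)), size = floor(0.7 * nrow(my_dataset)))
train <- my_dataset[train_rows, ]
test <- my_dataset[-train_rows, ]
attrition_glm <- glm(Attrition ~ Age + MonthlyIncome + JobSatisfaction + YearsAtCompany,
                     data = train, family = binomial)
summary(attrition_glm)
table(predicted = predict(attrition_glm, newdata = test, type = "response") > 0.5,
      actual = test$Attrition)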
/ca2/CA 1 Starting Code.R
no_license
bhumi3696/Advance-Data-Mining
R
false
false
4,389
r
##########################################
#                                        #
###          ADM PRACTICAL CA          ###
#                                        #
##########################################

# The key to success in any organization is attracting and retaining top talent.
# You are an HR analyst at my company, and one of your tasks is to determine which factors
# keep employees at the company and which prompt others to leave. We need to know what
# factors we can change to prevent the loss of good people.
# You have data about past and current employees in a spreadsheet. It has various data
# points on our employees, but we're most interested in whether they're still with the
# company or whether they've gone to work somewhere else. And we want to understand how
# this relates to workforce attrition.

# Attributes:
# Age: in years
# Attrition: Y/N the dependent variable -- have they left the company?
# BusinessTravel: Non-Travel; Travel_Frequently; Travel_Rarely
# DailyRate: Consultancy charge per day
# Department: Human Resources; Research & Development; Sales
# DistanceFromHome: How far the employee lives from work
# Education: 1 'Below College'; 2 'College'; 3 'Bachelor'; 4 'Master'; 5 'Doctor'
# EducationField: Human Resources; Life Sciences; Marketing; Medical; Other; Technical Degree
# EmployeeCount: No. of employees in this record
# EmployeeNumber: Employee ID
# EnvironmentSatisfaction: 4-point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# Gender: Male / Female
# HourlyRate: Consultancy charge per hour
# JobInvolvement: 4-point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# JobLevel: Metadata not available -- make an assumption
# JobRole: Healthcare Representative; Human Resources; Laboratory Technician; Manager; Manufacturing Director; Research Director; Research Scientist; Sales Executive; Sales Representative
# JobSatisfaction: 4-point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# MaritalStatus: Divorced; Married; Single
# MonthlyIncome: Monthly salary
# MonthlyRate: Consultancy charge per day
# NumCompaniesWorked: No. of previous employers
# Over18: Y/N
# OverTime: Yes/No
# PercentSalaryHike: Last year's increment
# PerformanceRating: 4-point Likert scale: 1 'Low'; 2 'Good'; 3 'Excellent'; 4 'Outstanding'
# RelationshipSatisfaction: 4-point Likert scale: 1 'Low'; 2 'Medium'; 3 'High'; 4 'Very High'
# StandardHours: Contract hours
# StockOptionLevel: No available metadata -- make an assumption :)
# TotalWorkingYears: Career age
# TrainingTimesLastYear: No. of training courses attended last year
# WorkLifeBalance: 4-point Likert scale: 1 'Bad'; 2 'Good'; 3 'Better'; 4 'Best'
# YearsAtCompany: Time spent with the company
# YearsInCurrentRole: Time in current role
# YearsSinceLastPromotion: No. of years since last promoted
# YearsWithCurrManager: Years spent with current manager

setwd("/Users/simoncaton/Documents/OneDriveBusiness/Teaching/ADM/Project and CA/") # change this to where you downloaded the .csv
hrdata <- read.csv("ADM CA 1 Data.csv", stringsAsFactors = T) # will auto-encode the text attributes to factors

# OK, now we need to make a dataset unique to you
set.seed(1337) # <-- put your student number here WITHOUT the x!! Leave off a starting zero if you have one
# e.g.: set.seed(62345678)
my_dataset <- hrdata[order(runif(600)), ]

# let's remove ID, we probably don't want that:
my_dataset <- my_dataset[-10]

# Unfortunately, due to a technical error, 3 columns of the data were lost :(
# HR blamed IT, IT blamed HR, your manager will blame you, so let's just hope those columns weren't important!
col1 <- round(runif(1)*32)+2 # the +2 protects the Age and Attrition variables
col2 <- round(runif(1)*31)+2
col3 <- round(runif(1)*30)+2
cols <- names(my_dataset)
print(paste("I lost: ", cols[col1], ",", cols[col2], ",", cols[col3]))
# "I lost:  StandardHours , OverTime , Gender"
my_dataset <- my_dataset[-col1]
my_dataset <- my_dataset[-col2]
my_dataset <- my_dataset[-col3]

# if you want to use something other than R, save your dataset:
write.csv(file="mydata.csv", my_dataset, row.names = F)

# Now please begin, and good luck!
# Because you lost 3 columns, some models may/may not work as well,
# don't worry about this, I will control for it in the grading!
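# A minimal sketch of one way to start on the attrition question posed above:
# fit a classification tree and a logistic regression on the personalised dataset.
# This is illustrative only (not part of the assignment script); it assumes
# my_dataset was created by the code above and that the rpart package is installed.
library(rpart)

# simple train/test split (70/30); the proportions are an arbitrary choice
set.seed(42)
train_idx <- sample(seq_len(nrow(my_dataset)), size = 0.7 * nrow(my_dataset))
train <- my_dataset[train_idx, ]
test  <- my_dataset[-train_idx, ]

# classification tree: which attributes split leavers from stayers?
tree_fit <- rpart(Attrition ~ ., data = train, method = "class")
printcp(tree_fit)                          # complexity table; hints at influential variables

# logistic regression as a second, more interpretable baseline
glm_fit <- glm(Attrition ~ ., data = train, family = binomial)
summary(glm_fit)

# crude holdout accuracy of the tree, enough for a sanity check
pred <- predict(tree_fit, test, type = "class")
mean(pred == test$Attrition)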
selfplay <- function(seed = 0, ai_move, Bs, Bg, nsample, mateXdepth,
                     start = init_state, move.limit = 100, verbose = TRUE) {
  return(cvc(seed, ai_move, Bs, Bg, ai_move, Bs, Bg, nsample, mateXdepth,
             start, move.limit, verbose))
}

selfplay2 <- function(seed = 0, ai_move, Bs, Bg, Bse, Bge, nsample, mateXdepth, expo) {
  return(cvc2(seed, ai_move, Bs, Bg, Bse, Bge, ai_move, Bs, Bg, Bse, Bge,
              nsample, mateXdepth, expo))
}

cvc <- function(seed = 0, ai_move1, Bs1, Bg1, ai_move2, Bs2, Bg2, nsample, mateXdepth,
                start = init_state, move.limit = 100, verbose = TRUE, plotit = FALSE) {
  if (!is.null(seed)) set.seed(seed)
  state <- start
  slist <- list(state)
  if (verbose) print_state(state)
  mlist <- character()
  mv <- ai_move1(state, Bs1, Bg1, nsample, mateXdepth)
  mlist <- c(mlist, mv)
  winner = ""
  mvct <- 0
  while (mv != "resign" && mvct < move.limit) {
    state <- move_parser(state, mv)
    slist <- c(slist, list(state))
    if (verbose) print_state(state)
    if (plotit) draw_state(state, title = TRUE)
    if (state[4] %% 2 == 0) {
      mv <- ai_move1(state, Bs1, Bg1, nsample, mateXdepth)
    } else {
      mv <- ai_move2(state, Bs2, Bg2, nsample, mateXdepth)
    }
    mlist <- c(mlist, mv)
    mvct <- mvct + 1
  }
  if (mvct < move.limit) {
    if (state[4] %% 2 == 0) {
      winner = "gote"
    } else {
      winner = "sente"
    }
  }
  return(list(mlist = mlist, slist = slist, seed = seed, nsample = nsample,
              mateXdepth = mateXdepth, winner = winner))
}

cvc2 <- function(seed = 0, ai_move1, Bs1, Bg1, Bse1, Bge1, ai_move2, Bs2, Bg2, Bse2, Bge2,
                 nsample, mateXdepth, expo) {
  set.seed(seed)
  slist <- list()
  state <- init_state
  mlist <- character()
  mv <- ai_move1(state, Bs1, Bg1, Bse1, Bge1, nsample, mateXdepth, expo)
  mlist <- c(mlist, mv)
  while (mv != "resign") {
    state <- move_parser(state, mv)
    slist <- c(slist, list(state))
    print_state(state)
    if (state[4] %% 2 == 0) {
      mv <- ai_move1(state, Bs1, Bg1, Bse1, Bge1, nsample, mateXdepth, expo)
    } else {
      mv <- ai_move2(state, Bs2, Bg2, Bse2, Bge2, nsample, mateXdepth, expo)
    }
    mlist <- c(mlist, mv)
  }
  return(list(mlist = mlist, slist = slist, seed = seed, nsample = nsample,
              mateXdepth = mateXdepth, expo = expo))
}
# Source of the file above: /doubutsu1/gen0/selfplay.R (repo: snarles/gamers, no license)
test_that("Global Geary statistics are stable", { guerry_modeled <- guerry guerry_lm <- lm(Crm_prs ~ Litercy, guerry_modeled) guerry_modeled$predictions <- predict(guerry_lm, guerry_modeled) weights <- ww_build_weights(guerry) resid <- guerry_modeled$Crm_prs - guerry_modeled$predictions expect_snapshot({ df_global_c <- ww_global_geary_c(guerry_modeled, Crm_prs, predictions) df_global_c[1:3] }) expect_snapshot({ df_global_c_p <- ww_global_geary_pvalue(guerry_modeled, Crm_prs, predictions) df_global_c_p[1:3] }) expect_snapshot( (vec_global_c <- ww_global_geary_c_vec(guerry_modeled$Crm_prs, guerry_modeled$predictions, weights)) ) expect_snapshot( (vec_global_c_p <- ww_global_geary_pvalue_vec(guerry_modeled$Crm_prs, guerry_modeled$predictions, weights)) ) expect_identical( df_global_c$.estimate, vec_global_c ) expect_identical( df_global_c_p$.estimate, vec_global_c_p ) set.seed(123) spdep_output <- spdep::geary.test(resid, weights) expect_identical( vec_global_c, spdep_output$estimate[[1]] ) expect_identical( vec_global_c_p, spdep_output$p.value ) })
# Source of the file above: /tests/testthat/test-global_geary.R (repo: ropensci/waywiser, permissive license)
test_that("Global Geary statistics are stable", { guerry_modeled <- guerry guerry_lm <- lm(Crm_prs ~ Litercy, guerry_modeled) guerry_modeled$predictions <- predict(guerry_lm, guerry_modeled) weights <- ww_build_weights(guerry) resid <- guerry_modeled$Crm_prs - guerry_modeled$predictions expect_snapshot({ df_global_c <- ww_global_geary_c(guerry_modeled, Crm_prs, predictions) df_global_c[1:3] }) expect_snapshot({ df_global_c_p <- ww_global_geary_pvalue(guerry_modeled, Crm_prs, predictions) df_global_c_p[1:3] }) expect_snapshot( (vec_global_c <- ww_global_geary_c_vec(guerry_modeled$Crm_prs, guerry_modeled$predictions, weights)) ) expect_snapshot( (vec_global_c_p <- ww_global_geary_pvalue_vec(guerry_modeled$Crm_prs, guerry_modeled$predictions, weights)) ) expect_identical( df_global_c$.estimate, vec_global_c ) expect_identical( df_global_c_p$.estimate, vec_global_c_p ) set.seed(123) spdep_output <- spdep::geary.test(resid, weights) expect_identical( vec_global_c, spdep_output$estimate[[1]] ) expect_identical( vec_global_c_p, spdep_output$p.value ) })
# R Coding Reference
# ––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––

# FUNCTIONS:
c()                                     # concatenate (like creating a vector)
length()                                # returns the length of a vector quantity
ls()                                    # list of all objects and/or data created during session
rm()                                    # remove/delete a created object or data
rm(list = ls())                         # remove ALL created objects or data
matrix(data,nrow,ncol,byrow,dimnames)   # creates a matrix; byrow is FALSE by default
matrix(a:b,r,c)                         # creates a matrix from values 'a' to 'b' with row size = 'r' and column size = 'c'
#   - can use a negative sign '-' when indexing a matrix to include all rows and/or columns except for those indicated in the index
dim(matrix)                             # gives the number of rows followed by the number of columns
sqrt()                                  # returns the square root...obviously...; can take sqrt() of an entire matrix
var()                                   # returns the variance of a data set
sd()                                    # returns the standard deviation of a data set
rnorm(nPoints,mean,sd)                  # returns a vector of RANDOMIZED normal variables; by default the mean = 1
plot(x,y,col = "desiredColor")          # creates a plot of "x vs. y"; x-axis (horizontal) vs. y-axis (vertical)
#   - xlab = "this is the x-axis label"
#   - ylab = "this is the y-axis label"
#   - main = "this is the title of the plot"
dev.off()                               # tell R that we are done with the current plot
seq(a,b,length = d)                     # creates a vector of numbers between 'a' and 'b', equally spaced, with length 'd'
contour(x,y,f)                          # can be used to represent/create 3-dimensional plots/graphs; 'f' is the z coordinate as a function of 'x' and 'y'
image(x,y,f)                            # works similarly to the contour function but creates a color-coded graph based on the z coordinate; heatmap
persp(theta = angle1,phi = angle2)      # gives a different perspective of the current graph based on the two given angles
read.table()                            # imports data into R
write.table()                           # exports data from R
fix()                                   # view the data in a spreadsheet-like window
library(libName)                        # includes library for use in your session
fix(dataBase)                           # invokes an editable version of data and opens the data, available for use, in the workspace
attach(dataBase)                        # dataBase is attached to the 'R' search path
names(dataBase)                         # display what information is contained within the dataBase
lm(y~x,dataBase)                        # linear regression model fit of data with 'y' as the response and 'x' as the predictors
lm(y~x1+x2+x3+…,dataBase)               # multiple linear regression model fit of data with 'y' as the response and 'x' as the predictors
lm(y~x1*x2,dataBase)                    # linear regression model fit of data with 'y' as the response, 'x' as the predictors, and an interaction term
summary(modelVar)                       # returns values for Min, Max, 1st Quartile, 3rd Quartile, Median, Approx. Coefficients, SE, t-value, p-value, RSE, DOF, Mult. R^2, Adj. R^2, and F-stat.
coef(modelVar)                          # returns the coefficients of the model fit
confint(modelVar)                       # returns the confidence intervals of the model fit
abline(modelVar)                        # adds one or more straight lines through the current plot
residuals(modelVar)                     # returns the residuals from the model fit
rstudent(modelVar)                      # returns the studentized residuals from the model fit
hatvalues(modelVar)                     # returns leverage statistics
which.max(data)                         # returns the index of the maximum value within the supplied data
vif()                                   # returns the variance inflation factor(s)
I()                                     # change the class of an object to indicate that it should be treated 'as is'
anova(modelVar1,modelVar2)              # compute analysis of variance (or deviance) tables for one or more fitted model objects
poly(x)                                 # creates a polynomial with 'x' DOF

# ––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
# COMMANDS:
?<func_name>                            # gets help with a particular function
mean                                    # sets the average/mean value of a data set
sd                                      # sets the standard deviation of a data set
matName[r,c]                            # returns the appropriate row and/or column of a matrix for computational/manipulative purposes
cat("\014")                             # clear the console window; also can use the keybind "CTRL + L"
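# A short worked example tying several of the entries above together
# (lm, summary, coef, confint, residuals, plot, abline). It uses the built-in
# mtcars data so it is self-contained; purely illustrative.
data(mtcars)
fit <- lm(mpg ~ wt + hp, data = mtcars)    # multiple linear regression
summary(fit)                               # coefficients, SEs, R^2, F-statistic
coef(fit)                                  # point estimates only
confint(fit)                               # 95% confidence intervals
head(residuals(fit))                       # first few residuals
plot(mtcars$wt, mtcars$mpg, xlab = "Weight", ylab = "MPG", main = "mpg vs. wt")
abline(lm(mpg ~ wt, data = mtcars))        # add the simple-regression line to the plot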
# Source of the file above: /R/R Coding Reference.R (repo: Lexxeous/technical_references, no license)
\docType{class}
\name{cvpen-class}
\alias{cvpen-class}
\title{Class "cvpen"}
\description{
  Class of object returned by a cross-validation performed through the
  \code{crossval} method.
}
\section{Slots}{
  \describe{
    \item{\code{lambda1}:}{vector of \eqn{\lambda_1}{lambda1}
      (\eqn{\ell_1}{l1} or \eqn{\ell_\infty}{infinity} penalty levels) for
      which each cross-validation has been performed.}

    \item{\code{lambda2}:}{vector (or scalar) of \eqn{\ell_2}{l2}-penalty
      levels for which each cross-validation has been performed.}

    \item{\code{lambda1.min}:}{level of \eqn{\lambda_1}{lambda1} that
      minimizes the error estimated by cross-validation.}

    \item{\code{lambda1.1se}:}{largest level of \eqn{\lambda_1}{lambda1} such
      that the cross-validated error is within 1 standard error of the minimum.}

    \item{\code{lambda2.min}:}{level of \eqn{\lambda_2}{lambda2} that
      minimizes the error estimated by cross-validation.}

    \item{\code{cv.error}:}{a data frame containing the mean cross-validated
      error and its associated standard error for each value of \code{lambda1}
      and \code{lambda2}.}

    \item{\code{folds}:}{list of \code{K} vectors indicating the folds used
      for cross-validation.}

    \item{\code{beta.min}:}{the vector of parameters obtained by fitting the
      problem on the full data set \code{x} and \code{y} with
      \code{lambda1.min} and \code{lambda2.min} penalties.}

    \item{\code{beta.1se}:}{the vector of parameters obtained by fitting the
      problem on the full data set \code{x} and \code{y} with
      \code{lambda1.1se} and \code{lambda2.min} penalties.}
  }

  The specific \code{\link{plot,cvpen-method}} method is documented.
}
\seealso{
  See also \code{\link{plot,cvpen-method}} and \code{\link{crossval}}.
}
\keyword{class}
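# A hedged usage sketch for the "cvpen" class documented above. The slot names
# come from the Rd file itself; the exact crossval() call signature (x, y, K)
# depends on the quadrupen version and should be treated as an assumption.
library(quadrupen)
set.seed(1)
x <- matrix(rnorm(100 * 20), 100, 20)
y <- x %*% c(rep(2, 5), rep(0, 15)) + rnorm(100)
cv <- crossval(x, y, K = 10)   # assumed call; returns a "cvpen" object
cv@lambda1.min                 # penalty level minimizing the cross-validated error
cv@beta.min                    # coefficients refit on the full data at that penalty
plot(cv)                       # the dedicated plot,cvpen-method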
# Source of the file above: /man/cvpen-class.Rd (repo: xinchoubiology/quadrupen, no license)
#' Function to store results in a list
#' =======================================================
#' Function to store results in a list

# Initialization: initializes the list of results and creates the
# setresult/getresult functions
initresults <- function(){
    # initialize the local results-list variable
    allresults <- list(Whatsit="List of Results")
    # store a result; assigned to setresult in the global environment
    setresult <<- function(rres, rname) {
        if (missing(rname)) { rname <- rres[["name"]] }
        allresults[[rname]] <<- rres
    }
    # retrieve a result; assigned to getresult in the global environment
    getresult <<- function(rname){
        allresults[[rname]]
    }
}

# # tests
# initresults()
# getresult("Whatsit")
# getresult("whatsit")
#
# setresult(list(name="hoho", thing = "thisisit"))
# getresult("hoho")
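# Quick demonstration of the helpers defined above. The function and slot names
# (initresults, setresult, getresult, "Whatsit") come from the file itself; the
# stored values are illustrative.
initresults()                                         # creates setresult()/getresult() in globalenv
setresult(list(name = "model1", fit = lm(dist ~ speed, cars)))
getresult("model1")$fit                               # retrieve the stored fit by its name
setresult(pi, rname = "pi_value")                     # explicit name instead of rres[["name"]]
getresult("pi_value")
getresult("Whatsit")                                  # "List of Results" marker set at init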
# Source of the file above: /funcoderesultats.R (repo: Brufico/Standardfunctions10, no license)
#' Optimized Incremental Processing Probabilistic-EWMA (PEWMA).
#'
#' @description \code{OipPewma} is the optimized implementation of the
#' \code{IpPewma} function using R environments. It has been shown that
#' in long datasets it can reduce runtime by up to 50\%. This function allows
#' the calculation of anomalies using PEWMA in an incremental processing mode.
#' This algorithm is a probabilistic method of EWMA which dynamically adjusts the
#' parameterization based on the probability of the given observation. This
#' method produces dynamic, data-driven anomaly thresholds which are robust to
#' abrupt transient changes, yet quickly adjust to long-term distributional
#' shifts.
#'
#' @param data Numerical vector with training and test dataset.
#' @param n.train Number of points of the dataset that correspond to the
#' training set.
#' @param alpha0 Maximal weighting parameter.
#' @param beta Weight placed on the probability of the given observation.
#' @param l Control limit multiplier.
#' @param last.res Last result returned by the algorithm.
#'
#' @details \code{data} must be a numerical vector without NA values.
#' \code{alpha0} must be a numeric value where 0 < \code{alpha0} < 1. If a
#' faster adjustment to the initial shift is desirable, simply lowering
#' \code{alpha0} will suffice. \code{beta} is the weight placed on the
#' probability of the given observation. It must be a numeric value where
#' 0 <= \code{beta} <= 1. Note that when \code{beta} equals 0, PEWMA converges
#' to a standard EWMA. Finally, \code{l} is the parameter that determines the
#' control limits. By default, 3 is used. \code{last.res} is the last result
#' returned by some previous execution of this algorithm. The first time the
#' algorithm is executed its value is NULL. However, to run a new batch
#' of data without having to include it in the old dataset and restart the
#' process, only the two parameters returned by the last run are needed.
#'
#' This algorithm can be used for both classical and incremental processing. It
#' should be noted that in case of having a finite dataset the
#' \code{\link{CpPewma}} or \code{\link{OcpPewma}} algorithms are faster.
#' Incremental processing can be used in two ways. 1) Processing all available
#' data and saving \code{last.res} for future runs in which there is new data.
#' 2) Using the \href{https://CRAN.R-project.org/package=stream}{stream} library
#' for when there is too much data and it does not fit into the memory.
#' An example has been made for this use case.
#'
#' @return A list of the following items.
#'
#' \item{result}{dataset conformed by the following columns.}
#' \itemize{
#'  \item \code{is.anomaly} 1 if the value is anomalous, 0 otherwise.
#'  \item \code{ucl} Upper control limit.
#'  \item \code{lcl} Lower control limit.
#' }
#' \item{last.res}{Last result returned by the algorithm. It is a dataset
#' containing the parameters calculated in the last iteration and necessary
#' for the next one.}
#'
#' @references Kevin M. Carter and W. Streilein. Probabilistic reasoning for
#' streaming anomaly detection. 2012 IEEE Statistical Signal Processing Workshop
#' (SSP), pp. 377-380, Aug 2012.
#'
#' @example tests/examples/oip_pewma_example.R
#'
#' @export

# Pewma CONTROL CHART
OipPewma <- function(data, alpha0 = 0.2, beta = 0, n.train = 5, l = 3, last.res = NULL) {

  # validate parameters
  if (!is.numeric(data) | (sum(is.na(data)) > 0)) {
    stop("data argument must be a numeric vector and without NA values.")
  }
  if (!is.numeric(n.train) | n.train <= 0) {
    stop("n.train argument must be a positive numeric value.")
  }
  if (!is.numeric(alpha0) | alpha0 <= 0 | alpha0 > 1) {
    stop("alpha0 argument must be a numeric value in (0,1] range.")
  }
  if (!is.numeric(beta) | beta < 0 | beta > 1) {
    stop("beta argument must be a numeric value in [0,1] range.")
  }
  if (!is.numeric(l)) {
    stop("l argument must be a numeric value.")
  }
  if (!is.null(last.res) & !is.data.frame(last.res)) {
    stop("last.res argument must be NULL or a data.frame with last execution result.")
  }

  # Auxiliary Pewma function
  Pewma <- function(x, env) {
    row <- get("last.res", envir = env)
    row$i <- row$i + 1
    row$x <- x
    row$s1 <- row$s1.next
    row$std <- row$std.next
    row$z <- ifelse(row$std == 0, 0, (row$x - row$s1) / row$std)
    row$p <- 1 / sqrt(2 * pi) * exp(-(row$z ^ 2) / 2)
    row$alpha <- ifelse(row$i <= n.train, 1 - 1/row$i, (1 - beta * row$p) * alpha0)
    row$s1 <- row$alpha * row$s1 + (1 - row$alpha) * row$x
    row$s2 <- row$alpha * row$s2 + (1 - row$alpha) * row$x^2
    row$s1.next <- row$s1
    row$std.next <- sqrt(abs(row$s2 - row$s1 ^ 2))
    row$ucl <- row$s1 + l[1] * row$std
    row$lcl <- row$s1 - l[1] * row$std
    row$is.anomaly <- row$x < row$lcl | row$x > row$ucl
    row$is.supAnomaly <- row$x > row$ucl
    row$is.infAnomaly <- row$x < row$lcl
    assign("last.res", row, env)
    return(row[c("is.anomaly", "ucl", "lcl", "i")])
  }

  # initialize the variables
  new.enviroment <- new.env()
  if (is.null(last.res)) {
    last.res <- data.frame(value = data[1], i = 0, s1 = data[1], s2 = data[1] ^ 2,
                           s1.next = data[1], std.next = 0, std = 0, z = 0, p = 0,
                           is.anomaly = 0, lcl = 0, ucl = 0, alpha = alpha0)
  }
  assign("last.res", last.res, envir = new.enviroment)

  res <- as.data.frame(t(sapply(data, Pewma, new.enviroment)))
  res <- data.frame(is.anomaly = unlist(res$is.anomaly), lcl = unlist(res$lcl),
                    ucl = unlist(res$ucl), i = unlist(res$i))
  last.res <- get("last.res", envir = new.enviroment)

  res[res$i <= n.train, "is.anomaly"] <- 0
  res[res$i <= n.train, "lcl"] <- data[res[res$i <= n.train, "i"]]
  res[res$i <= n.train, "ucl"] <- data[res[res$i <= n.train, "i"]]

  return(list(result = res[, c("is.anomaly", "ucl", "lcl")], last.res = last.res))
}
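# A small usage sketch of OipPewma() as documented above: simulated data with an
# injected level shift, processed in two batches to show the incremental mode.
# The data and parameter values are illustrative only.
set.seed(100)
stream1 <- rnorm(150, mean = 0, sd = 1)
stream2 <- c(rnorm(50, mean = 0, sd = 1), rnorm(50, mean = 6, sd = 1))  # shift halfway

# first batch: last.res starts as NULL
out1 <- OipPewma(stream1, alpha0 = 0.2, beta = 0.1, n.train = 5, l = 3)
sum(out1$result$is.anomaly)

# second batch: reuse last.res so the state carries over without refitting
out2 <- OipPewma(stream2, alpha0 = 0.2, beta = 0.1, n.train = 5, l = 3,
                 last.res = out1$last.res)
which(out2$result$is.anomaly == 1)   # points flagged around the injected shift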
# Source of the file above: /R/oip_pewma.R (repo: cran/otsad, no license)
context("test-private-loops.R") describe("Private event loop", { it("changes current_loop()", { expect_identical(current_loop(), global_loop()) with_temp_loop({ expect_false(identical(current_loop(), global_loop())) }) }) it("runs only its own tasks", { x <- 0 later(~{x <<- 1}, 0) with_temp_loop({ expect_true(loop_empty()) later(~{x <<- 2}) run_now() expect_identical(x, 2) run_now(loop = global_loop()) expect_identical(x, 1) }) }) }) test_that("Private event loops", { l <- create_loop(autorun = FALSE) x <- 0 expect_true(exists_loop(l)) with_loop(l, { later(function() x <<- x + 1 ) run_now() }) expect_equal(x, 1) with_loop(l, { later(function() x <<- x + 1 ) run_now() later(function() x <<- x + 1 ) later(function() x <<- x + 1 ) }) expect_equal(x, 2) with_loop(l, run_now()) expect_equal(x, 4) destroy_loop(l) expect_false(exists_loop(l)) # Can't run later-y things with destroyed loop expect_error(with_loop(l, later(function() message("foo")))) expect_error(with_loop(l, run_now())) # GC with functions in destroyed loops, even if callback isn't executed. l <- create_loop(autorun = FALSE) x <- 0 gc() with_loop(l, { later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() message("foo") }) ) }) gc() expect_identical(x, 0) destroy_loop(l) gc() expect_identical(x, 1) # A GC'd loop object will cause its queue to be deleted, which will allow GC # of any resources l <- create_loop(autorun = FALSE) x <- 0 gc() with_loop(l, { later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() message("foo") }) ) }) gc() expect_identical(x, 0) # Delete the reference to the loop, and GC. This causes the queue to be # deleted, which removes references to items in the queue. However, the items # in the queue won't be GC'd yet. (At least not as of R 3.5.2.) rm(l) gc() expect_identical(x, 0) # A second GC triggers the finalizer for an item that was in the queue. gc() expect_identical(x, 1) # Can't destroy global loop expect_error(destroy_loop(global_loop())) }) test_that("Temporary event loops", { l <- NULL x <- 0 with_temp_loop({ l <- current_loop() later(function() x <<- x + 1 ) run_now() }) expect_false(exists_loop(l)) expect_error(with_loop(l, { later(function() x <<- x + 1 ) run_now() })) # Test GC # Make sure that items captured in later callbacks are GC'd after the callback # is executed. x <- 0 with_temp_loop({ later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() 1 }) ) gc() run_now() }) expect_identical(x, 0) gc() expect_identical(x, 1) # Test that objects are GC'd after loop is destroyed, even if callback hasn't # been executed. x <- 0 with_temp_loop({ later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() 1 }) ) run_now() later( local({ e <- environment() reg.finalizer(environment(), function(e) x <<-x + 1) function() 1 }) ) gc() }) expect_identical(x, 1) gc() expect_identical(x, 2) }) test_that("list_queue", { l <- create_loop(autorun = FALSE) q <- NULL f <- function() 1 # A dummy function with_loop(l, { later(f) q <- list_queue() }) expect_equal(length(q), 1) expect_identical(q[[1]]$callback, f) with_loop(l, { run_now() q <- list_queue() }) expect_equal(length(q), 0) with_loop(l, { later(f) later(f) later(sum) q <- list_queue() }) expect_equal(length(q), 3) expect_identical(q[[1]]$callback, f) expect_identical(q[[2]]$callback, f) expect_identical(q[[3]]$callback, sum) # Empty the queue by calling run now. Also test calling list_queue by passing # in a loop handle. 
with_loop(l, run_now()) q <- list_queue(l) expect_equal(length(q), 0) })
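# Compact sketch of the private-event-loop pattern exercised by the tests above,
# using only functions that already appear in them (create_loop, with_loop,
# later, run_now, destroy_loop). Illustrative, not part of the test suite.
library(later)

counter <- 0
loop <- create_loop(autorun = FALSE)      # private loop; callbacks run only on request

with_loop(loop, {
  later(function() counter <<- counter + 1)
  later(function() counter <<- counter + 1)
})
counter                                   # still 0: nothing has run yet

with_loop(loop, run_now())                # drain the private loop's queue
counter                                   # now 2

destroy_loop(loop)                        # global_loop() is unaffected and cannot be destroyed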
# Source of the file above: /tests/testthat/test-private-loops.R (repo: atheriel/later, permissive license)
context("test-private-loops.R") describe("Private event loop", { it("changes current_loop()", { expect_identical(current_loop(), global_loop()) with_temp_loop({ expect_false(identical(current_loop(), global_loop())) }) }) it("runs only its own tasks", { x <- 0 later(~{x <<- 1}, 0) with_temp_loop({ expect_true(loop_empty()) later(~{x <<- 2}) run_now() expect_identical(x, 2) run_now(loop = global_loop()) expect_identical(x, 1) }) }) }) test_that("Private event loops", { l <- create_loop(autorun = FALSE) x <- 0 expect_true(exists_loop(l)) with_loop(l, { later(function() x <<- x + 1 ) run_now() }) expect_equal(x, 1) with_loop(l, { later(function() x <<- x + 1 ) run_now() later(function() x <<- x + 1 ) later(function() x <<- x + 1 ) }) expect_equal(x, 2) with_loop(l, run_now()) expect_equal(x, 4) destroy_loop(l) expect_false(exists_loop(l)) # Can't run later-y things with destroyed loop expect_error(with_loop(l, later(function() message("foo")))) expect_error(with_loop(l, run_now())) # GC with functions in destroyed loops, even if callback isn't executed. l <- create_loop(autorun = FALSE) x <- 0 gc() with_loop(l, { later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() message("foo") }) ) }) gc() expect_identical(x, 0) destroy_loop(l) gc() expect_identical(x, 1) # A GC'd loop object will cause its queue to be deleted, which will allow GC # of any resources l <- create_loop(autorun = FALSE) x <- 0 gc() with_loop(l, { later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() message("foo") }) ) }) gc() expect_identical(x, 0) # Delete the reference to the loop, and GC. This causes the queue to be # deleted, which removes references to items in the queue. However, the items # in the queue won't be GC'd yet. (At least not as of R 3.5.2.) rm(l) gc() expect_identical(x, 0) # A second GC triggers the finalizer for an item that was in the queue. gc() expect_identical(x, 1) # Can't destroy global loop expect_error(destroy_loop(global_loop())) }) test_that("Temporary event loops", { l <- NULL x <- 0 with_temp_loop({ l <- current_loop() later(function() x <<- x + 1 ) run_now() }) expect_false(exists_loop(l)) expect_error(with_loop(l, { later(function() x <<- x + 1 ) run_now() })) # Test GC # Make sure that items captured in later callbacks are GC'd after the callback # is executed. x <- 0 with_temp_loop({ later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() 1 }) ) gc() run_now() }) expect_identical(x, 0) gc() expect_identical(x, 1) # Test that objects are GC'd after loop is destroyed, even if callback hasn't # been executed. x <- 0 with_temp_loop({ later( local({ reg.finalizer(environment(), function(e) x <<-x + 1) function() 1 }) ) run_now() later( local({ e <- environment() reg.finalizer(environment(), function(e) x <<-x + 1) function() 1 }) ) gc() }) expect_identical(x, 1) gc() expect_identical(x, 2) }) test_that("list_queue", { l <- create_loop(autorun = FALSE) q <- NULL f <- function() 1 # A dummy function with_loop(l, { later(f) q <- list_queue() }) expect_equal(length(q), 1) expect_identical(q[[1]]$callback, f) with_loop(l, { run_now() q <- list_queue() }) expect_equal(length(q), 0) with_loop(l, { later(f) later(f) later(sum) q <- list_queue() }) expect_equal(length(q), 3) expect_identical(q[[1]]$callback, f) expect_identical(q[[2]]$callback, f) expect_identical(q[[3]]$callback, sum) # Empty the queue by calling run now. Also test calling list_queue by passing # in a loop handle. 
with_loop(l, run_now()) q <- list_queue(l) expect_equal(length(q), 0) })
# # Previous code:
# test_that("it works", {
#   app <- ShinyDriver$new(test_path("../.."))
#   app$snapshotInit("mytest")
#   app$setInputs(name = "Hadley")
#   app$setInputs(greet = "click")
#   app$snapshot()
#   app$snapshot(list(output = "greeting"))
# })

# shinytest2 code using `app$**()`:
test_that("basic website example works using shinytest", {
  app <- AppDriver$new(variant = platform_variant())
  app$set_inputs(name = "Hadley")
  app$set_inputs(greet = "click")

  # Take picture and record inputs / outputs
  app$expect_screenshot()
  app$expect_values()

  # Snapshot some text values
  app$expect_text("#greeting")
  app$expect_html("#greeting", outer_html = TRUE)

  # Only record `output[c("greeting")]`
  app$expect_values(output = "greeting")
})

# shinytest2 code using `app$**()`:
test_that("basic website example works using testthat", {
  app <- AppDriver$new(variant = platform_variant(), name = "manual")
  app$set_inputs(name = "Hadley")
  app$set_inputs(greet = "click")

  # Take picture and record inputs / outputs
  tmpfile <- tempfile()
  app$get_screenshot(tmpfile)
  expect_snapshot_file(tmpfile, name = "manual-screenshot.png", variant = app$get_variant())

  values <- app$get_values()
  expect_equal(values$output$greeting, "Hello Hadley!")

  # Snapshot some text values
  expect_equal(
    app$get_text("#greeting"),
    "Hello Hadley!"
  )
  expect_equal(
    app$get_html("#greeting", outer_html = TRUE),
    "<div id=\"greeting\" class=\"shiny-text-output shiny-bound-output\" aria-live=\"polite\">Hello Hadley!</div>"
  )
})
# Source of the file above: /tests/testthat/apps/hello/tests/testthat/test-app.R (repo: rstudio/shinytest2, permissive license)
#' @title Confusion matrix. #' #' @description #' Calculates the confusion matrix for a (possibly resampled) prediction. #' Rows indicate true classes, columns predicted classes. The marginal elements count the number of #' classification errors for the respective row or column, i.e., the number of errors #' when you condition on the corresponding true (rows) or predicted (columns) class. #' The last bottom right element displays the total amount of errors. #' #' A list is returned that contains multiple matrices. #' If \code{relative = TRUE} we compute three matrices, one with absolute values and two with relative. #' The relative confusion matrices are normalized based on rows and columns respectively, #' if \code{FALSE} we only compute the absolute value matrix. #' #' The \code{print} function returns the relative matrices in #' a compact way so that both row and column marginals can be seen in one matrix. #' For details see \code{\link{ConfusionMatrix}}. #' #' Note that for resampling no further aggregation is currently performed. #' All predictions on all test sets are joined to a vector yhat, as are all labels #' joined to a vector y. Then yhat is simply tabulated vs. y, as if both were computed on #' a single test set. This probably mainly makes sense when cross-validation is used for resampling. #' #' @template arg_pred #' @param relative [\code{logical(1)}]\cr #' If \code{TRUE} two additional matrices are calculated. One is normalized by rows and one by #' columns. #' @param sums [\code{logical(1)}]\cr #' If \code{TRUE} add absolute number of observations in each group. #' @param set [\code{character(1)}]\cr #' Specifies which part(s) of the data are used for the calculation. #' If \code{set} equals \code{train} or \code{test}, the \code{pred} object must be the result of a #' resampling, otherwise an error is thrown. #' Defaults to \dQuote{both}. Possible values are \dQuote{train}, \dQuote{test}, or \dQuote{both}. #' @return [\code{\link{ConfusionMatrix}}]. 
#' @family performance #' @export #' @examples #' # get confusion matrix after simple manual prediction #' allinds = 1:150 #' train = sample(allinds, 75) #' test = setdiff(allinds, train) #' mod = train("classif.lda", iris.task, subset = train) #' pred = predict(mod, iris.task, subset = test) #' print(calculateConfusionMatrix(pred)) #' print(calculateConfusionMatrix(pred, sums = TRUE)) #' print(calculateConfusionMatrix(pred, relative = TRUE)) #' #' # now after cross-validation #' r = crossval("classif.lda", iris.task, iters = 2L) #' print(calculateConfusionMatrix(r$pred)) calculateConfusionMatrix = function(pred, relative = FALSE, sums = FALSE, set = "both") { checkPrediction(pred, task.type = "classif", check.truth = TRUE, no.na = TRUE) assertFlag(relative) assertFlag(sums) n = getTaskSize(pred$task.desc) resp = getPredictionResponse(pred) n.pred = length(resp) truth = getPredictionTruth(pred) if (set != "both") { assertClass(pred, classes = "ResamplePrediction") subset.idx = (pred$data$set == set) if (!any(subset.idx)) { stopf("prediction object contains no observations for set = '%s'", set) } truth = truth[subset.idx] resp = resp[subset.idx] } cls = union(levels(resp), levels(truth)) k = length(cls) truth = factor(truth, levels = cls) resp = factor(resp, levels = cls) tab = table(truth, resp) # create table for margins, where only the off-diag errs are in mt = tab diag(mt) = 0 row.err = rowSums(mt) col.err = colSums(mt) result = rbind(cbind(tab, row.err), c(col.err, sum(col.err))) dimnames(result) = list(true = c(cls, "-err.-"), predicted = c(cls, "-err.-")) if (sums) { rowsum = rowSums(tab) colsum = colSums(tab) result = rbind(cbind(result, c(rowsum, NA)), c(colsum, NA, n)) colnames(result)[k + 2] = "-n-" rownames(result)[k + 2] = "-n-" } result = list(result = result, task.desc = getPredictionTaskDesc(pred), relative = relative, sums = sums) js = 1:k # indexes for nonmargin cols if (relative) { normConfMatrix = function(r) { if (any(r[js] > 0)) r / sum(r[js]) else rep(0, k) } #normalize by rows and add margins as a new column result.rel.row = t(apply(tab, 1, normConfMatrix)) result.rel.row = cbind(result.rel.row, "-err-" = rowSums(result.rel.row) - diag(result.rel.row)) #normalize by columns and add margins as a new row result.rel.col = apply(tab, 2, normConfMatrix) result.rel.col = rbind(result.rel.col, "-err-" = colSums(result.rel.col) - diag(result.rel.col)) result$relative.row = result.rel.row result$relative.col = result.rel.col result$relative.error = result$result[k + 1, k + 1] / n.pred } addClasses(result, "ConfusionMatrix") } #' @export #' @describeIn calculateConfusionMatrix #' #' @param x [\code{\link{ConfusionMatrix}}]\cr #' Object to print. #' @param both [\code{logical(1)}]\cr #' If \code{TRUE} both the absolute and relative confusion matrices are printed. #' @param digits [\code{integer(1)}]\cr #' How many numbers after the decimal point should be printed, only relevant for relative confusion matrices. #' @param ... [any]\cr #' Currently not used. print.ConfusionMatrix = function(x, both = TRUE, digits = 2, ...) { assertFlag(both) assertInt(digits, lower = 1) #formatting stuff, use digits after(!) the decimal point. 
nsmall = digits digits = nsmall - 1 cls = getTaskDesc(x$task.desc)$class.levels k = length(cls) n = getTaskDesc(x$task.desc)$size if (x$relative) { js = 1:k res = paste(format(x$relative.row[js, js], digits = digits, nsmall = nsmall), format(x$relative.col[js, js], digits = digits, nsmall = nsmall), sep = "/") attributes(res) = attributes(x$relative.row[js, js]) col.err = x$relative.col[k + 1, ] row.err = x$relative.row[, k + 1] full.err = stri_pad_right(format(x$relative.error, digits = digits, nsmall = nsmall), width = nchar(res[1, 1])) #bind marginal errors correctly formatted to rows and columns res = rbind(res, stri_pad_left(format(col.err, digits = digits, nsmall = nsmall), width = nchar(res[1, 1]))) res = cbind(res, c(format(row.err, digits = digits, nsmall = nsmall), full.err)) #also bind the marginal sums to the relative confusion matrix for printing if (x$sums) { res = rbind(cbind(res, c(x$result["-n-", 1:k], NA)), c(x$result[1:k, "-n-"], NA, n)) dimnames(res) = list(true = c(cls, "-err.-", "-n-"), predicted = c(cls, "-err.-", "-n-")) } else { dimnames(res) = list(true = c(cls, "-err.-"), predicted = c(cls, "-err.-")) } cat("Relative confusion matrix (normalized by row/column):\n") print(noquote(res)) if (both) { cat("\n\nAbsolute confusion matrix:\n") print(x$result) } } else { print(x$result) } } #' @title Confusion matrix #' #' @description #' The result of \code{\link{calculateConfusionMatrix}}. #' #' Object members: #' \describe{ #' \item{result [\code{matrix}]}{Confusion matrix of absolute values and marginals. Can also contain #' row and column sums of observations.} #' \item{task.desc [\code{\link{TaskDesc}}]}{Additional information about the task.} #' \item{sums [\code{logical(1)}]}{Flag if marginal sums of observations are calculated.} #' \item{relative [\code{logical(1)}]}{Flag if the relative confusion matrices are calculated.} #' \item{relative.row [\code{matrix}]}{Confusion matrix of relative values and marginals normalized by row.} #' \item{relative.col [\code{matrix}]}{Confusion matrix of relative values and marginals normalized by column.} #' \item{relative.error [\code{numeric(1)}]}{Relative error overall.} #' } #' @name ConfusionMatrix #' @family performance NULL
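# Tiny illustration of the row/column normalization performed inside
# calculateConfusionMatrix() (see normConfMatrix above), on a made-up 2x2
# table; the counts are invented purely for illustration.
tab <- matrix(c(40, 10,
                 5, 45),
              nrow = 2, byrow = TRUE,
              dimnames = list(true = c("a", "b"), predicted = c("a", "b")))

t(apply(tab, 1, function(r) r / sum(r)))    # row-normalized: diagonal = per-class recall
apply(tab, 2, function(cl) cl / sum(cl))    # column-normalized: diagonal = per-class precision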
# Source of the file above: /R/calculateConfusionMatrix.R (repo: NamwooPark/mlr, no license)
#####################################################
#Lab 1. START
#####################################################
#Tools > Global Options > Code > Saving > Default text encoding: UTF-8
#####################################################
rm(list=ls())

# Set the working directory
setwd("C:/Bigdata_Analytics/R_DataAnalysis/R_Hanwha")

#####################################################
# Read the raw data
#####################################################
data_cust   <- read.csv("data/BGCON_CUST_DATA.csv",   header=TRUE, sep=",", encoding="CP949", fileEncoding="UCS-2")
data_claim  <- read.csv("data/BGCON_CLAIM_DATA.csv",  header=TRUE, sep=",", encoding="CP949", fileEncoding="UCS-2")
data_cntt   <- read.csv("data/BGCON_CNTT_DATA.csv",   header=TRUE, sep=",", encoding="CP949", fileEncoding="UCS-2")
data_fmly   <- read.csv("data/BGCON_FMLY_DATA.csv",   header=TRUE, sep=",", encoding="CP949", fileEncoding="UCS-2")
data_fpinfo <- read.csv("data/BGCON_FPINFO_DATA.csv", header=TRUE, sep=",", encoding="CP949", fileEncoding="UCS-2")

#####################################################
# Check the counts of normal customers and fraudsters
#####################################################
(count_siu <- table(data_cust$SIU_CUST_YN))
# non-fraudsters vs. fraudsters: 18801, 1806
names(count_siu) <- c("분석대상", "정상인", "사기자")  # labels: "to classify", "normal", "fraudster"
pie(count_siu, cex=0.8,
    # fraudsters shown in red
    main="사기자 수",  # "Number of fraudsters"
    labels=paste(names(count_siu), "\n", count_siu, "명", "\n",
                 round(count_siu/sum(count_siu)*100), "%"))
rm(count_siu)

#####################################################
# Data preprocessing: handle NA, NULL values, etc.
#####################################################
# Convert age to an age group (decade)
age_to_gen <- function(row) {
  row = floor(row/10)
}
data_cust$AGE <- sapply(data_cust$AGE, age_to_gen)

# Convert Y to 1 and N to 0
yn_to_10 <- function(row) {
  if(row=="Y") row = 1
  else if(row=="N") row = 0
  else row = ""
}
data_cust$SIU_CUST_YN <- sapply(data_cust$SIU_CUST_YN, yn_to_10)
data_cust$FP_CAREER <- sapply(data_cust$FP_CAREER, yn_to_10)

# Convert NA to 0
na_to_0 <- function(row) {
  if(is.na(row)) row = 0
  else row = row
}
data_cust$RESI_TYPE_CODE <- sapply(data_cust$RESI_TYPE_CODE, na_to_0)
data_cust$TOTALPREM <- sapply(data_cust$TOTALPREM, na_to_0)

#####################################################
# Convert region names to numeric codes
ctpr_to_code <- function(row) {
  if(row=="서울") row = 1        # Seoul
  else if(row=="부산") row = 2   # Busan
  else if(row=="대구") row = 3   # Daegu
  else if(row=="인천") row = 4   # Incheon
  else if(row=="광주") row = 5   # Gwangju
  else if(row=="대전") row = 6   # Daejeon
  else if(row=="울산") row = 7   # Ulsan
  else if(row=="세종") row = 8   # Sejong
  else if(row=="경기") row = 9   # Gyeonggi
  else if(row=="강원") row = 10  # Gangwon
  else if(row=="충북") row = 11  # Chungbuk
  else if(row=="충남") row = 12  # Chungnam
  else if(row=="전북") row = 13  # Jeonbuk
  else if(row=="전남") row = 14  # Jeonnam
  else if(row=="경북") row = 15  # Gyeongbuk
  else if(row=="경남") row = 16  # Gyeongnam
  else if(row=="제주") row = 17  # Jeju
  else row = 0
}
data_cust$CTPR <- sapply(data_cust$CTPR, ctpr_to_code)
data_cust$CTPR <- unlist(data_cust$CTPR)

#####################################################
# Convert NA in MINCRDT and MAXCRDT to 6
na_to_6 <- function(row) {
  if(is.na(row)) row = 6
  else row = row
}
data_cust$MINCRDT <- sapply(data_cust$MINCRDT, na_to_6)
data_cust$MAXCRDT <- sapply(data_cust$MAXCRDT, na_to_6)
data_cust$CUST_INCM <- sapply(data_cust$CUST_INCM, na_to_0)
data_cust$JPBASE_HSHD_INCM <- sapply(data_cust$JPBASE_HSHD_INCM, na_to_0)

# Keep only the first character of OCCP_GRP
occp_grp_1_to_no <- function(row) {
  row = substr(row, 1, 1)
  if(row == "") return (0)
  else return (as.integer(row))
}
data_cust$OCCP_GRP_1 <- sapply(data_cust$OCCP_GRP_1, occp_grp_1_to_no)
# Marital status Y/N to 1/0 ->
data_cust$MATE_OCCP_GRP_1 <- sapply(data_cust$MATE_OCCP_GRP_1, occp_grp_1_to_no)

#####################################################
# Empty strings to "N": if marital status is an empty string, treat it as "N"
nullstring_to_N <- function(row) {
  if(row == "N") row = "N"
  else if(row == "Y") row = "Y"
  else row = "N"
}
data_cust$WEDD_YN <- sapply(data_cust$WEDD_YN, nullstring_to_N)
data_cust$WEDD_YN <- sapply(data_cust$WEDD_YN, yn_to_10)

# Remove the temporary helper functions
rm(age_to_gen)
rm(yn_to_10)
rm(na_to_0)
rm(ctpr_to_code)
rm(na_to_6)
rm(occp_grp_1_to_no)
rm(nullstring_to_N)

# Save the intermediate data
write.csv(data_cust, "Working/data_cust_1-1.csv", row.names = FALSE)
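# Optional vectorized sketch (not part of the lab script): the same Y/N and
# region recodings can be written without element-wise sapply() calls, using
# ifelse() and a named lookup vector. This would replace the corresponding
# sapply() lines above (not run after them), and unlike yn_to_10() it maps any
# unexpected value to 0 rather than "".
region_code <- c("서울"=1, "부산"=2, "대구"=3, "인천"=4, "광주"=5, "대전"=6,
                 "울산"=7, "세종"=8, "경기"=9, "강원"=10, "충북"=11, "충남"=12,
                 "전북"=13, "전남"=14, "경북"=15, "경남"=16, "제주"=17)
data_cust$FP_CAREER <- ifelse(data_cust$FP_CAREER == "Y", 1, 0)
codes <- unname(region_code[as.character(data_cust$CTPR)])
data_cust$CTPR <- ifelse(is.na(codes), 0, codes)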
path: /R_DataAnalysis/R_Hanwha/Lab_1-1.R
license: no_license
repo: Jerrykim91/Bigdata_Analytics
language: R
vendored: false
generated: false
size: 4,651 bytes
extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edr_regex_gadget.R
\name{edr_regexplain_gadget}
\alias{edr_regexplain_gadget}
\title{Use the RegExplain gadget with some example text}
\usage{
edr_regexplain_gadget(text = NULL)
}
\arguments{
\item{text}{Text to experiment with in the RegExplain gadget. If nothing is
provided then the following sentence/string will be used:
\code{"The quick brown fox jumps over the lazy dog"}.}
}
\description{
Use the RegExplain gadget with some example text
}
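# Usage sketch (assumes the edr package is installed; the gadget is interactive,
# so the calls are shown commented out and are meant for an RStudio session):
library(edr)
# edr_regexplain_gadget()                                      # default example sentence
# edr_regexplain_gadget(text = "R 4.2.0 released 2022-04-22")  # your own text (hypothetical string)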
path: /man/edr_regexplain_gadget.Rd
license: permissive
repo: rich-iannone/edr
language: R
vendored: false
generated: true
size: 526 bytes
extension: rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/network.data.R \name{network.data} \alias{network.data} \title{Make a network object containing data, priors, and a JAGS model file} \usage{ network.data( Outcomes = NULL, Study = NULL, Treat = NULL, N = NULL, SE = NULL, response = NULL, Treat.order = NULL, type = "random", rank.preference = "higher", baseline = "none", baseline.risk = "independent", covariate = NULL, covariate.type = NULL, covariate.model = NULL, mean.d = NULL, prec.d = NULL, mean.Eta = NULL, prec.Eta = NULL, hy.prior.Eta = NULL, mean.bl = NULL, prec.bl = NULL, hy.prior.bl = NULL, mean.cov = NULL, prec.cov = NULL, hy.prior.cov = NULL, hy.prior = NULL ) } \arguments{ \item{Outcomes}{Arm-level outcomes. If it is a multinomial response, the matrix would have dimensions treatment arms (row) by multinomial categories (column). If it is binomial or normal, it would be a vector.} \item{Study}{A vector of study indicator for each arm} \item{Treat}{A vector of treatment indicator for each arm} \item{N}{A vector of total number of observations in each arm. Used for binomial and multinomial responses.} \item{SE}{A vector of standard error for each arm. Used only for normal response.} \item{response}{Specification of the outcomes type. Must specify one of the following: "normal", "binomial", or "multinomial".} \item{Treat.order}{Treatment order which determines how treatments are compared. The first treatment that is specified is considered to be the baseline treatment. Default order is alphabetical. If the treatments are coded 1, 2, etc, then the treatment with a value of 1 would be assigned as a baseline treatment.} \item{type}{Type of model fitted: either "random" for random effects model or "fixed" for fixed effects model. Default is "random".} \item{rank.preference}{Set it equal to "higher" if higher values are preferred (i.e. assumes events are good). Set it equal to "lower" if lower values are preferred (i.e. assumes events are bad). Default is "higher".} \item{baseline}{Three different assumptions for treatment x baseline risk interactions (slopes): "independent", "common", or "exchangeable". Default is "none" which doesn't incorporate baseline risk.} \item{baseline.risk}{Two different assumptions for baseline risk: "independent" or "exchangeable". See Achana et al. (2012) for more information about baseline risk.} \item{covariate}{A covariate matrix with each row representing each trial and column representing each covariate. This is a study-level data, meaning that the user doesn't need to repeatedly specify covariates for each arm.} \item{covariate.type}{Should be a vector indicating the type of the covariate. Covariate can be either "continuous" or "discrete". If it continuous, covariates are centered. If the covariate is discrete it is not centered and it has to be in a dummy integer format (i.e. 0,1,2,...). The code doesn't factor the covariates for the user, so user needs to specify dummy variables if factor is needed.} \item{covariate.model}{"independent" allows covariate effects for each treatment. "common" restricts same covariate effect for all treatment. Lastly, "exchangeable" assumes that the covariate effects are different but related and strength is borrowed across them. We set "common" to be default. See Cooper et al. 
(2009) for more details on covariates.} \item{mean.d}{Prior mean for the relative effect} \item{prec.d}{Prior precision for the relative effect} \item{mean.Eta}{Prior mean for the study effect (baseline risk)} \item{prec.Eta}{Prior precision for the study effect (baseline risk)} \item{hy.prior.Eta}{Between treatment heterogeneity in baseline risk (for exchangeable assumption only). Format of the parameter is same as hy.prior.} \item{mean.bl}{Prior mean for the baseline slope} \item{prec.bl}{Prior precision for the baseline slope} \item{hy.prior.bl}{Between treatment heterogeneity in baseline slope (for exchangeable regression coefficient only). Format of the parameter is same as hy.prior.} \item{mean.cov}{Prior mean for the covariate effect} \item{prec.cov}{Prior precision for the covariate effect} \item{hy.prior.cov}{Between treatment heterogeneity in covariate effect (for exchangeable regression coefficient only). Format of the parameter is same as hy.prior. Default is set to be dunif(0, 5) for binary, dunif(0, 100) for normal, and wishart with identity scale matrix and (# of categories - 1) degrees of freedom for multinomial.} \item{hy.prior}{Prior for the heterogeneity parameter. Supports uniform, gamma, and half normal for normal and binomial response and wishart for multinomial response. It should be a list of length 3, where first element should be the distribution (one of dunif, dgamma, dhnorm, dwish) and the next two are the parameters associated with the distribution. For example, list("dunif", 0, 5) give uniform prior with lower bound 0 and upper bound 5 for the heterogeneity parameter. For wishart distribution, the last two parameter would be the scale matrix and the degrees of freedom.} } \value{ Creates list of variables that are used to run the model using \code{\link{network.run}} \item{data}{Data combining all the input data. User can check this to insure the data is correctly specified. For modelling purposes, character valued studies or treatment variables are changed to numeric values based on alphabetical order.} \item{nrow}{Total number of arms in the meta-analysis} \item{ncat}{Number of columns in the Outcomes. Will equal 1 for binary and normal and number of categories for multinomial} \item{nstudy}{Number of study} \item{na}{Number of arms for each study} \item{ntreat}{Number of treatment} \item{b.id}{Indicator in sequence of all treatments for which treatment is base treatment in Study} \item{t}{\code{Treat} transformed into a matrix which has dimensions number of study by max number of arms in studies} \item{r}{\code{Outcomes} made into an array that is suitable for use in rjags code. For multinomial, it has 3 dimensions: number of study by max number of arms in studies by number of categories.} \item{mx}{If the continuous covariate is included, it calculates the mean of the covariates which is used to center the covariates. The numeric indicator after mx refers to column number of the covariates if there are more than one covariates included. Discrete covariates are not centered.} \item{mx_bl}{If the baseline effect is specified, it also calculates the mean baseline risk.} \item{prior.data}{Prior data created using the user inputs or default values. If no user input is specifies for the prior, it uses default values.} \item{code}{Rjags model file code that is generated using information provided by the user. 
To view model file inside R in a nice format, use \code{cat(network$code).}} } \description{ This function makes a network object that can be used to run network meta-analysis using \code{\link{network.run}}. User needs to specify Outcomes, Study, Treat, N or SE, and response. Prior parameters are filled in automatically based on the data type if not specified. The input data should be arm-level so that we have observations for each treatment in each study. The input data is preprocessed to fit the format necessary to run model in JAGS. } \examples{ ###Blocker data example blocker network <- with(blocker, { network.data(Outcomes, Study, Treat, N = N, response = "binomial") }) network } \references{ S. Dias, A.J. Sutton, A.E. Ades, and N.J. Welton (2013a), \emph{A Generalized Linear Modeling Framework for Pairwise and Network Meta-analysis of Randomized Controlled Trials}, Medical Decision Making 33(5):607-617. [\url{https://doi.org/10.1177/0272989X12458724}] F.A. Achana, N.J. Cooper, S. Dias, G. Lu, S.J.C. Rice, D. Kendrick, A.J. Sutton (2012), \emph{Extending methods for investigating the relationship between treatment effect and baseline risk from pairwise meta-analysis to network meta-analysis}, Statistics in Medicine 32(5):752-771. [\url{https://doi.org/10.1002/sim.5539}] N.J. Cooper, A.J. Sutton, D. Morris, A.E. Ades, N.J. Welton (2009), \emph{Addressing between-study heterogeneity and inconsistency in mixed treatment comparisons: Application to stroke prevention treatments in individuals with non-rheumatic atrial fibrillation}, Statistics in Medicine 28:1861-1881. [\url{https://doi.org/10.1002/sim.3594}] }
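# Usage sketch (assumes the bnma package and its bundled 'blocker' data, as in
# the example above): pass an explicit heterogeneity prior and inspect the
# auto-generated JAGS model code, both of which are documented arguments/values.
library(bnma)
network <- with(blocker, {
  network.data(Outcomes, Study, Treat, N = N, response = "binomial",
               type = "random", hy.prior = list("dunif", 0, 5))
})
cat(network$code)  # nicely formatted view of the generated model file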
path: /man/network.data.Rd
license: no_license
repo: shamim-active/bnma
language: R
vendored: false
generated: true
size: 8,454 bytes
extension: rd
tryCatch({source("tests/testthat/helpers.R"); source("helpers.R")}, warning=function(w) invisible()) test_that("isTimestepRegular works", { library(rloadest) simpledata <- app2.calib[-which(diff(app2.calib$DATES) < 7),] # Error handling should be up to the caller expect_error(isTimestepRegular(simpledata$DATES, hist=FALSE, handler=stop), "Time series is irregular") expect_warning(isTimestepRegular(simpledata$DATES, hist=FALSE, handler=warning), "Time series is irregular") expect_false(isTimestepRegular(simpledata$DATES, hist=FALSE, handler=function(e) {})) expect_false(isTimestepRegular(simpledata$DATES, hist=TRUE, handler=function(e) {})) expect_manual_OK("Histogram of timesteps makes sense") # Regular time steps should pass simpledata <- transform(simpledata, DATES=seq(DATES[1], DATES[length(DATES)], length.out=length(DATES))) expect_true(isTimestepRegular(simpledata$DATES, hist=TRUE, handler=function(e) {})) # Tolerance should be settable simpledata <- transform(simpledata, DATES=DATES + pmin(pmax(rnorm(length(DATES), 0, 0.1), -0.5), 0.5)) expect_false(isTimestepRegular(simpledata$DATES, hist=TRUE, handler=function(e) {})) expect_true(isTimestepRegular(simpledata$DATES, hist=TRUE, tol = 1, handler=function(e) {})) }) test_that("Durbin Watson tests are reasonable", { library(rloadest) simpledata <- transform(app2.calib[-which(diff(app2.calib$DATES) < 7),], Period=seasons(DATES,breaks=c("Apr", "Jul"))) reg.model <- loadReg2(loadReg( Atrazine ~ Period*center(log(FLOW)), data = simpledata, flow = "FLOW", dates = "DATES", conc.units="mg/L")) # An irregular time step shouldn't work without some effort expect_error(expect_warning(residDurbinWatson(reg.model), "Time series is irregular"), "invalid for an irregular time series") # But if you're willing to sacrifice regularity, you should be able to get a number expect_is(residDurbinWatson(reg.model, irregular.timesteps.ok=TRUE, plot=FALSE), "numeric") # And if it's regular already, it should just work. 
newdata <- transform(simpledata) expect_is(residDurbinWatson(reg.model, irregular.timesteps.ok=TRUE, plot=FALSE), "numeric") }) test_that("estimateRho works", { library(rloadest) # make the dates regular so that we can pretend this dataset makes sense simpledata <- app2.calib[-which(diff(app2.calib$DATES) < 7),] simpledata <- transform(simpledata, DATES=seq(DATES[1], DATES[length(DATES)], length.out=length(DATES))) simpledata <- transform(simpledata, Period=seasons(DATES,breaks=c("Apr", "Jul"))) reg.model <- loadReg2(loadReg( Atrazine ~ Period*center(log(FLOW)), data = simpledata, flow = "FLOW", dates = "DATES", conc.units="mg/L")) # Call estimateRho rho.out <- estimateRho(load.model=reg.model, flux.or.conc="flux", abs.or.rel.resids="absolute", newdata=NULL, plot.acf=TRUE, irr=TRUE) # Output should be a function and a fitted Arima model expect_is(rho.out$rho, "numeric") expect_is(rho.out$time.step, "difftime") expect_is(rho.out$rho.fun, "function") # a function that takes a date and a vector of dates and returns a vector of correlations expect_is(rho.out$cormat.fun, "function") # a function that takes a vector of dates and returns a matrix of correlations expect_is(rho.out$arima.model, "Arima") # the arima fit # Check the function for the regular time series to which it was fitted plot(rho.out$rho.fun(simpledata$DATES[3], simpledata$DATES), x=simpledata$DATES, type="b", col="blue") points(rho.out$rho.fun(simpledata$DATES[8], simpledata$DATES), x=simpledata$DATES, type="b", col="green") points(rho.out$rho.fun(simpledata$DATES[14], simpledata$DATES), x=simpledata$DATES, type="b", col="gold") points(rho.out$rho.fun(simpledata$DATES[21], simpledata$DATES), x=simpledata$DATES, type="b", col="red") expect_manual_OK("correlations at dates #3, 8, 14, & 21 are OK for regular time series", "Look at the plot.") # Check the function for an irregular time series - correlation should be a # function of the distance in time rather than in the number of rows # separating two observations simpledata <- app2.calib[-which(diff(app2.calib$DATES) < 7),] plot(rho.out$rho.fun(simpledata$DATES[3], simpledata$DATES), x=simpledata$DATES, type="b", col="blue", ylab="Cor from Dates") points(rho.out$rho.fun(simpledata$DATES[8], simpledata$DATES), x=simpledata$DATES, type="b", col="green") points(rho.out$rho.fun(simpledata$DATES[14], simpledata$DATES), x=simpledata$DATES, type="b", col="gold") points(rho.out$rho.fun(simpledata$DATES[21], simpledata$DATES), x=simpledata$DATES, type="b", col="red") expect_manual_OK("correlations at dates #3, 8, 14, & 21 are OK for irregular time series", "Look at the plot.") # Check that other date formats are also fine simpledata <- app2.calib[-which(diff(app2.calib$DATES) < 7),] # these DATES are in Date format plot(rho.out$rho.fun(simpledata$DATES[3], simpledata$DATES), x=simpledata$DATES, type="b", col="blue", ylab="Cor from varied date formats") points(rho.out$rho.fun(as.POSIXlt(simpledata$DATES[8]), as.POSIXlt(simpledata$DATES)), x=simpledata$DATES, type="b", col="green") library(chron) points(rho.out$rho.fun(as.chron(simpledata$DATES[14]), as.chron(simpledata$DATES)), x=simpledata$DATES, type="b", col="gold") points(rho.out$rho.fun(as.POSIXct(simpledata$DATES[21]), as.POSIXct(simpledata$DATES)), x=simpledata$DATES, type="b", col="red") expect_manual_OK("exact same plot when built from varying date formats", "Look at the plot.") })
path: /tests/testthat/test-05-diagnostics.R
license: no_license
repo: jacaronda/loadflex
language: R
vendored: false
generated: false
size: 5,603 bytes
extension: r
source("utils.R") devtools::install_github("hadley/lineprof") MyRequire(pryr) MyRequire(data.table) MyRequire(ggplot2) MyRequire(pryr) MyRequire(devtools) object_size(1:10) object_size(mean) object_size(mtcars) # zero-length vector with size != 0 & size is not proportionate with length sizes <- sapply(0:50, function(n) object_size(seq_len(n))) plot(0:50, sizes, xlab = "Length", ylab = "Size (bytes)", type = "s") # zero-length vector size object_size(numeric()) object_size(logical()) object_size(raw()) object_size(list()) plot(0:50, sizes - 40, xlab = "Length", ylab = "Bytes excluding overhead", type = "n") abline(h = 0, col = "grey80") abline(h = c(8, 16, 32, 48, 64, 128), col = "grey80") abline(a = 0, b = 4, col = "grey90", lwd = 4) lines(sizes - 40, type = "s") # shared components x <- 1:1e6 object_size(x) y <- list(x, x, x) object_size(y) object_size(x, y) # no sharing x1 <- 1:1e6 y1 <- list(1:1e6, 1:1e6, 1:1e6) object_size(x1) object_size(y1) object_size(x1, y1) object_size(x1) + object_size(y1) == object_size(x1, y1) # strings pool object_size("banana") object_size(rep("banana", 10)) # memory usage & garbage collection mem_used() mem_change(x <- 1:1e6) mem_change(rm(x)) mem_change(NULL) mem_change(NULL) mem_change(x <- 1:1e6) mem_change(y <- x) mem_change(rm(x)) mem_change(rm(y)) f1 <- function() { x <- 1:1e6 10 } mem_change(x <- f1()) object_size(x) f2 <- function() { x <- 1:1e6 a ~ b } mem_change(y <- f2()) object_size(y) f3 <- function() { x <- 1:1e6 function() 10 } mem_change(z <- f3()) object_size(z) # memory profiling with lineprof if (!dir.exists("data")) { dir.create("data") } write.csv(diamonds, "data/diamonds.csv", row.names = FALSE) source("src/R/read-delim.R") prof <- lineprof(read_delim("data/diamonds.csv")) shine(prof) prof_2 <- lineprof(read_delim_2("data/diamonds.csv")) shine(prof_2) prof_3 <- lineprof(f()) shine(prof_3) # modification in place x <- 1:10 x[5] <- 10 x # RStudio x <- 1:10 c(address(x), refs(x)) y <- x c(address(y), refs(y)) x <- 1:5 y <- x rm(y) # Should really be 1, because we've deleted y refs(x) x <- 1:5 y <- x z <- x # Should really be 3 refs(x) x <- 1:10 y <- x c(address(x), address(y)) x[5] <- 6L c(address(x), address(y)) x <- 1:10 tracemem(x) x[5] <- 6L y <- x x[5] <- 6L # Touching the object forces an increment f <- function(x) x {x <- 1:10; f(x); refs(x)} # Sum is primitive, so no increment {x <- 1:10; sum(x); refs(x)} # f() and g() never evaluate x, so refs don't increment f <- function(x) 10 g <- function(x) substitute(x) {x <- 1:10; f(x); refs(x)} {x <- 1:10; g(x); refs(x)} # loops x <- data.frame(matrix(runif(100 * 1e4), ncol = 100)) medians <- vapply(x, median, numeric(1)) for(i in seq_along(medians)) { x[, i] <- x[, i] - medians[i] } for(i in 1:5) { x[, i] <- x[, i] - medians[i] print(c(address(x), refs(x))) } y <- as.list(x) for(i in 1:5) { y[[i]] <- y[[i]] - medians[i] print(c(address(y), refs(y))) } # my personal tests #------------------------------------------------------------------------------- dt <- data.table(1:5) dt address(dt) f <- function(x) { x <- data.table(6:10) print(address(x)) } f(dt) dt address(dt) f_2 <- function(x) { x[, V2 := V1] print(address(x)) } f_2(dt) dt address(dt) x <- 1:1e6 y <- x object_size(x) object_size(y) object_size(x, y) address(x) address(y) x[1] <- 10L address(x) address(y) object_size(x, y) dt <- data.table(X1 = 1:1e6) dt address(dt) object_size(dt) f <- function(x) { x[, X2 := X1] } f(dt) dt address(dt) object_size(dt) df <- data.frame(X1 = 1:1e6) head(df) address(df) object_size(df) f <- 
function(df) { df$X2 <- df$X1 df } df <- f(df) head(df) address(df) object_size(df) df$X2[1] <- 10 dt <- data.table(1:1e6) dt_2 <- dt object_size(dt) object_size(dt_2) object_size(dt, dt_2) address(dt) address(dt_2) dt_2[1, V1 := 10L] dt_2 dt address(dt) address(dt_2) object_size(dt, dt_2) dt <- data.table(1:5, letters[1:5]) dt <- data.table(1:5, letters[1:5], LETTERS[1:5]) address(dt) track_dt <- track_copy(dt) f <- function(x) { setnames(x, names(x), c("X1", "X2")) x[, X3 := X1] } track_dt() f(dt[, .(V1, V2)]) f(dt) address(dt) dt <- data.table(1:5, letters[1:5], LETTERS[1:5]) track_dt <- track_copy(dt) track_dt() tracemem(dt) dt_bis <- dt[, .(V1, V2)] dt_bis$V1[1] <- 200 address(dt) address(dt_bis) track_dt <- track_copy(dt) track_dt() dt_bis[, V4 := V1]
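# Follow-up sketch using the same tools loaded above (pryr + data.table): a
# data.table `:=` assignment modifies in place, so the address is unchanged and
# the reported memory change is roughly one new column, not a full copy.
# (dt3 is a new, hypothetical object name to avoid clobbering the ones above.)
dt3 <- data.table(X1 = 1:1e6)
address(dt3)
mem_change(dt3[, X3 := X1 * 2L])
address(dt3)  # same address as before => modified in place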
path: /memory.R
license: no_license
repo: minutestatistique/personal-R-notes
language: R
vendored: false
generated: false
size: 4,415 bytes
extension: r
source("utils.R") devtools::install_github("hadley/lineprof") MyRequire(pryr) MyRequire(data.table) MyRequire(ggplot2) MyRequire(pryr) MyRequire(devtools) object_size(1:10) object_size(mean) object_size(mtcars) # zero-length vector with size != 0 & size is not proportionate with length sizes <- sapply(0:50, function(n) object_size(seq_len(n))) plot(0:50, sizes, xlab = "Length", ylab = "Size (bytes)", type = "s") # zero-length vector size object_size(numeric()) object_size(logical()) object_size(raw()) object_size(list()) plot(0:50, sizes - 40, xlab = "Length", ylab = "Bytes excluding overhead", type = "n") abline(h = 0, col = "grey80") abline(h = c(8, 16, 32, 48, 64, 128), col = "grey80") abline(a = 0, b = 4, col = "grey90", lwd = 4) lines(sizes - 40, type = "s") # shared components x <- 1:1e6 object_size(x) y <- list(x, x, x) object_size(y) object_size(x, y) # no sharing x1 <- 1:1e6 y1 <- list(1:1e6, 1:1e6, 1:1e6) object_size(x1) object_size(y1) object_size(x1, y1) object_size(x1) + object_size(y1) == object_size(x1, y1) # strings pool object_size("banana") object_size(rep("banana", 10)) # memory usage & garbage collection mem_used() mem_change(x <- 1:1e6) mem_change(rm(x)) mem_change(NULL) mem_change(NULL) mem_change(x <- 1:1e6) mem_change(y <- x) mem_change(rm(x)) mem_change(rm(y)) f1 <- function() { x <- 1:1e6 10 } mem_change(x <- f1()) object_size(x) f2 <- function() { x <- 1:1e6 a ~ b } mem_change(y <- f2()) object_size(y) f3 <- function() { x <- 1:1e6 function() 10 } mem_change(z <- f3()) object_size(z) # memory profiling with lineprof if (!dir.exists("data")) { dir.create("data") } write.csv(diamonds, "data/diamonds.csv", row.names = FALSE) source("src/R/read-delim.R") prof <- lineprof(read_delim("data/diamonds.csv")) shine(prof) prof_2 <- lineprof(read_delim_2("data/diamonds.csv")) shine(prof_2) prof_3 <- lineprof(f()) shine(prof_3) # modification in place x <- 1:10 x[5] <- 10 x # RStudio x <- 1:10 c(address(x), refs(x)) y <- x c(address(y), refs(y)) x <- 1:5 y <- x rm(y) # Should really be 1, because we've deleted y refs(x) x <- 1:5 y <- x z <- x # Should really be 3 refs(x) x <- 1:10 y <- x c(address(x), address(y)) x[5] <- 6L c(address(x), address(y)) x <- 1:10 tracemem(x) x[5] <- 6L y <- x x[5] <- 6L # Touching the object forces an increment f <- function(x) x {x <- 1:10; f(x); refs(x)} # Sum is primitive, so no increment {x <- 1:10; sum(x); refs(x)} # f() and g() never evaluate x, so refs don't increment f <- function(x) 10 g <- function(x) substitute(x) {x <- 1:10; f(x); refs(x)} {x <- 1:10; g(x); refs(x)} # loops x <- data.frame(matrix(runif(100 * 1e4), ncol = 100)) medians <- vapply(x, median, numeric(1)) for(i in seq_along(medians)) { x[, i] <- x[, i] - medians[i] } for(i in 1:5) { x[, i] <- x[, i] - medians[i] print(c(address(x), refs(x))) } y <- as.list(x) for(i in 1:5) { y[[i]] <- y[[i]] - medians[i] print(c(address(y), refs(y))) } # my personal tests #------------------------------------------------------------------------------- dt <- data.table(1:5) dt address(dt) f <- function(x) { x <- data.table(6:10) print(address(x)) } f(dt) dt address(dt) f_2 <- function(x) { x[, V2 := V1] print(address(x)) } f_2(dt) dt address(dt) x <- 1:1e6 y <- x object_size(x) object_size(y) object_size(x, y) address(x) address(y) x[1] <- 10L address(x) address(y) object_size(x, y) dt <- data.table(X1 = 1:1e6) dt address(dt) object_size(dt) f <- function(x) { x[, X2 := X1] } f(dt) dt address(dt) object_size(dt) df <- data.frame(X1 = 1:1e6) head(df) address(df) object_size(df) f <- 
function(df) { df$X2 <- df$X1 df } df <- f(df) head(df) address(df) object_size(df) df$X2[1] <- 10 dt <- data.table(1:1e6) dt_2 <- dt object_size(dt) object_size(dt_2) object_size(dt, dt_2) address(dt) address(dt_2) dt_2[1, V1 := 10L] dt_2 dt address(dt) address(dt_2) object_size(dt, dt_2) dt <- data.table(1:5, letters[1:5]) dt <- data.table(1:5, letters[1:5], LETTERS[1:5]) address(dt) track_dt <- track_copy(dt) f <- function(x) { setnames(x, names(x), c("X1", "X2")) x[, X3 := X1] } track_dt() f(dt[, .(V1, V2)]) f(dt) address(dt) dt <- data.table(1:5, letters[1:5], LETTERS[1:5]) track_dt <- track_copy(dt) track_dt() tracemem(dt) dt_bis <- dt[, .(V1, V2)] dt_bis$V1[1] <- 200 address(dt) address(dt_bis) track_dt <- track_copy(dt) track_dt() dt_bis[, V4 := V1]
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{crossValidationSplits}
\alias{crossValidationSplits}
\title{crossValidationSplits}
\usage{
crossValidationSplits(dataSet, k, seed = NULL)
}
\arguments{
\item{dataSet}{the data set to split. Make sure samples are the rows and the
columns are the features.}

\item{k}{the number of splits to partition the data into.}

\item{seed}{the seed used for the randomization. Defaults to NULL.}
}
\description{
Splits the data into k partitions, which can later be used together with the
function "CVtrainAndTestSet"
}
\examples{

}
\keyword{Cross}
\keyword{validation}
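# Usage sketch (hypothetical, since the package's \examples{} section is empty
# and the return structure is not documented here; this only illustrates the call):
# split the iris measurements into 5 partitions with a fixed seed, then pair the
# result with CVtrainAndTestSet() as described above.
splits <- crossValidationSplits(dataSet = iris[, 1:4], k = 5, seed = 42)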
path: /Filips ML package/Filips.ML.package/man/crossValidationSplits.Rd
license: no_license
repo: Filco306/ML-Implementations
language: R
vendored: false
generated: true
size: 672 bytes
extension: rd
# Make figure of log(gene_copies) vs Ct values
makefig_logGenes_vs_ct <- function(inputDat){
  inputDat %>%
    ggplot(aes(Ct, log10(Gene_copy))) +
    geom_smooth(method = "lm", se = FALSE) +
    geom_point(aes(shape = Concentration_cate, color = Assay)) +
    labs(x = "CT value", y = "log10(Gene copies)")
}
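# Usage sketch (assumes ggplot2 and a pipe provider such as dplyr/magrittr are
# attached, since the function body uses %>%; column names follow the aes()
# mapping above, and the example values/labels are made up):
library(ggplot2)
library(dplyr)
toy <- data.frame(
  Ct = c(30, 27, 24, 21),
  Gene_copy = c(1e2, 1e3, 1e4, 1e5),
  Concentration_cate = factor(c("low", "low", "high", "high")),
  Assay = factor(c("assay1", "assay1", "assay2", "assay2"))
)
makefig_logGenes_vs_ct(toy)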
path: /R/makefig_logGenes_vs_ct.R
license: no_license
repo: germs-lab/evaluatePCR
language: R
vendored: false
generated: false
size: 318 bytes
extension: r
library(ggplot2) dat <- read.table('compiled_data.txt', as.is = T) colnames(dat) <- c('run_name', 'cal_time', 'sim_rate', 'sd_rate', 'slope', 'r', 'rate_mean', 'rate_low', 'rate_high', 'root_mean', 'root_low', 'root_high', paste0('r1_', c('rate_mean', 'rate_low', 'rate_high', 'root_mean', 'root_low', 'root_high')), paste0('r2_', c('rate_mean', 'rate_low', 'rate_high', 'root_mean', 'root_low', 'root_high')), paste0('r3_', c('rate_mean', 'rate_low', 'rate_high', 'root_mean', 'root_low', 'root_high')), paste0('r4_', c('rate_mean', 'rate_low', 'rate_high', 'root_mean', 'root_low', 'root_high')), paste0('r5_', c('rate_mean', 'rate_low', 'rate_high', 'root_mean', 'root_low', 'root_high')) ) pass_cr1 <- vector() pass_cr2 <- vector() pass_cr3 <- vector() pass_true <- dat$sim_rate < dat$rate_high & dat$sim_rate > dat$rate_low cv_rates <- (dat$rate_high - dat$rate_low) / dat$rate_mean <= 0.2 rate_median <- dat$rate_low + ((dat$rate_high - dat$rate_low) / 2) for(i in 1:nrow(dat)){ rates_high <-dat[i, grep('r.+rate_high', colnames(dat))] rates_low <- dat[i, grep('r.+rate_low', colnames(dat))] rates_med <- dat[i, grep('r.+rate_mean', colnames(dat))] #pass_cr1[i] <- dat$rate_mean[i] >= max(rates_high) | dat$rate_mean[i] <= min(rates_low) pass_cr1[i] <- rate_median[i] >= max(rates_high) | rate_median[i] <= min(rates_low) pass_cr2[i] <- dat$rate_low[i] >= max(rates_high) | dat$rate_high[i] <= min(rates_low) pass_cr3[i] <- sum( ((rates_high - rates_low) / rates_med) > ((dat$rate_high[i] - dat$rate_low[i]) / dat$rate_mean[i] )) >= 4 } dat <- cbind(dat, rate_median, pass_cr1, pass_cr2, pass_cr3, pass_true, cv_rates) dat <- dat[-which(dat$rate_median > 0.1), ] dat <- dat[-which((dat$rate_high - dat$rate_low) > 0.002), ] #plot(dat$cal_time[dat$sd_rate == 0.01], rate_median[dat$sd_rate == 0.01], col = c('red', 'black')[as.numeric(pass_cr1) + 1], ylim = c(0, 0.003)) #points(dat$cal_time[dat$sd_rate == 0.01], dat$rate_high[dat$sd_rate == 0.01], col = c('red', 'black')[as.numeric(pass_cr1) + 1], pch = 2) #points(dat$cal_time[dat$sd_rate == 0.01], dat$rate_low[dat$sd_rate == 0.01], col = c('red', 'black')[as.numeric(pass_cr1) + 1], pch = 2) #lines(x = c(0, 30), y = c(0.0001, 0.0001)) d1 <- dat[dat$sd_rate == 0.01, ] plot_1 <- ggplot(d1, aes(x = cal_time, y = rate_median, colour = pass_cr3)) + geom_point() + geom_errorbar(aes(ymin = rate_low, ymax = rate_high)) d2 <- dat[dat$sd_rate == 0.01, ] plot_2 <- ggplot(d2, aes(x = cal_time, y = rate_median, colour = pass_cr2)) + geom_point() + geom_errorbar(aes(ymin = rate_low, ymax = rate_high)) plot_3 <- ggplot(d1, aes(x = cal_time, y = rate_median, colour = pass_true)) + geom_point() + geom_errorbar(aes(ymin = rate_low, ymax = rate_high)) plot_4 <- ggplot(d1, aes(cal_time, y = rate_median, colour = cv_rates)) + geom_point() + geom_errorbar(aes(ymin = rate_low, ymax = rate_high)) plot_5 <- ggplot(d2, aes(x = cal_time, y = rate_median, colour = pass_cr1)) + geom_point() + geom_errorbar(aes(ymin = rate_low, ymax = rate_high)) #coord_cartesian(ylim = c(0, 0.003)) +
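# Note: the ggplot objects above (plot_1 ... plot_5) are only assigned, so
# nothing is drawn when the script is sourced. A minimal follow-up sketch
# (the output file name is hypothetical):
print(plot_1)
ggsave("rate_estimates_vs_caltime.png", plot_1, width = 6, height = 4)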
path: /analyse_data/analyse_data_5_reps.R
license: no_license
repo: sebastianduchene/date_rand_test
language: R
vendored: false
generated: false
size: 3,109 bytes
extension: r
library(imlib)
library(ggplot2)

RES <- 100

dat <- readRDS("data/prepped_data.rds")
dat <- dat[dat$year > 1946,]
m <- readRDS("models/t3_m7.rds")

predictors <- c("lfree_fair_elections", "lhorizontal_constraint_narrow")

# A regular grid over [0, 1] for each predictor; other variables held at their mean
variables <- lapply(predictors, function(x) {
  seq(0, 1, length.out = RES)
})
names(variables) <- predictors

ts <- makeTestSet(dat, variables, mean)

# Squared terms for each predictor
for(v in predictors){
  ts[[paste0(v, "_sq")]] <- ts[[v]] ^ 2
}

# Interaction term under both name orderings (same product)
ts[[paste(predictors[1], predictors[2], sep = ":")]] <- ts[[predictors[1]]] * ts[[predictors[2]]]
ts[[paste(predictors[2], predictors[1], sep = ":")]] <- ts[[predictors[2]]] * ts[[predictors[1]]]

# Polynomial time-since terms
ts$timesince_sq <- ts$timesince ^ 2
ts$timesince_cb <- ts$timesince ^ 3

# Keep only columns that appear in the model, then simulate
ts <- ts[names(ts) %in% names(coef(m))]
ts <- cbind(ts, sim(ts, m))
print(head(ts))

plt <- ggplot(ts, aes(x = lfree_fair_elections, y = sim_mean,
                      color = lhorizontal_constraint_narrow,
                      group = lhorizontal_constraint_narrow)) +
  geom_line() +
  theme(legend.position = "none")

ggsave("/tmp/view.png", plt, width = 4, height = 4, device = "png")
write.csv(ts, "data/simulated.csv")
path: /makeSimSet.R
license: no_license
repo: Peder2911/simcube
language: R
vendored: false
generated: false
size: 1,061 bytes
extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipelineHelpers.R
\name{factorListToMetadata}
\alias{factorListToMetadata}
\title{Converts a list of factors into 'pagoda2' metadata, optionally filtering
down to the cells present in the provided 'pagoda2' app.}
\usage{
factorListToMetadata(factor.list, p2 = NULL)
}
\arguments{
\item{factor.list}{list of factors named by the cell identifier}

\item{p2}{'pagoda2' app to filter the factors by, optional (default=NULL)}
}
\value{
'pagoda2' web metadata object
}
\description{
Converts a list of factors into 'pagoda2' metadata, optionally filtering down
to the cells present in the provided 'pagoda2' app.
}
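# Usage sketch (hypothetical cell names and cluster labels; assumes the pagoda2
# package is attached): build a factor named by cell identifier, wrap it in a
# named list, and convert it to web-app metadata, optionally filtered to the
# cells of an existing app object (here called p2web).
cell.clusters <- factor(c("c1", "c2", "c1"), levels = c("c1", "c2"))
names(cell.clusters) <- c("cellA", "cellB", "cellC")  # cell identifiers
metadata <- factorListToMetadata(list(clusters = cell.clusters))
# metadata <- factorListToMetadata(list(clusters = cell.clusters), p2 = p2web)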
path: /man/factorListToMetadata.Rd
license: no_license
repo: rrydbirk/pagoda2
language: R
vendored: false
generated: true
size: 685 bytes
extension: rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ec2_operations.R \name{ec2_describe_reserved_instances_offerings} \alias{ec2_describe_reserved_instances_offerings} \title{Describes Reserved Instance offerings that are available for purchase} \usage{ ec2_describe_reserved_instances_offerings(AvailabilityZone, Filters, IncludeMarketplace, InstanceType, MaxDuration, MaxInstanceCount, MinDuration, OfferingClass, ProductDescription, ReservedInstancesOfferingIds, DryRun, InstanceTenancy, MaxResults, NextToken, OfferingType) } \arguments{ \item{AvailabilityZone}{The Availability Zone in which the Reserved Instance can be used.} \item{Filters}{One or more filters. \itemize{ \item \code{availability-zone} - The Availability Zone where the Reserved Instance can be used. \item \code{duration} - The duration of the Reserved Instance (for example, one year or three years), in seconds (\code{31536000} \\| \code{94608000}). \item \code{fixed-price} - The purchase price of the Reserved Instance (for example, 9800.0). \item \code{instance-type} - The instance type that is covered by the reservation. \item \code{marketplace} - Set to \code{true} to show only Reserved Instance Marketplace offerings. When this filter is not used, which is the default behavior, all offerings from both AWS and the Reserved Instance Marketplace are listed. \item \code{product-description} - The Reserved Instance product platform description. Instances that include \verb{(Amazon VPC)} in the product platform description will only be displayed to EC2-Classic account holders and are for use with Amazon VPC. (\code{Linux/UNIX} \\| \verb{Linux/UNIX (Amazon VPC)} \\| \verb{SUSE Linux} \\| \verb{SUSE Linux (Amazon VPC)} \\| \verb{Red Hat Enterprise Linux} \\| \verb{Red Hat Enterprise Linux (Amazon VPC)} \\| \code{Windows} \\| \verb{Windows (Amazon VPC)} \\| \verb{Windows with SQL Server Standard} \\| \verb{Windows with SQL Server Standard (Amazon VPC)} \\| \verb{Windows with SQL Server Web} \\| \verb{ Windows with SQL Server Web (Amazon VPC)} \\| \verb{Windows with SQL Server Enterprise} \\| \verb{Windows with SQL Server Enterprise (Amazon VPC)}) \item \code{reserved-instances-offering-id} - The Reserved Instances offering ID. \item \code{scope} - The scope of the Reserved Instance (\verb{Availability Zone} or \code{Region}). \item \code{usage-price} - The usage price of the Reserved Instance, per hour (for example, 0.84). }} \item{IncludeMarketplace}{Include Reserved Instance Marketplace offerings in the response.} \item{InstanceType}{The instance type that the reservation will cover (for example, \code{m1.small}). For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html}{Instance Types} in the \emph{Amazon Elastic Compute Cloud User Guide}.} \item{MaxDuration}{The maximum duration (in seconds) to filter when searching for offerings. Default: 94608000 (3 years)} \item{MaxInstanceCount}{The maximum number of instances to filter when searching for offerings. Default: 20} \item{MinDuration}{The minimum duration (in seconds) to filter when searching for offerings. Default: 2592000 (1 month)} \item{OfferingClass}{The offering class of the Reserved Instance. Can be \code{standard} or \code{convertible}.} \item{ProductDescription}{The Reserved Instance product platform description. 
Instances that include \verb{(Amazon VPC)} in the description are for use with Amazon VPC.} \item{ReservedInstancesOfferingIds}{One or more Reserved Instances offering IDs.} \item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.} \item{InstanceTenancy}{The tenancy of the instances covered by the reservation. A Reserved Instance with a tenancy of \code{dedicated} is applied to instances that run in a VPC on single-tenant hardware (i.e., Dedicated Instances). \strong{Important:} The \code{host} value cannot be used with this parameter. Use the \code{default} or \code{dedicated} values only. Default: \code{default}} \item{MaxResults}{The maximum number of results to return for the request in a single page. The remaining results of the initial request can be seen by sending another request with the returned \code{NextToken} value. The maximum is 100. Default: 100} \item{NextToken}{The token to retrieve the next page of results.} \item{OfferingType}{The Reserved Instance offering type. If you are using tools that predate the 2011-11-01 API version, you only have access to the \verb{Medium Utilization} Reserved Instance offering type.} } \description{ Describes Reserved Instance offerings that are available for purchase. With Reserved Instances, you purchase the right to launch instances for a period of time. During that time period, you do not receive insufficient capacity errors, and you pay a lower usage rate than the rate charged for On-Demand instances for the actual time used. } \details{ If you have listed your own Reserved Instances for sale in the Reserved Instance Marketplace, they will be excluded from these results. This is to ensure that you do not purchase your own Reserved Instances. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html}{Reserved Instance Marketplace} in the \emph{Amazon Elastic Compute Cloud User Guide}. 
} \section{Request syntax}{ \preformatted{svc$describe_reserved_instances_offerings( AvailabilityZone = "string", Filters = list( list( Name = "string", Values = list( "string" ) ) ), IncludeMarketplace = TRUE|FALSE, InstanceType = "t1.micro"|"t2.nano"|"t2.micro"|"t2.small"|"t2.medium"|"t2.large"|"t2.xlarge"|"t2.2xlarge"|"t3.nano"|"t3.micro"|"t3.small"|"t3.medium"|"t3.large"|"t3.xlarge"|"t3.2xlarge"|"t3a.nano"|"t3a.micro"|"t3a.small"|"t3a.medium"|"t3a.large"|"t3a.xlarge"|"t3a.2xlarge"|"m1.small"|"m1.medium"|"m1.large"|"m1.xlarge"|"m3.medium"|"m3.large"|"m3.xlarge"|"m3.2xlarge"|"m4.large"|"m4.xlarge"|"m4.2xlarge"|"m4.4xlarge"|"m4.10xlarge"|"m4.16xlarge"|"m2.xlarge"|"m2.2xlarge"|"m2.4xlarge"|"cr1.8xlarge"|"r3.large"|"r3.xlarge"|"r3.2xlarge"|"r3.4xlarge"|"r3.8xlarge"|"r4.large"|"r4.xlarge"|"r4.2xlarge"|"r4.4xlarge"|"r4.8xlarge"|"r4.16xlarge"|"r5.large"|"r5.xlarge"|"r5.2xlarge"|"r5.4xlarge"|"r5.8xlarge"|"r5.12xlarge"|"r5.16xlarge"|"r5.24xlarge"|"r5.metal"|"r5a.large"|"r5a.xlarge"|"r5a.2xlarge"|"r5a.4xlarge"|"r5a.8xlarge"|"r5a.12xlarge"|"r5a.16xlarge"|"r5a.24xlarge"|"r5d.large"|"r5d.xlarge"|"r5d.2xlarge"|"r5d.4xlarge"|"r5d.8xlarge"|"r5d.12xlarge"|"r5d.16xlarge"|"r5d.24xlarge"|"r5d.metal"|"r5ad.large"|"r5ad.xlarge"|"r5ad.2xlarge"|"r5ad.4xlarge"|"r5ad.8xlarge"|"r5ad.12xlarge"|"r5ad.16xlarge"|"r5ad.24xlarge"|"x1.16xlarge"|"x1.32xlarge"|"x1e.xlarge"|"x1e.2xlarge"|"x1e.4xlarge"|"x1e.8xlarge"|"x1e.16xlarge"|"x1e.32xlarge"|"i2.xlarge"|"i2.2xlarge"|"i2.4xlarge"|"i2.8xlarge"|"i3.large"|"i3.xlarge"|"i3.2xlarge"|"i3.4xlarge"|"i3.8xlarge"|"i3.16xlarge"|"i3.metal"|"i3en.large"|"i3en.xlarge"|"i3en.2xlarge"|"i3en.3xlarge"|"i3en.6xlarge"|"i3en.12xlarge"|"i3en.24xlarge"|"i3en.metal"|"hi1.4xlarge"|"hs1.8xlarge"|"c1.medium"|"c1.xlarge"|"c3.large"|"c3.xlarge"|"c3.2xlarge"|"c3.4xlarge"|"c3.8xlarge"|"c4.large"|"c4.xlarge"|"c4.2xlarge"|"c4.4xlarge"|"c4.8xlarge"|"c5.large"|"c5.xlarge"|"c5.2xlarge"|"c5.4xlarge"|"c5.9xlarge"|"c5.12xlarge"|"c5.18xlarge"|"c5.24xlarge"|"c5.metal"|"c5d.large"|"c5d.xlarge"|"c5d.2xlarge"|"c5d.4xlarge"|"c5d.9xlarge"|"c5d.12xlarge"|"c5d.18xlarge"|"c5d.24xlarge"|"c5d.metal"|"c5n.large"|"c5n.xlarge"|"c5n.2xlarge"|"c5n.4xlarge"|"c5n.9xlarge"|"c5n.18xlarge"|"cc1.4xlarge"|"cc2.8xlarge"|"g2.2xlarge"|"g2.8xlarge"|"g3.4xlarge"|"g3.8xlarge"|"g3.16xlarge"|"g3s.xlarge"|"g4dn.xlarge"|"g4dn.2xlarge"|"g4dn.4xlarge"|"g4dn.8xlarge"|"g4dn.12xlarge"|"g4dn.16xlarge"|"cg1.4xlarge"|"p2.xlarge"|"p2.8xlarge"|"p2.16xlarge"|"p3.2xlarge"|"p3.8xlarge"|"p3.16xlarge"|"p3dn.24xlarge"|"d2.xlarge"|"d2.2xlarge"|"d2.4xlarge"|"d2.8xlarge"|"f1.2xlarge"|"f1.4xlarge"|"f1.16xlarge"|"m5.large"|"m5.xlarge"|"m5.2xlarge"|"m5.4xlarge"|"m5.8xlarge"|"m5.12xlarge"|"m5.16xlarge"|"m5.24xlarge"|"m5.metal"|"m5a.large"|"m5a.xlarge"|"m5a.2xlarge"|"m5a.4xlarge"|"m5a.8xlarge"|"m5a.12xlarge"|"m5a.16xlarge"|"m5a.24xlarge"|"m5d.large"|"m5d.xlarge"|"m5d.2xlarge"|"m5d.4xlarge"|"m5d.8xlarge"|"m5d.12xlarge"|"m5d.16xlarge"|"m5d.24xlarge"|"m5d.metal"|"m5ad.large"|"m5ad.xlarge"|"m5ad.2xlarge"|"m5ad.4xlarge"|"m5ad.8xlarge"|"m5ad.12xlarge"|"m5ad.16xlarge"|"m5ad.24xlarge"|"h1.2xlarge"|"h1.4xlarge"|"h1.8xlarge"|"h1.16xlarge"|"z1d.large"|"z1d.xlarge"|"z1d.2xlarge"|"z1d.3xlarge"|"z1d.6xlarge"|"z1d.12xlarge"|"z1d.metal"|"u-6tb1.metal"|"u-9tb1.metal"|"u-12tb1.metal"|"u-18tb1.metal"|"u-24tb1.metal"|"a1.medium"|"a1.large"|"a1.xlarge"|"a1.2xlarge"|"a1.4xlarge"|"a1.metal"|"m5dn.large"|"m5dn.xlarge"|"m5dn.2xlarge"|"m5dn.4xlarge"|"m5dn.8xlarge"|"m5dn.12xlarge"|"m5dn.16xlarge"|"m5dn.24xlarge"|"m5n.large"|"m5n.xlarge"|"m5n.2xlarge"|"m5n.4xlarge"|"m5n.8xlarge"|"m5n.12xlarge"|
"m5n.16xlarge"|"m5n.24xlarge"|"r5dn.large"|"r5dn.xlarge"|"r5dn.2xlarge"|"r5dn.4xlarge"|"r5dn.8xlarge"|"r5dn.12xlarge"|"r5dn.16xlarge"|"r5dn.24xlarge"|"r5n.large"|"r5n.xlarge"|"r5n.2xlarge"|"r5n.4xlarge"|"r5n.8xlarge"|"r5n.12xlarge"|"r5n.16xlarge"|"r5n.24xlarge"|"inf1.xlarge"|"inf1.2xlarge"|"inf1.6xlarge"|"inf1.24xlarge", MaxDuration = 123, MaxInstanceCount = 123, MinDuration = 123, OfferingClass = "standard"|"convertible", ProductDescription = "Linux/UNIX"|"Linux/UNIX (Amazon VPC)"|"Windows"|"Windows (Amazon VPC)", ReservedInstancesOfferingIds = list( "string" ), DryRun = TRUE|FALSE, InstanceTenancy = "default"|"dedicated"|"host", MaxResults = 123, NextToken = "string", OfferingType = "Heavy Utilization"|"Medium Utilization"|"Light Utilization"|"No Upfront"|"Partial Upfront"|"All Upfront" ) } } \keyword{internal}
/cran/paws.compute/man/ec2_describe_reserved_instances_offerings.Rd
permissive
johnnytommy/paws
R
false
true
9,989
rd
areal_units_inla_mesh = function(locs, areal_units_resolution_km, areal_units_proj4string_planar_km) {

  require(INLA)

  sppoly = inla.mesh.2d(
    loc = locs,
    max.edge = c(0.5, 5) * areal_units_resolution_km,  # max size of a triangle (in, out)
    offset = c(0.1, 1) * areal_units_resolution_km,    # how much to extend inside and outside of boundary
    cutoff = c(0.5, 5) * areal_units_resolution_km     # min distance allowed between points
    # , boundary = inla.mesh.segment(st_coordinates(as(boundary, "sf"))[, c(1, 2)])
  )

  # convert to sp*
  sppoly = SpatialPolygonsDataFrame(
    Sr = SpatialPolygons(
      lapply(1:nrow(sppoly$graph$tv), function(x) {
        tv = sppoly$graph$tv[x, , drop = TRUE]
        Polygons(list(Polygon(sppoly$loc[tv[c(1, 3, 2, 1)], 1:2, drop = FALSE])), ID = x)
      }),
      proj4string = sp::CRS(areal_units_proj4string_planar_km)
    ),
    data = as.data.frame(sppoly$graph$tv[, c(1, 3, 2), drop = FALSE]),
    match.ID = FALSE
  )

  return(sppoly)
}
/R/areal_units_inla_mesh.R
permissive
jae0/aegis.polygons
R
false
false
1,097
r
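A minimal usage sketch, assuming INLA and sp are installed; the random coordinates and the planar CRS string below are placeholders standing in for real survey locations.

set.seed(1)
locs <- cbind(runif(200, 0, 100), runif(200, 0, 100))  # planar coordinates in km

sppoly <- areal_units_inla_mesh(
  locs = locs,
  areal_units_resolution_km = 5,
  areal_units_proj4string_planar_km = "+proj=utm +zone=20 +datum=WGS84 +units=km"
)
class(sppoly)  # SpatialPolygonsDataFrame of mesh triangles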
mondrian <- function(data, labels = colnames(data), xlab = "", ylab = "", main = "",
                     col = NULL, pop = NULL, indiv = FALSE, ...) {
  ## Initial checking
  data <- as.data.frame(data)
  data <- data[complete.cases(data), ] ## delete rows (individuals) with missing data

  ## Default values
  if(is.null(labels)) labels <- colnames(data)
  if(is.null(col)) {
    ## brewer.pal() comes from RColorBrewer, which must be available
    if(length(labels) == 2) {
      col <- c("blue", "red")
    } else {
      col <- brewer.pal(length(labels), "Set1")
    }
  } else {
    col <- rep(col, length.out = length(labels))
  }
  main <- paste(main, " (n = ", nrow(data), ")", sep = "")

  if(!is.null(pop)) {
    ## Individuals are grouped in sub-populations
    labelpop <- levels(unique(data[, pop]))
    nbpop <- length(labelpop)
    ## Graphic window management
    nrow <- floor(sqrt(nbpop + 1))
    par(mfrow = c(nrow, ceiling(nbpop / nrow)))
    outpop <- list()
    ## Results for each sub-population
    subpop <- by(data, data[, pop], function(x)
      mondrian(x[, - pop], pop = NULL, xlab = xlab, ylab = ylab,
               main = unique(x[, pop]), col = col, indiv = indiv, ...))
    outpop <- lapply(subpop, function(x) x)
    names(outpop) <- labelpop
    ## Result if all individuals belong to the same population
    outpop$pop <- mondrian(data[, - pop], xlab = xlab, ylab = ylab,
                           main = "Total population", col = col, pop = NULL,
                           indiv = indiv, ...)
    par(mfrow = c(1, 1))
    invisible(outpop)
  } else {
    ## Individuals are defined as belonging to the same population
    ## Percents matrix building
    counts_profiles <- rev(table(apply(data, 1, paste, collapse = ""))) ## counts table for profiles
    percents_profiles <- counts_profiles / sum(counts_profiles) ## percents table for profiles
    mat_profiles <- matrix(rep(percents_profiles, length(col)), byrow = FALSE,
                           ncol = length(col), nrow = length(percents_profiles))
    dimnames(mat_profiles) <- list(names(percents_profiles), labels)
    ## Presence-absence profiles
    profiles <- t(data.frame(strsplit(rownames(mat_profiles), "")))
    ## Display
    par(las = 3, mar = c(4, 3, 2.5, 1), mgp = c(2, 0, 0), font = 2, cex.axis = 1.2)
    plot(ncol(mat_profiles), 1, xlim = c(0, ncol(mat_profiles)), ylim = c(0, 1),
         type = "n", axes = FALSE, xlab = xlab, ylab = ylab, main = main, ...)
    axis(2, at = c(0, 1), c("0", "100%"), tick = FALSE, lwd = 0.5)
    par(las = 1, font = 2, cex.axis = 0.9)
    axis(1, at = 0.5:ncol(mat_profiles), labels, tick = FALSE)
    dimrect <- rbind(rep(0, ncol(mat_profiles)), apply(mat_profiles, 2, cumsum))
    out <- sapply(1:ncol(mat_profiles), function(Y) {
      sapply(1:nrow(mat_profiles), function(X) {
        rect(Y - 1, dimrect[X], Y, dimrect[X + 1],
             col = ifelse(profiles[X, Y] == "1", col[Y], "white"))
      })
    })
    if(indiv)
      sapply(1:nrow(data), function(i)
        lines(0:ncol(data), rep(i/nrow(data), 1 + ncol(data))))
    invisible(percents_profiles)
  }
}
/R/mondrian.R
no_license
JunqiangZheng/Mondrian
R
false
false
2,973
r
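A small usage sketch with simulated presence/absence data; with more than two markers the default palette comes from RColorBrewer's brewer.pal(), so that package must be attached (or a col vector supplied).

library(RColorBrewer)

set.seed(42)
dat <- as.data.frame(matrix(rbinom(300, 1, 0.4), ncol = 3,
                            dimnames = list(NULL, c("markerA", "markerB", "markerC"))))

pct <- mondrian(dat, main = "Simulated markers")  # profile percentages, returned invisibly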
# This file is part of the standard setup for testthat.
# It is recommended that you do not modify it.
#
# Where should you do additional test configuration?
# Learn more about the roles of various files in:
# * https://r-pkgs.org/testing-design.html#sec-tests-files-overview
# * https://testthat.r-lib.org/articles/special-files.html

library(testthat)
library(latex.makers)

test_check("latex.makers")
/tests/testthat.R
no_license
billdenney/latex.makers
R
false
false
404
r
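For context, test_check() discovers files matching test-*.R under tests/testthat/. A minimal hypothetical companion file is sketched below; the expectation is illustrative and does not reference any latex.makers internals.

# tests/testthat/test-example.R (hypothetical)
test_that("arithmetic sanity check", {
  expect_equal(1 + 1, 2)
})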
#' @importFrom flipData CleanSubset NoData processExperimentData <- function(experiment.data, subset, weights, n.questions.left.out, seed, input.prior.mean, input.prior.sd, missing, covariates, simulated.priors, simulated.sample.size) { nms <- names(experiment.data) choice.name <- nms[1] n.questions <- sum(nms == choice.name) n.alternatives <- length(levels(experiment.data[[1]])) n.qc <- n.questions * n.alternatives n.attributes <- (length(nms) - n.questions) / n.qc if (round(n.attributes) != n.attributes) stop("The number of parameters in the Experiment question is invalid.") checkNumberOfQuestionsLeftOut(n.questions, n.questions.left.out) is.data.simulated <- !is.null(simulated.priors) if (is.data.simulated) { simulatedDataWarnings(subset, weights, covariates) subset <- NULL weights <- NULL covariates <- NULL } if (missing == "Error if missing data") errorIfMissingDataFoundExperiment(experiment.data, subset, weights, covariates, n.questions, is.data.simulated) non.missing.table <- nonMissingTableForExperiment(experiment.data, subset, weights, covariates, n.questions, n.alternatives, n.attributes, missing, is.data.simulated) non.missing <- nonMissingRespondents(non.missing.table, n.questions.left.out, missing, n.questions) filter.subset <- CleanSubset(subset, nrow(experiment.data)) subset <- filter.subset & non.missing if (sum(filter.subset) == 0) stop("All respondents have been filtered out.") else if (sum(subset) == 0) NoData() weights <- prepareWeights(weights, subset) experiment.data <- experiment.data[subset, ] non.missing.table <- non.missing.table[subset, , drop = FALSE] if (!is.null(covariates)) covariates <- covariates[subset, ] n.respondents <- nrow(experiment.data) Y <- extractChoices(experiment.data, non.missing.table) attribute.data <- experiment.data[, -1:-n.questions] names(attribute.data) <- nms[-1:-n.questions] attribute.data <- completeLevels(attribute.data) n.attribute.parameters <- nAttributeParameters(attribute.data, n.attributes, n.questions, n.alternatives) n.parameters <- sum(n.attribute.parameters) par.names <- parameterNames(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters) all.names <- allNames(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters) attribute.levels <- attributeLevels(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters) checkPriorParameters(input.prior.mean, input.prior.sd, n.alternatives, n.attributes, n.parameters) x.list <- createDesignMatrix(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters, n.attribute.parameters, input.prior.mean, non.missing.table) X <- x.list$X parameter.scales <- x.list$parameter.scales parameter.range <- calcRange(attribute.data) prior.mean <- processInputPrior(input.prior.mean, n.parameters, n.attributes, n.attribute.parameters, parameter.scales) prior.sd <- processInputPrior(input.prior.sd, n.parameters, n.attributes, n.attribute.parameters, parameter.scales) respondent.indices <- constructRespondentIndices(non.missing.table) if (is.data.simulated) { n.respondents <- simulated.sample.size subset <- rep(TRUE, n.respondents) weights <- rep(1, n.respondents) covariates <- NULL sampled.output <- sampleFromX(X, respondent.indices, n.respondents, seed) X <- sampled.output$X respondent.indices <- sampled.output$respondent.indices attribute.names <- unique(names(attribute.data)) output <- generateSimulatedChoices(X, respondent.indices, simulated.priors, seed, n.alternatives, n.attribute.parameters, attribute.names, parameter.scales) Y <- 
output$choices simulated.respondent.parameters <- output$respondent.parameters } else simulated.respondent.parameters <- NULL split.data <- crossValidationSplit(X, Y, n.questions.left.out, seed, respondent.indices) none.alternatives <- which(apply(split.data$X.in, 2, function(x) nrow(unique(x)) == 1)) list(n.questions = n.questions, n.questions.left.out = n.questions.left.out, n.alternatives = n.alternatives, n.attributes = n.attributes, n.respondents = n.respondents, n.parameters = n.parameters, n.attribute.parameters = n.attribute.parameters, par.names = par.names, all.names = all.names, beta.names = par.names, all.beta.names = all.names, X.in = split.data$X.in, Y.in = split.data$Y.in, X.out = split.data$X.out, Y.out = split.data$Y.out, left.out = split.data$left.out, n.questions.left.in = split.data$n.questions.left.in, subset = subset, weights = weights, covariates = covariates, parameter.scales = parameter.scales, parameter.range = parameter.range, prior.mean = prior.mean, prior.sd = prior.sd, simulated.respondent.parameters = simulated.respondent.parameters, attribute.levels = attribute.levels, none.alternatives = none.alternatives) } extractChoices <- function(experiment.data, non.missing.table) { n.questions <- ncol(non.missing.table) result <- c(t(sapply(experiment.data[, 1:n.questions], function(x) as.numeric(x)))) result <- result[c(t(non.missing.table))] } # Ensure that each factor contains a complete set of levels completeLevels <- function(attribute.data) { nms <- names(attribute.data) unique.names <- unique(nms) for (nm in unique.names) { att <- attribute.data[nms == nm] if (is.factor(att[[1]])) { complete.levels <- unique(unlist(lapply(att, levels))) ind <- which(nms == nm) for (i in ind) { att.q <- attribute.data[[i]] lvls <- levels(att.q) map <- rep(NA, length(lvls)) for (j in 1:length(lvls)) map[j] <- which(lvls[j] == complete.levels) attribute.data[[i]] <- factor(complete.levels[map[att.q]], levels = complete.levels) } } } attribute.data } createDesignMatrix <- function(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters, n.attribute.parameters, input.prior.mean, non.missing.table) { n.rs <- sum(non.missing.table) n.qc <- n.questions * n.alternatives n.respondents <- nrow(attribute.data) meansAndSDs <- getParameterMeanAndSD(attribute.data, n.attributes, n.questions, n.alternatives) parameter.scales <- rep(1, n.parameters) rs <- 1 X <- array(dim = c(n.rs, n.alternatives, n.parameters)) for (r in 1:n.respondents) { for (q in 1:n.questions) { if (non.missing.table[r, q]) { p <- 1 for (i in 1:n.attributes) { is.ordered <- length(input.prior.mean) == n.attributes && input.prior.mean[i] != 0 for (j in 1:n.alternatives) { ind <- n.qc * (i - 1) + n.alternatives * (q - 1) + j v <- attribute.data[[ind]][r] if (is.factor(v)) { n.v <- length(levels(v)) - 1 int.v <- as.numeric(v) X[rs, j, p:(p + n.v - 1)] <- 0 if (int.v > 1) { if (is.ordered) X[rs, j, p:(p + int.v - 2)] <- 1 else X[rs, j, p + int.v - 2] <- 1 } } else { mn <- meansAndSDs$means[i] std <- meansAndSDs$sds[i] # Divide by 2 * SD as recommended by Gelman in # "Scaling regression inputs by dividing by two # standard deviations (2008)" X[rs, j, p] <- 0.5 * (v - mn) / std if (q == 1 && j == 1) parameter.scales[p] <- 2 * std } } if (is.factor(v)) p <- p + n.v else p <- p + 1 } rs <- rs + 1 } } } list(X = X, parameter.scales = parameter.scales) } nAttributeParameters <- function(attribute.data, n.attributes, n.questions, n.alternatives) { result <- rep(NA, n.attributes) for (i in 1:n.attributes) { v <- 
attribute.data[[n.questions * n.alternatives * (i - 1) + 1]] if (is.factor(v)) result[i] <- length(levels(v)) - 1 else result[i] <- 1 } result } parameterNames <- function(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters) { nms <- names(attribute.data) result <- rep("", n.parameters) ind <- 1 for (i in 1:n.attributes) { col <- n.questions * n.alternatives * (i - 1) + 1 v <- attribute.data[[col]] if (is.factor(v)) { lvls <- levels(v) for (j in 2:length(lvls)) result[ind + j - 2] <- paste0(nms[col], ": ", lvls[j]) ind <- ind + length(lvls) - 1 } else { result[ind] <- nms[col] ind <- ind + 1 } } result } # Includes the names of parameters left out allNames <- function(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters) { nms <- names(attribute.data) result <- rep("", n.parameters) ind <- 1 for (i in 1:n.attributes) { col <- n.questions * n.alternatives * (i - 1) + 1 v <- attribute.data[[col]] if (is.factor(v)) { lvls <- levels(v) for (j in 1:length(lvls)) result[ind + j - 1] <- paste0(nms[col], ": ", lvls[j]) ind <- ind + length(lvls) } else { result[ind] <- nms[col] ind <- ind + 1 } } result } # Creates a list of attribute levels. If an attribute is numeric, an empty # character vector is provided for that attribute. attributeLevels <- function(attribute.data, n.attributes, n.questions, n.alternatives, n.parameters) { nms <- names(attribute.data) result <- list() for (i in 1:n.attributes) { col <- n.questions * n.alternatives * (i - 1) + 1 v <- attribute.data[[col]] result[[nms[col]]] <- if (is.factor(v)) levels(v) else character(0) } result } processInputPrior <- function(prior.par, n.parameters, n.attributes, n.attribute.parameters, parameter.scales = NULL) { if (is.null(parameter.scales)) parameter.scales <- rep(1, n.parameters) result <- rep(NA, n.parameters) if (length(prior.par) == 1) result <- rep(prior.par, n.parameters) else if (length(prior.par) == n.attributes) { result <- rep(NA, n.parameters) for (i in 1:n.attributes) { if (n.attribute.parameters[i] == 1) # numeric { ind <- sum(n.attribute.parameters[1:i]) result[ind] <- prior.par[i] * parameter.scales[ind] } else # categorical { ind.end <- sum(n.attribute.parameters[1:i]) ind.start <- ind.end - n.attribute.parameters[i] + 1 for (j in ind.start:ind.end) result[j] <- prior.par[i] } } } else # length(prior.par) == n.parameters result <- prior.par * parameter.scales result } crossValidationSplit <- function(X, Y, n.questions.left.out, seed, respondent.indices) { n.respondents <- length(respondent.indices) n.questions.left.in <- rep(NA, n.respondents) if (n.questions.left.out > 0) { n.alternatives <- dim(X)[2] n.parameters <- dim(X)[3] n.in <- nrow(X) - n.questions.left.out * n.respondents X.in <- array(dim = c(n.in, n.alternatives, n.parameters)) Y.in <- rep(NA, n.in) X.out <- array(dim = c(n.respondents * n.questions.left.out, n.alternatives, n.parameters)) Y.out <- rep(NA, n.respondents * n.questions.left.out) set.seed(seed) rs <- 0 rs.in <- 0 for (r in 1:n.respondents) { n.questions <- length(respondent.indices[[r]]) n.questions.left.in[r] <- n.questions - n.questions.left.out ind.left.out <- sample(n.questions, n.questions.left.out) ind.left.in <- setdiff(1:n.questions, ind.left.out) ind.left.out.new <- (r - 1) * n.questions.left.out + 1:n.questions.left.out ind.left.in.new <- rs.in + 1:n.questions.left.in[r] X.in[ind.left.in.new, , ] <- X[rs + ind.left.in, , ] Y.in[ind.left.in.new] <- Y[rs + ind.left.in] X.out[ind.left.out.new, , ] <- X[rs + ind.left.out, , ] Y.out[ind.left.out.new] 
<- Y[rs + ind.left.out] rs <- rs + n.questions rs.in <- rs.in + n.questions.left.in[r] } } else { X.in <- X Y.in <- Y X.out <- NULL Y.out <- NULL n.questions.left.in <- sapply(respondent.indices, length) } list(X.in = X.in, X.out = X.out, Y.in = Y.in, Y.out = Y.out, n.questions.left.in = n.questions.left.in) } checkPriorParameters <- function(input.prior.mean, input.prior.sd, n.alternatives, n.attributes, n.parameters, include.choice.parameters = FALSE) { if (include.choice.parameters) { n.attributes <- n.attributes + 1 n.parameters <- n.parameters + n.alternatives - 1 } if (!is.numeric(input.prior.mean) || (length(input.prior.mean) != n.parameters && length(input.prior.mean) != n.attributes && length(input.prior.mean) != 1)) stop("The supplied parameter hb.prior.mean is inappropriate. ", "Based on the input data this needs to be a numeric ", "vector of length 1, ", n.attributes, " (number of attributes) or ", n.parameters, " (number of parameters).") if (!is.numeric(input.prior.sd) || (length(input.prior.sd) != n.parameters && length(input.prior.sd) != n.attributes && length(input.prior.sd) != 1)) stop("The supplied parameter hb.prior.sd is inappropriate. ", "Based on the input data this needs to be a numeric ", "vector of length 1, ", n.attributes, " (number of attributes) or ", n.parameters, " (number of parameters).") } #' @importFrom flipData CalibrateWeight CleanWeights prepareWeights <- function(weights, subset) { if (!is.null(weights)) { weights <- CleanWeights(weights) weights <- weights[subset] CalibrateWeight(weights) } else rep(1, sum(subset)) } getParameterMeanAndSD <- function(attribute.data, n.attributes, n.questions, n.alternatives) { n.qc <- n.questions * n.alternatives means <- rep(NA, n.attributes) sds <- rep(NA, n.attributes) for (i in 1:n.attributes) { v <- attribute.data[[n.qc * (i - 1) + 1]] if (!is.factor(v)) { ind.start <- n.qc * (i - 1) + 1 ind.end <- n.qc * i values <- as.matrix(attribute.data[ind.start:(n.qc * i)]) means[i] <- mean(values) sds[i] <- sd(values) } } list(means = means, sds = sds) } #' @importFrom flipData MissingDataFail errorIfMissingDataFoundExperiment <- function(experiment.data, subset, weights, covariates, n.questions, is.data.simulated) { if (is.data.simulated) experiment.data <- experiment.data[, -1:-n.questions] if (any(is.na(experiment.data)) || (!is.null(subset) && any(is.na(subset))) || (!is.null(weights) && any(is.na(weights))) || (!is.null(covariates) && any(is.na(covariates)))) MissingDataFail(); } #' Returns which respondents are considered missing based on missing data #' settings. #' @param non.missing.table A logical matrix of respondents x questions #' indicating which ones are not missing. #' @param n.questions.left.out The number of questions to leave out. #' @param missing The missing data setting. #' @param n.questions The number of questions per respondent. 
nonMissingRespondents <- function(non.missing.table, n.questions.left.out, missing, n.questions) { n.respondents <- nrow(non.missing.table) if (missing == "Error if missing data") rep(TRUE, n.respondents) else if (missing == "Use partial data") rowSums(non.missing.table) > n.questions.left.out else if (missing == "Exclude cases with missing data") apply(non.missing.table, 1, all) } nonMissingTableForExperiment <- function(experiment.data, subset, weights, covariates, n.questions, n.alternatives, n.attributes, missing, is.data.simulated) { result <- matrix(TRUE, nrow = nrow(experiment.data), ncol = n.questions) if (missing == "Use partial data") { if (!is.data.simulated) for (i in 1:n.questions) result[, i] <- result[, i] & !is.na(experiment.data[[i]]) for (i in 1:n.attributes) { for (j in 1:n.questions) { ind <- n.questions + (i - 1) * n.questions * n.alternatives + (j - 1) * n.alternatives + (1:n.alternatives) not.missing <- !is.na(rowSums(sapply(experiment.data[, ind], as.numeric))) result[, j] <- result[, j] & not.missing } } } else { if (is.data.simulated) experiment.data <- experiment.data[, -1:-n.questions] result <- result & !is.na(rowSums(sapply(experiment.data, as.numeric))) } if (!is.null(subset)) result <- result & !is.na(subset) if (!is.null(weights)) result <- result & !is.na(weights) if (!is.null(covariates)) result <- result & !is.na(rowSums(covariates)) result }
/R/experimentdata.R
no_license
pdwaggoner/flipChoice
R
false
false
20,939
r
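A worked sketch of processInputPrior() defined above. It is an internal helper, so calling it directly assumes access to the package namespace (e.g. via flipChoice:::). Here there are two attributes: the first categorical with two level parameters, the second numeric with parameter scale 4 (i.e. 2 x SD), so an attribute-level prior expands as follows.

flipChoice:::processInputPrior(prior.par = c(1, 2),
                               n.parameters = 3,
                               n.attributes = 2,
                               n.attribute.parameters = c(2, 1),
                               parameter.scales = c(1, 1, 4))
# c(1, 1, 8): the categorical prior is repeated for each level parameter,
# while the numeric prior is multiplied by its parameter scale.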
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/proton_operations.R \name{proton_get_service} \alias{proton_get_service} \title{Get detailed data for a service} \usage{ proton_get_service(name) } \arguments{ \item{name}{[required] The name of the service that you want to get the detailed data for.} } \description{ Get detailed data for a service. See \url{https://www.paws-r-sdk.com/docs/proton_get_service/} for full documentation. } \keyword{internal}
/cran/paws.compute/man/proton_get_service.Rd
permissive
paws-r/paws
R
false
true
487
rd
## A black theme require(ggplot2) require(showtext) ## for google fonts font_add_google("Raleway") showtext_auto() theme_black <- function (base_size = 12, base_family = "Raleway", base_line_size = base_size/18, base_rect_size = base_size/18) { half_line <- base_size/2 theme( line = element_line(colour = "white", size = base_line_size, linetype = 1, lineend = "round"), rect = element_rect(fill = "white", colour = "black", size = base_rect_size, linetype = 1), text = element_text(family = base_family, face = "plain", colour = "white", size = base_size, lineheight = 0.9, hjust = 0.5, vjust = 0.5, angle = 0, margin = margin(), debug = FALSE), #### axis #### axis.line = element_blank(), axis.line.x = NULL, axis.line.y = NULL, axis.text = element_text( size = rel(1), colour = "white"), axis.text.x = element_text( margin = margin(t = 0.5 * half_line/4), vjust = 1), axis.text.x.top = element_text( margin = margin(b = 0.5 * half_line/4), vjust = 0), axis.text.y = element_text( margin = margin(r = 0.5 * half_line/4), hjust = 1), axis.text.y.right = element_text(margin = margin(l = 0.5 * half_line/2), hjust = 0), axis.ticks = element_line(colour = "white", size = base_line_size/4), axis.ticks.length = unit(base_size/6, "pt"), axis.title.x = element_text( margin = margin(t = half_line), vjust = 1), axis.title.x.top = element_text( margin = margin(b = half_line), vjust = 0), axis.title.y = element_text( angle = 90, margin = margin(r = half_line), vjust = 1), axis.title.y.right = element_text( angle = -90, margin = margin(l = half_line), vjust = 0), #### legend #### legend.background = element_rect(colour = NA, fill = NA), legend.spacing = unit(half_line, "pt"), legend.spacing.x = NULL, legend.spacing.y = NULL, legend.margin = margin(half_line, 2*base_size, half_line, half_line), legend.key = element_rect(fill = NA, colour = NA), legend.key.size = unit(1, "lines"), legend.key.height = NULL, legend.key.width = NULL, legend.text = element_text(size = rel(0.9)), legend.text.align = NULL, legend.title = element_text(hjust = 0, vjust = 1), legend.title.align = NULL, legend.position = "right", legend.direction = NULL, legend.justification = "center", legend.box = NULL, legend.box.margin = margin(0, 0, 0, 0, "cm"), legend.box.background = element_blank(), legend.box.spacing = unit(half_line, "pt"), #### strip #### strip.background = element_rect( fill = "grey5", colour = NA), strip.text = element_text( colour = "white", size = rel(0.8), margin = margin(0.2 * half_line, 0.2 * half_line, 0.2 * half_line, 0.2 * half_line) ), strip.text.x = NULL, strip.text.y = element_text(angle = -90), strip.placement = "inside", strip.placement.x = NULL, strip.placement.y = NULL, strip.switch.pad.grid = unit(half_line/2, "pt"), strip.switch.pad.wrap = unit(half_line/2, "pt"), #### panel #### panel.background = element_rect(fill = NA, colour = NA), panel.border = element_blank(), panel.grid = element_line(colour = "white", size = rel(0.15), linetype = 1), panel.grid.minor = element_line(colour = "grey50", size = rel(1), linetype = 2), panel.spacing = unit(half_line, "pt"), panel.spacing.x = NULL, panel.spacing.y = NULL, panel.ontop = FALSE, #### plot #### plot.background = element_rect(colour = "black", fill = "black"), plot.title = element_text( size = rel(1.2), hjust = 0.5, vjust = 1, margin = margin(b = half_line)), plot.subtitle = element_text( hjust = 0.5, vjust = 1, margin = margin(b = half_line)), plot.caption = element_text( size = rel(0.6), hjust = 0, vjust = 1, margin = margin(t = half_line)), plot.tag = element_text( 
size = rel(1.4), hjust = 0.5, vjust = 0.5), plot.tag.position = "topleft", plot.margin = margin(0.5 * half_line, 0.5 * half_line, 0.5 * half_line, 0.5 * half_line), complete = TRUE) }
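## Hedged usage sketch (not part of the original file): apply the theme to a
## quick scatterplot; assumes the ggplot2/showtext setup above has been run.
# ggplot(mtcars, aes(wt, mpg)) +
#   geom_point(colour = "white") +
#   labs(title = "theme_black() demo") +
#   theme_black(base_size = 14)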
/theme_black.R
no_license
tkoomar/ggplot2_themes
R
false
false
4,761
r
##############################
## clinical model
## Jeff Du 05/2019
##
#############################

####################################################
##
## Section I
## Initial Setup, load package and data
##
####################################################

library(caret)

clinical.data <- read.table("C:/Users/dug/OneDrive - QIAGEN GmbH/SVM_RFE_Prj/0505_Clinical_Table.txt", header = T, row.names=1, sep="\t")

str(clinical.data)

## remove results categories from the clinical table
clinical.data$progtype1_systemic_2locoregional <- NULL
clinical.data$Did_the_patient_develop_a_New_Tumor_Event <- NULL
clinical.data$Alive_or_Dead <- NULL

head(clinical.data[ ,1:8])

####################################################
##
## Section II
## split the dataset into training and validation
##
####################################################

set.seed(123)
trainRowNum <- createDataPartition(clinical.data$Progressed, p=0.75, list = F)
train.data <- clinical.data[trainRowNum, ]
test.data <- clinical.data[-trainRowNum, ]

## save X and Y for later combination;
y <- train.data$Progressed
x <- train.data
x$Progressed <- NULL

dim(x)
dim(train.data)
length(y)

y_t <- test.data$Progressed

##############################################
##
## naive visualization of features in the training data
##
##############################################

library(skimr)
skim.train <- skim_to_wide(train.data)
skim.train[1:20, c(1:5, 15:16)]
## good, there's no missing data

##############################################
##
## create One-Hot Encoding variables
## for the categories in the training data
##
##############################################
##
## One-Hot Encoding to create dummy variables by converting a categorical variable to as many binary variables
dummie.model <- dummyVars(Progressed ~ ., data=train.data)
train.dataDum <- predict(dummie.model, newdata = train.data)
train.data <- data.frame(train.dataDum)
## could not use train.data <- as.data.frame(train.dataDum)

test.dataDum <- predict(dummie.model, newdata = test.data)

################################
## remove constant columns
#datamatrix <- train.data
#dim(datamatrix)
#datamatrix <- datamatrix[ , apply(datamatrix, 2, var, na.rm=T)!=0]
#dim(datamatrix)
#constantCol <- colnames(as.matrix( which(apply(datamatrix, MARGIN=2, function(x) var(x) < 0.001))))
#count <- datamatrix[ !colnames(datamatrix) %in% constantCol, ]
#dim(count)
#head(count)
#train.data <- datamatrix

## check the new data structure of the train.data;
str(train.data)
dim(train.data)

str(test.dataDum)
dim(test.dataDum)
test.dataDum <- as.data.frame(test.dataDum)

preProcess.model <- preProcess( train.data, method = 'range' )
train.data <- predict(preProcess.model, newdata = train.data)

# preProcess.modelT <- preProcess( test.dataDum, method = 'range')
# test.dataPro <- predict(preProcess.modelT, newdata = test.dataDum)

## add the progressed variable to the train.data
## train.data$Progress <- y
Y <- ifelse(y==1, "yes", "no")
Y
Y_T <- ifelse(y_t==1, "yes", "no")

train.data$class <- Y
# test.dataPro$class <- Y_T

dim(train.data)

## ZScore ?
apply( train.data[, 1:60], 2, FUN=function(x){c('min' = min(x), 'max' = max(x) )}) head(train.data[, 50:60]) train.data$class <- as.factor(train.data$class) featurePlot( x = train.data[ , 1:20], y = train.data$class, plot = 'box', strip = strip.custom(par.strip.text = list( cex = 0.7)), scales = list(x = list(relation="free"), y = list(relation="free") ) ) featurePlot(x = train.data[, 1:20], y = train.data$class, plot = "density", strip=strip.custom(par.strip.text=list(cex=.7)), scales = list(x = list(relation="free"), y = list(relation="free")) ) ####################################################################### set.seed(100) options(warn=-1) subsets <- c(1:43) str(train.data) train.data$class <- as.factor(train.data$class) ctrl <- rfeControl(functions = rfFuncs, method = "repeatedcv", repeats = 5, verbose = FALSE) lmProfile <- rfe(x=train.data[, 1:53], y=train.data$class, sizes = subsets, rfeControl = ctrl) lmProfile varimp_lm <- varImp(lmProfile) plot(varimp_lm, main="variable importance with lmProfile") # colnames(train.data) set.seed(123) fitControl <- trainControl( method = 'repeatedcv', # k-fold cross validation number = 15, # number of folds repeats = 10, # number of repeats savePredictions = T, # saves predictions for optimal tuning parameter classProbs = T, # should class probabilities be returned summaryFunction=twoClassSummary #, # results summary function # savePredictions = 'final' ) ############################# #### Step 4.1 choose svm_linear method model_svmLinear = train(class ~ ., data=train.data, method='svmLinear', tuneLength = 4, metric='ROC', trControl = fitControl ) ############################# ## briefly check the svmLinear results model_svmLinear # model_svmLinear$pred$yes # model_svmLinear$pred$no varimp_svmLinear <- varImp(model_svmLinear) plot(varimp_svmLinear, main="Variable Importance with svmLinear") ######################################################################################## ## step 4.2 ## plot roc for the training data ## We can calculate the area under the curve... 
## Select a parameter setting
## selectedIndices <- model_mars2$pred

library(pROC)

rocobj_svmlinear <- roc(model_svmLinear$pred$obs, model_svmLinear$pred$yes,
                        ci=TRUE, plot=TRUE, legacy.axes=TRUE, percent=TRUE,
                        main="svmLinear with Clinical Data",
                        xlab="False Positive Percentage", ylab="True Positive Percentage",
                        col="darkblue", lwd=4, print.auc=TRUE)

library(corrplot)
corrplot( cor(train.data[, 1:53]), method = "square", tl.cex = 0.5)

x <- train.data[, 1:53]
x$class <- y
x$progtype1_systemic_2locoregional <- NULL
x$TTP <- NULL
x$Did_the_patient_develop_a_New_Tumor_Event.Yes <- NULL

corrplot( cor(x), method = "square", tl.cex = 0.35)
colnames(x)

dim(train.data)
dim(test.data)

test.data2 <- predict(dummie.model, test.data)
test.data2 <- data.frame(test.data2)
str(test.data2)
str(train.dataDum)
# train.dataDum <- data.frame(train.dataDum)
# predict(preProcess.model, train.dataDum)

# test.data2$`Race.Black African American` <- NULL
# test.dataDum$`Race.Black African American` <- NULL

test.data3 <- predict(preProcess.model, test.data2)
test.data3$Race.Unknown.Refused <- rep( 0, nrow(test.data3))

colnames(test.data3)
colnames(train.data)
# str(train.data)
# str(test.data3)

test.data3$class <- Y_T
summary(test.data3$class)
test.data3$class <- as.factor(test.data3$class)
summary(test.data3$class)

predicted2 <- predict(model_svmLinear, test.data3)
head(predicted2)

confusionMatrix(reference = test.data3$class, data = predicted2, mode='everything', positive='yes')

###########################################
dim(test.data3)

## ZScore ?
apply( train.data[, 1:60], 2, FUN=function(x){c('min' = min(x), 'max' = max(x) )})
head(test.data3[, 50:60])

#############################
#### Step 4.3 choose svm_radial method
set.seed(100)
model_svmRadial = train(class ~ ., data=train.data, method='svmRadial', tuneLength = 8, metric='ROC', trControl = fitControl )

#############################
## briefly check the svmRadial results
model_svmRadial
# model_svmRadial$pred$yes
# model_svmRadial$pred$no

varimp_svmRadial <- varImp(model_svmRadial)
plot(varimp_svmRadial, main="Variable Importance with svmRadial")

rocobj_svmRadial <- roc(model_svmRadial$pred$obs, model_svmRadial$pred$yes,
                        ci=TRUE, plot=TRUE, legacy.axes=TRUE, percent=TRUE,
                        main="svmRadial with Clinical Data",
                        xlab="False Positive Percentage", ylab="True Positive Percentage",
                        col="darkblue", lwd=4, print.auc=TRUE)

predicted_svmRadial <- predict(model_svmRadial, test.data3)
head(predicted_svmRadial)

confusionMatrix(reference = test.data3$class, data = predicted_svmRadial, mode='everything', positive='yes')
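## Hedged add-on (not part of the original script): put the two resampled ROC
## fits side by side. auc() and roc.test() are pROC helpers; the roc objects
## are the ones created above. roc.test() uses DeLong's method by default and
## may fall back to an unpaired comparison if the resampled predictions differ
## in length.
auc(rocobj_svmlinear)
auc(rocobj_svmRadial)
roc.test(rocobj_svmlinear, rocobj_svmRadial)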
/0506_clinical_model_svm-RFE.R
no_license
breezedu/svm-rfe
R
false
false
8,874
r
# These are abbreviated steps from Dr. Mike Hayden's (http://technology.indstate.edu/directory/haydenm.htm) 16-step approach:
# 1) State the overall problem and research question(s).
# 2) State the technique/methodology that will be used to draw a conclusion for this research question.
# 3) Describe your population, sample, and sampling procedure.
# 4) Data collection.
# 5) Explain variables used.
# 6) State assumptions and limitations.
# 7) State the null hypothesis and alternative hypothesis.
# 8) Set and explain the level of significance (alpha).
# 9) Include descriptive statistics + charts, graphs, or plots that describe your data set.
# 10) Calculate the test statistic (T.S.).
# 11) Draw a picture, then find the p-value.
# 12) Draw a conclusion, provide interpretation, and confirm with a confidence interval: is the p-value <= alpha? If so, reject the null.
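# Hedged illustration (not part of the original notes): a minimal one-sample
# t-test that walks through steps 7-12 with simulated data and alpha = 0.05.
set.seed(1)
x <- rnorm(30, mean = 52, sd = 10)   # step 4: (simulated) data collection
alpha <- 0.05                        # step 8: level of significance
tt <- t.test(x, mu = 50)             # steps 7 & 10: H0 mu = 50 vs H1 mu != 50
tt$p.value                           # step 11: the p-value
tt$p.value <= alpha                  # step 12: reject H0 if TRUE
tt$conf.int                          # step 12: confirm with the confidence interval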
/03.3 Dr Rs 12 Steps.R
permissive
freddyiniguez/BigDataZacatecas
R
false
false
863
r
\name{rp.control} \alias{rp.control} \alias{rp.control.dispose} \title{Create or dispose of an rpanel} \description{ The function \code{rp.control} creates a panel window into which rpanel widgets can be placed. It can also set up variables within the rpanel object. The function \code{rp.control.dispose} disposes of an rpanel.} \usage{ rp.control(title = "", size = c(100, 100), panelname, background, ...) rp.control.dispose(panel) } \arguments{ \item{title}{the title of the panel displayed in the banner.} \item{size}{a two-element numeric vector specifying width and height of the panel in pixels. If this argument is omitted the size of the panel will adapt to the subsequent addition of widgets.} \item{panelname}{the name of the panel. It is usually not necessary to set this as it will be given a name automatically.} \item{background}{the background colour of the control e.g. "white". (New parameter with version 2.0.)} \item{\dots}{additional arguments which are treated as variable initialisations and are stored within the returned rpanel object. For example inserting \code{x=3} creates a variable \code{x} in the rpanel object with the value 3. Note that the names of these additional arguments should not conflict with those of the main arguments of \code{rp.control}.} \item{panel}{the panel to be disposed of. This represents the object and its parameters} } \note{ Previous arguments \code{realname} and \code{aschar} have been discontinued in version 1.1. } \details{ Objects passed into \code{rp.control} are then available to be used by action functions. } \value{ The list object which defines the panel. } \references{ rpanel: Simple interactive controls for R functions using the tcltk package. Journal of Statistical Software, 17, issue 9. } \seealso{ \code{\link{rp.button}}, \code{\link{rp.checkbox}}, \code{\link{rp.combo}}, \code{\link{rp.doublebutton}}, \code{\link{rp.grid}}, \code{\link{rp.image}}, \code{\link{rp.listbox}}, \code{\link{rp.menu}}, \code{\link{rp.radiogroup}}, \code{\link{rp.slider}}, \code{\link{rp.text}}, \code{\link{rp.textentry}}, \code{\link{rp.tkrplot}}, \code{\link{rp.widget.dispose}} } \examples{ \dontrun{ hist.or.boxp <- function(panel) { if (panel$plot.type == "histogram") hist(panel$x) else boxplot(panel$x) panel } panel <- rp.control(x=rnorm(50), panelname="panel") rp.radiogroup(panel, plot.type, c("histogram", "boxplot"), title="Plot type", action = hist.or.boxp) # Try also # panel <- rp.control() # rp.control.dispose(panel) }} \keyword{iplot} \keyword{dynamic}
/man/rp.control.Rd
no_license
cran/rpanel
R
false
false
2,722
rd
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config NULL #' AWS Amplify #' #' @description #' Amplify is a fully managed continuous deployment and hosting service for #' modern web apps. #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' #' @section Service syntax: #' ``` #' svc <- amplify( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string" #' ), #' endpoint = "string", #' region = "string" #' ) #' ) #' ``` #' #' @examples #' \dontrun{ #' svc <- amplify() #' svc$create_app( #' Foo = 123 #' ) #' } #' #' @section Operations: #' \tabular{ll}{ #' \link[=amplify_create_app]{create_app} \tab Creates a new Amplify App \cr #' \link[=amplify_create_backend_environment]{create_backend_environment} \tab Creates a new backend environment for an Amplify App \cr #' \link[=amplify_create_branch]{create_branch} \tab Creates a new Branch for an Amplify App \cr #' \link[=amplify_create_deployment]{create_deployment} \tab Create a deployment for manual deploy apps \cr #' \link[=amplify_create_domain_association]{create_domain_association} \tab Create a new DomainAssociation on an App \cr #' \link[=amplify_create_webhook]{create_webhook} \tab Create a new webhook on an App \cr #' \link[=amplify_delete_app]{delete_app} \tab Delete an existing Amplify App by appId \cr #' \link[=amplify_delete_backend_environment]{delete_backend_environment} \tab Delete backend environment for an Amplify App \cr #' \link[=amplify_delete_branch]{delete_branch} \tab Deletes a branch for an Amplify App \cr #' \link[=amplify_delete_domain_association]{delete_domain_association} \tab Deletes a DomainAssociation \cr #' \link[=amplify_delete_job]{delete_job} \tab Delete a job, for an Amplify branch, part of Amplify App \cr #' \link[=amplify_delete_webhook]{delete_webhook} \tab Deletes a webhook \cr #' \link[=amplify_generate_access_logs]{generate_access_logs} \tab Retrieve website access logs for a specific time range via a pre-signed URL\cr #' \link[=amplify_get_app]{get_app} \tab Retrieves an existing Amplify App by appId \cr #' \link[=amplify_get_artifact_url]{get_artifact_url} \tab Retrieves artifact info that corresponds to a artifactId \cr #' \link[=amplify_get_backend_environment]{get_backend_environment} \tab Retrieves a backend environment for an Amplify App \cr #' \link[=amplify_get_branch]{get_branch} \tab Retrieves a branch for an Amplify App \cr #' \link[=amplify_get_domain_association]{get_domain_association} \tab Retrieves domain info that corresponds to an appId and domainName \cr #' \link[=amplify_get_job]{get_job} \tab Get a job for a branch, part of an Amplify App \cr #' \link[=amplify_get_webhook]{get_webhook} \tab Retrieves webhook info that corresponds to a webhookId \cr #' \link[=amplify_list_apps]{list_apps} \tab Lists existing Amplify Apps \cr #' \link[=amplify_list_artifacts]{list_artifacts} \tab List artifacts with an app, a branch, a job and an artifact type \cr #' \link[=amplify_list_backend_environments]{list_backend_environments} \tab Lists backend environments for an Amplify App \cr #' \link[=amplify_list_branches]{list_branches} \tab Lists branches for an Amplify App \cr #' \link[=amplify_list_domain_associations]{list_domain_associations} \tab List domains with an app \cr #' \link[=amplify_list_jobs]{list_jobs} \tab List Jobs for a branch, part of an Amplify App 
\cr #' \link[=amplify_list_tags_for_resource]{list_tags_for_resource} \tab List tags for resource \cr #' \link[=amplify_list_webhooks]{list_webhooks} \tab List webhooks with an app \cr #' \link[=amplify_start_deployment]{start_deployment} \tab Start a deployment for manual deploy apps \cr #' \link[=amplify_start_job]{start_job} \tab Starts a new job for a branch, part of an Amplify App \cr #' \link[=amplify_stop_job]{stop_job} \tab Stop a job that is in progress, for an Amplify branch, part of Amplify App \cr #' \link[=amplify_tag_resource]{tag_resource} \tab Tag resource with tag key and value \cr #' \link[=amplify_untag_resource]{untag_resource} \tab Untag resource with resourceArn \cr #' \link[=amplify_update_app]{update_app} \tab Updates an existing Amplify App \cr #' \link[=amplify_update_branch]{update_branch} \tab Updates a branch for an Amplify App \cr #' \link[=amplify_update_domain_association]{update_domain_association} \tab Create a new DomainAssociation on an App \cr #' \link[=amplify_update_webhook]{update_webhook} \tab Update a webhook #' } #' #' @rdname amplify #' @export amplify <- function(config = list()) { svc <- .amplify$operations svc <- set_config(svc, config) return(svc) } # Private API objects: metadata, handlers, interfaces, etc. .amplify <- list() .amplify$operations <- list() .amplify$metadata <- list( service_name = "amplify", endpoints = list("*" = list(endpoint = "amplify.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "amplify.{region}.amazonaws.com.cn", global = FALSE)), service_id = "Amplify", api_version = "2017-07-25", signing_name = "amplify", json_version = "1.1", target_prefix = "" ) .amplify$service <- function(config = list()) { handlers <- new_handlers("restjson", "v4") new_service(.amplify$metadata, handlers, config) }
/paws/R/amplify_service.R
permissive
johnnytommy/paws
R
false
false
5,494
r
## Build regime indicator vectors for the slouch phylogenetic comparative model.
## For each terminal taxon i (of N) and each regime k (of R), the element
## beta[[i + N*(k-1)]] marks which branches on taxon i's root-to-tip lineage
## (obtained via pedigree()) were assigned to regime k in regime.specs.
`regimes` <- function (topology, times, regime.specs, term) {
  N <- length(term);
  reg <- set.of.regimes(topology, regime.specs);
  R <- length(reg);
  beta <- vector(R*N, mode="list");
  for (i in 1:N) {
    for (k in 1:R) {
      p <- pedigree(topology, term[i]);
      n <- length(p);
      beta[[i + N*(k-1)]] <- as.integer(regime.specs[p[1:(n-1)]] == reg[k]);
    }
  }
  return(beta);
}
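## Hedged usage sketch (hypothetical objects, not from the original file):
## 'term' would hold the indices of the terminal (tip) nodes, with topology,
## times and regime.specs supplied by the calling slouch code.
# beta <- regimes(topology, times, regime.specs, term)
# length(beta)   # one indicator vector per (terminal taxon, regime) pair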
/S34_S38_phylogenetic_comparative_methods/scripts/resources/slouch/R/regimes.R
no_license
hj1994412/teleost_genomes_immune
R
false
false
392
r
#Data Structure-Vectors x=c(1,5,7,8,4) x2<-c(2,5,7,8,4) x x2 x4 =c('A','B','C','D','E') x4 x5=1:100 x5 (x6=seq(1,100,by=3)) ?seq seq(0, 1, length.out = 11) marks=rnorm(60,mean = 60, sd=10) marks plot(density(marks))
/VECTOR1.R
no_license
shashwatapal1297/analyticsnew
R
false
false
222
r
putAttributes = # putAttributes("test_dtl3", it = c(foo = "abc", bar = 123)) function(domain, ..., name = names(.objs)[1], auth = getOption("AmazonAWS"), .objs = list(...), .opts = getDefaultS3CurlOptions()) { if(length(.objs) > 1) { return(mapply(function(id, dom, obj) { putAttributes(dom, obj, name = id, auth = auth, .opts = .opts) }, names(.objs), rep(domain, length = length(.objs)), .objs)) } args = c(Action = "PutAttributes", DomainName = domain, ItemName = name) obj = .objs[[1]] tmp = mapply(function(i, val, id) { structure(c(id, val), names = paste(sprintf("Attribute.%d", i), c("Name", "Value"), sep = ".")) }, seq(along = obj) - 1L, obj, names(obj), SIMPLIFY = FALSE) args = c(args, structure(unlist(tmp), names = sapply(tmp, names))) txt = makeRequest(args, auth, 'http://sdb.amazonaws.com', .opts) ans = getResponseMetaData(txt) name } getAttributes = # # getAttributes("test_dtl3", "it") # function(domain, name, auth = getOption("AmazonAWS"), .opts = getDefaultS3CurlOptions()) { args = c(Action = "GetAttributes", DomainName = domain, ItemName = name) txt = makeRequest(args, auth, uri = 'http://sdb.amazonaws.com', .opts) readAttributeFromXML(txt) } deleteAttributes = # # deleteAttributes("test_dtl3", 'it') # function(domain, name, ..., .fields = list(...), auth = getOption("AmazonAWS"), .opts = getDefaultS3CurlOptions()) { args = c(Action = "DeleteAttributes", DomainName = domain, ItemName = name) if(length(.fields) == 0) { val = getAttributes(domain, name, auth = auth, .opts = .opts) .fields = names(val) } ids = sprintf("Attribute.%d.Name", seq(along = .fields) - 1L) args[ ids ] = unlist(.fields) txt = makeRequest(args, auth, uri = 'http://sdb.amazonaws.com', .opts) getResponseMetaData(txt) } readAttributeFromXML = function(txt) { if(is.character(txt)) txt = xmlParse(txt) node = xmlRoot(txt)[["GetAttributesResult"]] if(xmlSize(node) == 0) return(NULL) structure(xmlSApply(node, function(x) xmlValue(x[["Value"]])), names = xmlSApply(node, function(x) xmlValue(x[["Name"]]))) } makeRequest = function(args, key = getOption("AmazonAWS"), uri = 'http://sdb.amazonaws.com', .opts = getDefaultS3CurlOptions()) { h = StringToSign(args, "/", key) args = paste(names(h), h, sep = "=", collapse = "&") uri = paste(uri, args, sep = "?") getURLContent(uri, .opts = .opts) } getDefaultS3CurlOptions = function() { list(ssl.verifypeer = FALSE, followlocation = TRUE, httpheader = "User-Agent: R and RAmazonDB") }
/R/putData.R
no_license
cabiling/RAmazonDB
R
false
false
2,814
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Consistency Functions.R
\name{decile_classification2}
\alias{decile_classification2}
\title{Decile classification}
\usage{
decile_classification2(x)
}
\description{
Uses an exponential decay function with the standard deviation of the
classification score to evaluate consistency. If consistency is < 36, then
there are at least a few deciles that have a trend strongly counter to the
general trend of the medical event.
}
/OHDSITrends/man/decile_classification2.Rd
no_license
NEONKID/StudyProtocolSandbox
R
false
true
489
rd
install.packages("xlsx") library(xlsx) ### Write Data in Excel write.xlsx(ddd,"/.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) ### Append Data in Same File write.xlsx(ddd2,"/.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = TRUE) ### Sheet 1 ### write.xlsx(theta_mean_by_year,"hot_cold_theta.xlsx", sheetName = "theta_mean_by_year", col.name = TRUE, row.names = TRUE, append = FALSE) ### Sheet 2 ### write.xlsx(theta_mean_lm_coef,"hot_cold_theta.xlsx", sheetName = "theta_mean_lm_coef", col.name = TRUE, row.names = TRUE, append = TRUE) str(theta_mean_lm) df_yh <- data.frame(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T)) df_yh2 <- data.frame(t(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T))) df_yh3 <- data.frame(matrix(unlist(theta_mean_lm$[1]), nrow=length(theta_mean_lm), byrow=T)) df_yh4 <- data.frame(matrix(unlist(theta_mean_lm[[1]]), nrow=length(theta_mean_lm), byrow=T)) print(theta_mean_lm[[1]]) print(theta_mean_lm[1]) ni <- topics_n i <- 1 for(i in ni){ result = (i) print(result) } i <- 1 j <- 2 paste(i, "*", j, "=", i*j) i=4 temp <- data.frame() for(i in 1:K){ temp <- data.frame(t(matrix(unlist(theta_mean_lm[i][12]), nrow=length(theta_mean_lm[i][12]), byrow=T))) } for(i in 1:K){ #paste("moddel",i) <- data.frame() #paste("model",i) <- as.data.frame(theta_mean_lm[i][12]) temp <- data.frame(theta_mean_lm[[i]][12]) myfile <- file.path(/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/LDA Research, paste0("model","_",i,".xlsx")) write.xlsx(temp, myfile, sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) } write.xlsx(temp, "/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/LDA Research/myfile.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) data.frame(theta_mean_lm[[i]][12]) model_[i] <- 1 paste("model",i) modelstr(theta_mean_lm[[i]][12]) mmm <- data.frame(theta_mean_lm[[i]][12]) mmm <- list(theta_mean_lm[[i]][12]) ### 토픽이름 열추가 mmm$kd <- data.frame(rep(K,length(mmm$model.x))) write.xlsx(mmm, "/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/LDA Research/myfile.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) mmm$k <- mmm$model.x * mmm$model.theta_mean_by_year_time mmm$k <- c(1,2,2) str(theta_mean_lm) View(theta_mean_lm) i <- 1 theta_mean_lm[[i]][12][1] mmm$k <- data.frame(rep(i,length(mmm$model.x))) mmm2 <- mmm write.xlsx(mmm2, "myfile2.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) str(mmm) str(mmm2) theta_mean_lm df_yh <- data.frame(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T)) df_yh2 <- data.frame(t(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T))) View(df_yh) str(df_yh) write.xlsx(theta,"/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/theta1.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE)
/R/calc_anything.R
permissive
johnfkoo951/LDA
R
false
false
3,281
r
install.packages("xlsx") library(xlsx) ### Write Data in Excel write.xlsx(ddd,"/.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) ### Append Data in Same File write.xlsx(ddd2,"/.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = TRUE) ### Sheet 1 ### write.xlsx(theta_mean_by_year,"hot_cold_theta.xlsx", sheetName = "theta_mean_by_year", col.name = TRUE, row.names = TRUE, append = FALSE) ### Sheet 2 ### write.xlsx(theta_mean_lm_coef,"hot_cold_theta.xlsx", sheetName = "theta_mean_lm_coef", col.name = TRUE, row.names = TRUE, append = TRUE) str(theta_mean_lm) df_yh <- data.frame(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T)) df_yh2 <- data.frame(t(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T))) df_yh3 <- data.frame(matrix(unlist(theta_mean_lm$[1]), nrow=length(theta_mean_lm), byrow=T)) df_yh4 <- data.frame(matrix(unlist(theta_mean_lm[[1]]), nrow=length(theta_mean_lm), byrow=T)) print(theta_mean_lm[[1]]) print(theta_mean_lm[1]) ni <- topics_n i <- 1 for(i in ni){ result = (i) print(result) } i <- 1 j <- 2 paste(i, "*", j, "=", i*j) i=4 temp <- data.frame() for(i in 1:K){ temp <- data.frame(t(matrix(unlist(theta_mean_lm[i][12]), nrow=length(theta_mean_lm[i][12]), byrow=T))) } for(i in 1:K){ #paste("moddel",i) <- data.frame() #paste("model",i) <- as.data.frame(theta_mean_lm[i][12]) temp <- data.frame(theta_mean_lm[[i]][12]) myfile <- file.path(/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/LDA Research, paste0("model","_",i,".xlsx")) write.xlsx(temp, myfile, sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) } write.xlsx(temp, "/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/LDA Research/myfile.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) data.frame(theta_mean_lm[[i]][12]) model_[i] <- 1 paste("model",i) modelstr(theta_mean_lm[[i]][12]) mmm <- data.frame(theta_mean_lm[[i]][12]) mmm <- list(theta_mean_lm[[i]][12]) ### 토픽이름 열추가 mmm$kd <- data.frame(rep(K,length(mmm$model.x))) write.xlsx(mmm, "/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/LDA Research/myfile.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) mmm$k <- mmm$model.x * mmm$model.theta_mean_by_year_time mmm$k <- c(1,2,2) str(theta_mean_lm) View(theta_mean_lm) i <- 1 theta_mean_lm[[i]][12][1] mmm$k <- data.frame(rep(i,length(mmm$model.x))) mmm2 <- mmm write.xlsx(mmm2, "myfile2.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE) str(mmm) str(mmm2) theta_mean_lm df_yh <- data.frame(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T)) df_yh2 <- data.frame(t(matrix(unlist(theta_mean_lm), nrow=length(theta_mean_lm), byrow=T))) View(df_yh) str(df_yh) write.xlsx(theta,"/Users/yhn_hac/Hanyang University/01-2. Study_Alone/R Data Analysis/LDA (Latent Dirichlet Allocation)/theta1.xlsx", sheetName = "Sheet1", col.name = TRUE, row.names = TRUE, append = FALSE)
# Remove species in the tree that are not in the trait matrix
library(ape)   # drop.tip() comes from the ape package
species.to.exclude <- aatree$tip.label[!(aatree$tip.label %in% traits2$sp)]
phylo <- drop.tip(aatree, species.to.exclude)
rm(species.to.exclude)

# Remove species not in the phylogeny
traits3 <- traits2[traits2$sp %in% phylo$tip.label, ]

# Prep the phylogeny: invert the phylogenetic precision matrix from MCMCglmm
# to obtain the species-level relatedness (covariance) matrix A
inv.phylo <- MCMCglmm::inverseA(phylo, nodes = "TIPS", scale = TRUE)
A <- solve(inv.phylo$Ainv)
rownames(A) <- rownames(inv.phylo$Ainv)
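## Hedged downstream sketch (an assumption, not part of the original script):
## the covariance matrix A is commonly passed to a brms model as the
## species-level relatedness matrix; 'clim.sens' is a hypothetical response
## column in traits3.
# library(brms)
# fit <- brm(clim.sens ~ 1 + (1 | gr(sp, cov = A)),
#            data = traits3, data2 = list(A = A))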
/analyses/source/treeprep.R
no_license
AileneKane/arboretaclimsens
R
false
false
497
r
#' @title Find the accuracy of a forecast #' #' @description Calculates the Mean Square Error (MSE), Mean Absolute #' Deviation (MAD) and Mean Absolute Percentage Error (MAPE) of #' a forecast. #' #' @param y the actual data. #' #' @param yhat the forecasted vector. #' #' #' @examples #' twoStage.accuracy <- function(y, yhat) { err <- y - yhat # MSE MSE <- mean(err^2) # MAD MAD <- mean(abs(err)) # MAPE MAPE <- mean(abs(err/y)) * 100 ErrVec <- cbind(MSE, MAD, paste(toString(MAPE),"%")) colnames(ErrVec) <- c("MSE","MAD","MAPE") return(ErrVec) }
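## Hedged usage sketch (not part of the original file): compare actual values
## against a made-up forecast with the function defined above.
# y    <- c(100, 105,  98, 110, 120)
# yhat <- c( 98, 102, 101, 108, 115)
# twoStage.accuracy(y, yhat)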
/R/accuracy.R
no_license
anupamal14/twoStageForecast_v0
R
false
false
609
r
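Because the roxygen @examples block above is left empty, here is a brief usage sketch with illustrative numbers:
y    <- c(100, 102, 98, 105)   # actual values
yhat <- c( 99, 101, 99, 104)   # forecasts
twoStage.accuracy(y, yhat)
# returns a 1 x 3 character matrix with columns MSE, MAD and MAPE (MAPE is pasted with a "%")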
# define the function to generate random labyrinth of given size (may be insolvable) generate_lab <- (function(y) apply(matrix(nrow = y, ncol = y), c(1,2), function(x) sample(c(0,1),1))) # set size labi_size <- as.integer(readline(prompt = "Enter the labyrinth size:")) # generate labyrinth, clear start and exit, add borders # fill the inner part with generated random lab { lab_x <- generate_lab(labi_size); lab_x[nrow(lab_x),ncol(lab_x)] <- 0; lab_x[1,1] <- 0; lab_xx = matrix(data = 1, nrow = labi_size + 2, ncol = labi_size + 2); lab_xx[2:(labi_size+1),2:(labi_size+1)] <- lab_x } # correct the labyrinth manually if needed :) lab_xx[11:14,11] <- 0 lab_xx[15,19] <- 0 # draw labyrinth with pheatmap package if (!requireNamespace("pheatmap", quietly = TRUE)) install.packages("pheatmap") library(pheatmap) pheatmap(lab_xx, cellwidth = 10, cellheight = 10, cluster_rows = FALSE, cluster_cols = FALSE, color=gray.colors(2,start=1,end=0)) # You can save the labyrinth as a .pdf file { pdf(file = "lab20.pdf", width=7, height=7, paper='a4', pagecentre = TRUE, onefile = FALSE) pheatmap(lab_xx, cellwidth = 15, cellheight = 15, cluster_rows = FALSE, cluster_cols = FALSE, color=gray.colors(2,start=1,end=0)) dev.off() } #assign generated lab to new matrix lab20 <- lab_xx dim(lab20) dim(lab_xx) <- c(labi_size + 2, labi_size + 2) dim(lab_xx) lab_xx #lab_xx to vector dim(lab_xx) <- (dim(lab_xx)[1]*dim(lab_xx)[2])
/Rstudio/PDZ-1-genalg/labyrinth_generator.R
no_license
slawomirslowik/Mgr_Inf
R
false
false
1,530
r
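Since the generator's comment warns the labyrinth "may be insolvable", a small solvability check is a natural companion. This sketch assumes the encoding used above (0 = open cell, 1 = wall) and the bordered matrix lab_xx; it runs a breadth-first flood fill from the inner top-left corner to the inner bottom-right corner.
lab_is_solvable <- function(lab) {
  n <- nrow(lab)
  start <- c(2, 2)          # entrance (inner corner; the outer border is all walls)
  goal  <- c(n - 1, n - 1)  # exit
  if (lab[start[1], start[2]] == 1 || lab[goal[1], goal[2]] == 1) return(FALSE)
  visited <- matrix(FALSE, n, n)
  visited[start[1], start[2]] <- TRUE
  queue <- list(start)
  moves <- rbind(c(1, 0), c(-1, 0), c(0, 1), c(0, -1))
  while (length(queue) > 0) {
    cur <- queue[[1]]; queue <- queue[-1]
    if (all(cur == goal)) return(TRUE)
    for (k in 1:4) {
      nxt <- cur + moves[k, ]
      if (lab[nxt[1], nxt[2]] == 0 && !visited[nxt[1], nxt[2]]) {
        visited[nxt[1], nxt[2]] <- TRUE
        queue <- c(queue, list(nxt))
      }
    }
  }
  FALSE
}
lab_is_solvable(lab_xx)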
#'Takes a call and returns its polynomial coefficients #' #'@rdname simplify #' #'@param tree A call that will be parsed and simplified recursively #'@param .x. the variable name with respect to which the polynomial should be most simplified #'@param params All names of free variables. If there are no free variables, the value should be "" #'@param iterate The number of times the call is nested. Default and proper value when called from the outside is 1 #' #'@details Will work on any call as long as it can be reduced to a polynomial with respect to the variable #'and each of the parameters. Operates recursively, reducing each of the coefficients with respect to the extra parameters #'in turn. Calls .polyExp.num when all remaining coefficients are numeric to reduce the expression more fully. #' #'@return A list containing a list, \code{coeffs}, of coefficients ordered high to low (i.e. the list (2,3,4) would correspond to #'the polynomial 2*x^2+3*x+4 ) and value, \code{pow}, indicating the order of the polynomial. #'If the expression is not a polynomial, this method returns an empty list or an error. #' .polyExp <- function(tree, .x., params, iterate=1){ # Reduce purely numerical coefficients to a number # Aaron Mayerson May 29, 2013 .eval_coeffs <- function(coeffs){ for (i in 1:length(coeffs)){ val <- try(eval(coeffs[[i]]),silent=TRUE) if( class(val)=="numeric") coeffs[[i]] <- val } return(coeffs) } #A function that calls .polyExp() on each of the resultant coefficients in turn to further simplify them with #respect to the additional parameters. .reduce_coeffs <- function(coeffs, params){ for(i in 1:length(coeffs)){ new.x. <- params[1] if(length(params)==1) newparams <- "" else newparams <- params[-1] newco <- .polyExp(coeffs[[i]], new.x., newparams) if(newco$pow>=2){ expvec <- c(rep("^", newco$pow-1), "", "") powvec <- c((newco$pow):2, "", "") } else{ expvec <- rep("", newco$pow+1) powvec <- rep("", newco$pow+1) } multvec <- c(rep("*", newco$pow), "") varvec <- c(rep(new.x., newco$pow), "") #simplify expression index <- 1 for(j in 1:length(newco$coeffs)){ if(index>length(newco$coeffs)) break if(newco$coeffs[[index]]==0){ newco$coeffs <- newco$coeffs[-index] multvec <- multvec[-index] varvec <- varvec[-index] expvec <- expvec[-index] powvec <- powvec[-index] } else{ if(newco$coeffs[[index]]==1 && index!= length(newco$coeffs)){ newco$coeffs[index] <- "" multvec[index] <- "" } else{ if(!(class(newco$coeffs[[index]])=='name')&&!(class(newco$coeffs[[index]])=='numeric')){ if(newco$coeffs[[index]][[1]]=='+'||newco$coeffs[[index]][[1]]=='-') newco$coeffs[[index]] <- paste("(", deparse(newco$coeffs[[index]]), ")", sep="") } } index <- index + 1 } } if(length(newco$coeffs)==0) coeffs[[i]] <- 0 else coeffs[[i]] <- parse(text = paste(newco$coeffs, multvec , varvec, expvec, powvec, collapse="+", sep=""))[[1]] } return(coeffs) } #If there are no additional parameters, we can assume the coefficients will be numeric and call a faster, #more simplified version if(all(params =="") ) return(.polyExp.num(tree, .x.)) #if it is a simple expression if(tree==.x.){ coeffs <- list(1, 0) return(list(coeffs= coeffs, pow=1)) } #if it is a constant if(class(tree)=='numeric'||class(tree)=='name'){ coeffs <- list(tree) if(iterate==1){ coeffs <- suppressWarnings(.reduce_coeffs(coeffs, params)) } return(list(coeffs = coeffs, pow=0)) } if(tree[[1]]=='('){ return(Recall(tree[[2]], .x., params, iterate=iterate+1)) } if(tree[[1]] == '+'){ lside <- Recall(tree[[2]], .x., params, iterate = iterate+1) rside <- Recall(tree[[3]],
.x., params, iterate = iterate+1) if(rside$pow >= lside$pow){ pow = rside$pow coeffs <- rside$coeffs lcoeffs <- append(rep(0, pow-lside$pow), lside$coeffs) for(i in 1:length(coeffs)){ if(coeffs[[i]]==0) coeffs[[i]] <- lcoeffs[[i]] else { if(lcoeffs[[i]]==0) coeffs[[i]] <- coeffs[[i]] else coeffs[[i]] <- parse(text = paste(deparse(coeffs[[i]], width.cutoff=500), "+", deparse(lcoeffs[[i]], width.cutoff=500), sep=""))[[1]] } } if(iterate==1){ coeffs <- suppressWarnings(.reduce_coeffs(coeffs, params)) } return(list(coeffs = coeffs, pow = pow)) } else{ pow = lside$pow coeffs <- lside$coeffs rcoeffs <- append(rep(0, pow-rside$pow), rside$coeffs) for(i in 1:length(coeffs)){ if(coeffs[[i]]==0) coeffs[[i]] <- rcoeffs[[i]] else{ if(rcoeffs[[i]] == 0) coeffs[[i]] <- coeffs[[i]] else coeffs[[i]] <- parse(text = paste(deparse(coeffs[[i]], width.cutoff=500), "+", deparse(rcoeffs[[i]], width.cutoff=500), sep=""))[[1]] } } if(iterate==1){ coeffs <- suppressWarnings(.reduce_coeffs(coeffs, params)) } return(list(coeffs = coeffs, pow = pow)) } } if(tree[[1]] == '-'){ if(length(tree)==2){ inside <- Recall(tree[[2]], .x., params, iterate = iterate+1) coeffs <- inside$coeffs pow = inside$pow for(i in 1:length(coeffs)){ if(coeffs[[i]] == 0) coeffs[[i]] <- coeffs[[i]] else coeffs[[i]] <- parse(text = paste("-", deparse(coeffs[[i]]), sep=""))[[1]] } # reduce expressions that result in numerical coefficients to numbers, added coeffs <- .eval_coeffs(coeffs) if(iterate==1){ coeffs <- suppressWarnings(.reduce_coeffs(coeffs, params)) } return(list(coeffs = coeffs, pow=pow)) } else{ lside <- Recall(tree[[2]], .x., params, iterate=iterate+1) rside <- Recall(tree[[3]], .x., params, iterate=iterate+1) if(rside$pow >= lside$pow){ pow = rside$pow coeffs <- rside$coeffs lcoeffs <- append(rep(0, pow-lside$pow), lside$coeffs) for(i in 1:length(coeffs)){ if(lcoeffs[[i]]==0) coeffs[[i]] <- parse(text = paste("-", deparse(coeffs[[i]], width.cutoff=500), sep = ""))[[1]] else{ if(coeffs[[i]] == 0) coeffs[[i]] <- lcoeffs[[i]] else coeffs[[i]] <- parse(text = paste(deparse(lcoeffs[[i]], width.cutoff=500), "-", deparse(coeffs[[i]], width.cutoff=500), sep = ""))[[1]] } } # reduce expressions that result in numerical coefficients to numbers coeffs <- .eval_coeffs(coeffs) if(iterate==1){ coeffs <- suppressWarnings(.reduce_coeffs(coeffs, params)) } return(list(coeffs = coeffs, pow = pow)) } else{ pow = lside$pow coeffs <- lside$coeffs rcoeffs <- append(rep(0, pow-rside$pow), rside$coeffs) for(i in 1:length(coeffs)){ if(coeffs[[i]] == 0) coeffs[[i]] <- parse(text = paste("-", deparse(rcoeffs[[i]], width.cutoff=500), sep = ""))[[1]] else{ if(rcoeffs[[i]] == 0) coeffs[[i]] <- coeffs[[i]] else coeffs[[i]] <- parse(text = paste(deparse(coeffs[[i]], width.cutoff=500), "-", deparse(rcoeffs[[i]], width.cutoff=500), sep = ""))[[1]] } } if(iterate==1){ coeffs <- suppressWarnings(.reduce_coeffs(coeffs, params)) } return(list(coeffs = coeffs, pow = pow)) } } } if(tree[[1]] == '*'){ lside <- Recall(tree[[2]], .x., params, iterate=iterate+1) rside <- Recall(tree[[3]], .x., params, iterate=iterate+1) pow <- lside$pow + rside$pow diff <- abs(lside$pow - rside$pow) dim = max(lside$pow, rside$pow)+1 cmatrix <- list() for(i in 1:length(lside$coeffs)){ cmatrix[[i]]<- list() for(j in 1:length(rside$coeffs)){ if(lside$coeffs[[i]]==0 || rside$coeffs[[j]]==0) cmatrix[[i]][[j]] <- 0 else{ if(lside$coeffs[[i]]==1) cmatrix[[i]][[j]] <- rside$coeffs[[j]] else{ if(rside$coeffs[[j]]==1) cmatrix[[i]][[j]] <- lside$coeffs[[i]] else cmatrix[[i]][[j]] <- parse(text = 
paste("(", deparse(lside$coeffs[[i]], width.cutoff=500), ")*(", deparse(rside$coeffs[[j]], width.cutoff=500), ")", sep=""))[[1]] } } } } coeffs <- list() index=1 for(i in 1:length(lside$coeffs)){ for(j in 1:i){ if(j <= length(rside$coeffs)){ if(j==1) coeffs[[index]] <- cmatrix[[i-j+1]][[j]] else{ if(coeffs[[index]]==0) coeffs[[index]] <- cmatrix[[i-j+1]][[j]] else{ if(cmatrix[[i-j+1]][[j]] ==0) coeffs[[index]] <- coeffs[[index]] else coeffs[[index]] <- parse(text=paste(deparse(coeffs[[index]], width.cutoff=500), "+", deparse(cmatrix[[i-j+1]][[j]], width.cutoff=500), sep=""))[[1]] } } } } index = index+1 } for(j in 2:length(rside$coeffs)){ if(length(rside$coeffs)==1) break for(i in 1:(length(rside$coeffs)-j+1)){ if((length(cmatrix)-i )>= 0 && (i+j-1)<= length(rside$coeffs)){ if(i==1) coeffs[[index]] <- cmatrix[[length(cmatrix)-i+1]][[i+j-1]] else{ if(coeffs[[index]] == 0) coeffs[[index]] <- cmatrix[[length(cmatrix)-i+1]][[i+j-1]] else{ if(cmatrix[[length(cmatrix)-i+1]][[i+j-1]]==0) coeffs[[index]] <- coeffs[[index]] else coeffs[[index]] <- parse(text = paste(deparse(coeffs[[index]], width.cutoff=500), "+", deparse(cmatrix[[length(cmatrix)-i+1]][[i+j-1]], width.cutoff=500), sep=""))[[1]] } } } } index = index+1 } if(iterate==1){ coeffs <- suppressWarnings(.reduce_coeffs(coeffs, params)) } return(list(coeffs = coeffs, pow = pow)) } if(tree[[1]] == '^'){ #Recursively call as multiplication newTree <- tree tree[[3]] <- eval(tree[[3]]) - 1 if(eval(tree[[3]]) <0) stop("Can only handle positive exponents") if(tree[[3]] == 1){ tree <- tree[[2]] } newTree <- parse(text= paste(deparse(newTree[[2]], width.cutoff=500), "*", deparse(tree, width.cutoff=500), sep=""))[[1]] return(Recall(newTree, .x., params, iterate = iterate+1)) } stop("Is not a polynomial") return(list()) } #'Takes a call and returns its polynomial coefficients as numerics. #' #'@rdname simplify #' #'@details works with the same structure as .polyExp() but will return only if all coefficients reduce to numeric values. #' #'@return A list containing a list, \code{coeffs}, of coefficients ordered high to low (i.e. the list (2,3,4) would correspond to #'the polynomial 2*x^2+3*x+4 ) and value, \code{pow}, indicating the order of the polynomial. #'If the expression is not a polynomial, this method returns an empty list or an error. .polyExp.num <- function(tree, .x.){ #if it is a simple expression if(tree==.x.){ coeffs <- c(1, 0) return(list(coeffs= coeffs, pow=1)) } #if it is a constant if(class(tree)=='numeric'||class(tree)=='name'){ coeffs <- c(tree) return(list(coeffs = coeffs, pow=0)) } if(tree[[1]]=='('){ return(Recall(tree[[2]], .x.)) } if(tree[[1]] == '+'){ lside <- Recall(tree[[2]], .x.) rside <- Recall(tree[[3]], .x.) if(rside$pow >= lside$pow){ pow = rside$pow coeffs <- rside$coeffs lcoeffs <- append(rep(0, pow-lside$pow), lside$coeffs) for(i in 1:length(coeffs)) coeffs <- tryCatch({coeffs[i] <- coeffs[i] + lcoeffs[i] coeffs}, error = function(e){ coeffs <- as.list(coeffs) coeffs[i] <- parse(text = paste(deparse(coeffs[[i]]), "+", deparse(lcoeffs[[i]]), sep="")) coeffs} ) return(list(coeffs = coeffs, pow = pow)) } else{ pow = lside$pow coeffs <- lside$coeffs rcoeffs <- append(rep(0, pow-rside$pow), rside$coeffs) coeffs <- coeffs + rcoeffs return(list(coeffs = coeffs, pow = pow)) } } if(tree[[1]] == '-'){ if(length(tree)==2){ inside = Recall(tree[[2]], .x.) coeffs = -inside$coeffs pow=inside$pow return(list(coeffs = coeffs, pow=pow)) } else{ lside <- Recall(tree[[2]], .x.) rside <- Recall(tree[[3]], .x.) 
if(rside$pow >= lside$pow){ pow = rside$pow coeffs <- rside$coeffs lcoeffs <- append(rep(0, pow-lside$pow), lside$coeffs) names <- names(coeffs) coeffs <- lcoeffs - coeffs names(coeffs) <- names return(list(coeffs = coeffs, pow = pow)) } else{ pow = lside$pow coeffs <- lside$coeffs rcoeffs <- append(rep(0, pow-rside$pow), rside$coeffs) coeffs <- coeffs - rcoeffs return(list(coeffs = coeffs, pow = pow)) } } } if(tree[[1]] == '*'){ lside <- Recall(tree[[2]], .x.) rside <- Recall(tree[[3]], .x.) pow <- lside$pow + rside$pow diff <- abs(lside$pow - rside$pow) dim = max(lside$pow, rside$pow)+1 cmatrix <- outer(lside$coeffs, rside$coeffs) #pad matrix to make it square if(nrow(cmatrix) > ncol(cmatrix)){ for(i in (1:diff)) cmatrix <- cbind(cmatrix, 0) } if(ncol(cmatrix) > nrow(cmatrix)){ for(i in (1:diff)) cmatrix <- rbind(cmatrix, 0) } coeffs <- rep(0, pow+1) for(i in (1:dim)){ if(i != 0){ coeffs[i] <- sum(cmatrix[cbind((i:1), (1:i))]) } } for(i in dim:(pow+1)){ if(i != 0){ coeffs[i] <- sum(cmatrix[cbind((dim:(1+i-dim)), ((1+i-dim):dim))]) } } return(list(coeffs = coeffs, pow = pow)) } if(tree[[1]] == '^'){ #Recursively call as multiplication newTree <- tree tree[[3]] <- tree[[3]] - 1 if(tree[[3]] == 1){ tree <- tree[[2]] } newTree <- parse(text= paste(deparse(newTree[[2]]), "*", deparse(tree), sep=""))[[1]] return(Recall(newTree, .x.)) } stop("Is not a polynomial") return(list()) } #' Method for putting a polynomial together given the coefficients and power from .polyExp() #' #' @rdname simplify #' #' @param poly output of .polyExp() #' @param form original formula - provides information on which variable the polynomial was reduced with respect to. #' #' @return A formula whose left hand side is a polynomial that fits the description given with the input poly. .makePoly <- function(form, poly){ if(poly$pow>=2){ expvec <- c(rep("^", poly$pow-1), "", "") powvec <- c((poly$pow):2, "", "") } else{ expvec <- rep("", poly$pow+1) powvec <- rep("", poly$pow+1) } multvec <- c(rep("*", poly$pow), "") varvec <- c(rep(all.vars(rhs(form)), poly$pow), "") #simplify expression index <- 1 for(j in 1:length(poly$coeffs)){ if(index>length(poly$coeffs)) break if(poly$coeffs[[index]]==0){ poly$coeffs <- poly$coeffs[-index] multvec <- multvec[-index] varvec <- varvec[-index] expvec <- expvec[-index] powvec <- powvec[-index] } else{ if(poly$coeffs[[index]]==1 && index!= length(poly$coeffs)){ poly$coeffs[index] <- "" multvec[index] <- "" } else{ if(!(class(poly$coeffs[[index]])=='name')&&!(class(poly$coeffs[[index]])=='numeric')){ if(poly$coeffs[[index]][[1]]=='+'||poly$coeffs[[index]][[1]]=='-') poly$coeffs[[index]] <- paste("(", deparse(poly$coeffs[[index]]), ")", sep="") } } index <- index + 1 } } form[[2]] <- if(length(poly$coeffs)==0) 0 else parse(text = paste(poly$coeffs, multvec , varvec, expvec, powvec, collapse="+", sep=""))[[1]] return(form) }
/R/simplify.R
no_license
rpruim/mosaic
R
false
false
17,183
r
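As a quick illustration of the coefficient convention documented above (ordered high to low), here is a sketch of calling the numeric helper directly; these are internal, unexported functions, so the call form is for illustration only.
tree <- quote(3 * x^2 + 2 * x + 5)
.polyExp.num(tree, "x")
# expected: list(coeffs = c(3, 2, 5), pow = 2), i.e. the polynomial 3*x^2 + 2*x + 5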
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fonctions_activite_donnees_pluri_annuelles.R \name{get_activite_sejours} \alias{get_activite_sejours} \title{General activity dashboard with a count of stays, split between day cases (HDJ) and full admissions (HC)} \usage{ get_activite_sejours( df, structure ) } \arguments{ \item{df}{a data frame of rum/rsa stay-level data} } \value{ a general activity dashboard, counted in stays } \description{ General activity dashboard with a count of stays, split between day cases (HDJ) and full admissions (HC) } \examples{ \dontrun{ get_activite_sejours( df, structure ) -> tdb } }
/man/get_activite_sejours.Rd
no_license
24p11/dimRactivite
R
false
true
632
rd
#Creates the FIRST plot of the assignment plot1 <- function(){ if(!exists("myData") || is.null(myData)){ myData <- getData() } hist(myData$Global_active_power, col="red", main = "Global Active Power", xlab="Global Active Power (kilowatts)") } #Gets a zip file from the internet, unzips it, loads one of the inner files and filters it given two dates getData <- function(fileUrl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", dataFile = "household_power_consumption.txt", begin = "1/2/2007", end = "2/2/2007"){ temp <- tempfile() download.file(fileUrl,temp) data <- read.csv(unz(temp, dataFile),sep=";",na.strings = "?") data <- data[data$Date %in% c(begin, end),] unlink(temp) data } png("plot1.png", bg=NA) plot1() dev.off()
/plot1.R
no_license
bolilla/ExData_Plotting1
R
false
false
770
r
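A small follow-up sketch that the later plots of the same assignment typically need: combine the Date and Time columns into one timestamp. It assumes the file also carries a Time column in H:M:S format, matching the d/m/Y dates used in the filter above.
myData <- getData()
myData$DateTime <- strptime(paste(myData$Date, myData$Time),
                            format = "%d/%m/%Y %H:%M:%S")  # e.g. "1/2/2007 00:00:00"
str(myData$DateTime)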
## Ranking hospitals in all states ## Write a function called rankall that takes two arguments: ## an outcome name (outcome) and a hospital ranking (num). ## The function reads the outcome-of-care-measures.csv file and ## returns a 2-column data frame containing the hospital in each ## state that has the ranking specified in num. rankall <- function(outcome, num = "best") { ## Read outcome data data <- read.csv("outcome-of-care-measures.csv", colClasses = "character") ## Check that the outcome is valid if(outcome == "heart attack"){ rate <- 11 }else{ if(outcome == "heart failure"){ rate <- 17 } else{ if(outcome == "pneumonia"){ rate <- 23 }else{ stop("invalid outcome") } } } ## For each state, find the hospital of the given rank stateNames <- unique(data$State) result <- data.frame() result.names <- c("hospital", "state") for(state in stateNames){ ## Return hospital name in that state with the given rank hospital <- data$Hospital.Name[data$State==state] deathrate <- as.double(data[rate][data$State==state,]) ord <- order(deathrate,hospital) orderHospital <- hospital[ord] orderDeath <- deathrate[ord] if(num=="best"){ temp <- orderHospital[1] }else{ if(num=="worst"){ orderHospital <- orderHospital[!is.na(orderDeath)] temp <- orderHospital[length(orderHospital)] }else{ if(num <= length(orderHospital)){ temp <- orderHospital[num] }else{ temp <- NA } } } row <- data.frame(temp, state) names(row) <- c("hospital", "state") result <- rbind(result, row) } result }
/2. R programming language/exercise4/rankall.R
no_license
xiangacadia/DataSciencCoursera
R
false
false
1,970
r
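A usage sketch, assuming outcome-of-care-measures.csv is in the working directory:
head(rankall("heart attack", 20), 3)    # 20th-ranked hospital in each state
tail(rankall("pneumonia", "worst"), 3)  # worst-ranked hospital in each state
rankall("heart failure")                # num defaults to "best"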
## ui.R ## shiny::shinyUI( shinydashboard::dashboardPage( title = "shinyauth", shinydashboard::dashboardHeader( title = "shinyauth"), shinyauth::saSidebar( shiny::uiOutput("sidebar") ), shinyauth::saBody( shiny::uiOutput("body") ) ) )
/minimum_example/ui.R
no_license
8280567/shinyauth
R
false
false
284
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllClasses.R \docType{class} \name{fmrstunpar-class} \alias{fmrstunpar-class} \alias{fmrstunpar} \title{An S4 class to represent estimated optimal lambdas} \description{ An S4 class to represent estimated optimal lambdas resulting from running \code{\link{fmrs.tunsel}} } \section{Slots}{ \describe{ \item{\code{ncomp}}{A length-one numeric vector} \item{\code{lambPen}}{A dimension-one-\code{ncomp} numeric array} \item{\code{MCPGam}}{A length-one numeric vector} \item{\code{SICAGam}}{A length-one numeric vector} \item{\code{disFamily}}{A length-one character vector} \item{\code{penFamily}}{A length-one character vector} \item{\code{lambRidge}}{A length-one numeric vector} \item{\code{model}}{A length-one character vector} \item{\code{activeset}}{A dimension-\code{nobs}-\code{ncomp} 0-1 matrix} }} \keyword{object}
/man/fmrstunpar-class.Rd
no_license
zample/fmrs
R
false
true
913
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/XMLDocument.R \docType{data} \name{XMLDocument} \alias{XMLDocument} \alias{xml_doc} \title{XMLDocument Class} \format{An object of class \code{R6ClassGenerator} of length 24.} \usage{ XMLDocument } \description{ This is a slightly specialized subclass of \code{XMLElement}. } \details{ XML documents *must* have a root element } \keyword{datasets}
/man/XMLDocument.Rd
permissive
manigaru/XML
R
false
true
426
rd
#' interscal.delta #' @keywords internal interscal.delta <- function(sym.data) { res <- interval.dist(sym.data, distance = "interscal") CMin <- res$min.matrix CMax <- res$max.matrix Fil <- sym.data$N D2M <- matrix(0, 2 * Fil, 2 * Fil) for (i in 1:Fil) { for (j in i:Fil) { if (i == j) { if (j != Fil) { D2M[2 * i - 1, 2 * j - 1] <- 0 D2M[2 * i, 2 * j] <- 0 D2M[2 * i - 1, 2 * j] <- CMax[i, j] D2M[2 * j, 2 * i - 1] <- D2M[2 * i - 1, 2 * j] } else { D2M[2 * i - 1, 2 * j - 1] <- 0 D2M[2 * i, 2 * j] <- 0 D2M[2 * i - 1, 2 * j] <- CMax[i, i] D2M[2 * j, 2 * i - 1] <- D2M[2 * i - 1, 2 * j] } } else { D2M[2 * i - 1, 2 * j - 1] <- CMin[i, j] D2M[2 * i, 2 * j] <- CMax[i, j] D2M[2 * i - 1, 2 * j] <- (CMax[i, j] + CMin[i, j])/2 D2M[2 * i, 2 * j - 1] <- (CMax[i, j] + CMin[i, j])/2 D2M[2 * j - 1, 2 * i - 1] <- D2M[2 * i - 1, 2 * j - 1] D2M[2 * j, 2 * i] <- D2M[2 * i, 2 * j] D2M[2 * j, 2 * i - 1] <- D2M[2 * i - 1, 2 * j] D2M[2 * j - 1, 2 * i] <- D2M[2 * i, 2 * j - 1] } } } return(D2M) }
/R/interscal.delta.R
no_license
rcannood/RSDA
R
false
false
1,387
r
# MonteCarlo Pi rm(list=ls()) set.seed(5757) NTrials <- c(10,10^2,10^3,10^4,10^5,10^6) Ncases <- length(NTrials) PiEst <- numeric(Ncases) for( i in 1:Ncases) { Rcount <- 0 for (j in 1:NTrials[i]) { x <- runif(1) y <- runif(1) if( x^2 + y^2 <= 1) { Rcount <- Rcount + 1 } } PiEst[i] <- 4*Rcount/NTrials[i] } sink("MCPi_R_novector.txt") print(cbind(NTrials,PiEst)) sink() pdf("MCPi_R_novector.pdf") plot(NTrials,PiEst, main="Monte Carlo Estimation of Pi (R, Basic)", xlab="Number of Trials", ylab="Estimation of Pi") dev.off()
/Module2/MCPi_novector.R
no_license
DBurkeGGC/ursusProjects
R
false
false
595
r
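For comparison with the explicit loop above (the file name says "novector"), here is a vectorized sketch of the same estimator; it reuses NTrials from the script, and the seed choice is arbitrary.
set.seed(5757)
PiEstVec <- sapply(NTrials, function(n) {
  x <- runif(n)
  y <- runif(n)
  4 * mean(x^2 + y^2 <= 1)  # fraction of points inside the quarter circle, times 4
})
print(cbind(NTrials, PiEstVec))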
library(rpart); setorig = rbind(set1,set2,set3,set4,set5); setover = rbind(set1234over,set1235over,set1245over,set1345over,set2345over); setunder = rbind(set1234under,set1235under,set1245under,set1345under,set2345under); setSMOTE = rbind(set1234SMOTE,set1235SMOTE,set1245SMOTE,set1345SMOTE,set2345SMOTE); CARTIMP = function(train,minsplit) { cp = 0.0001; control = rpart.control(minsplit = minsplit, cp = cp, maxsurrogate = 0, usesurrogate = 0, xval = 0); CART = rpart(Severity~., data = train, control = control); Imp = CART$variable.importance; return(Imp); } OrigImp = as.data.frame(CARTIMP(setorig,55)); OverImp = as.data.frame(CARTIMP(setover,200)); UnderImp = as.data.frame(CARTIMP(setunder,100)); SMOTEImp = as.data.frame(CARTIMP(setSMOTE,250));
/CART/CART_VarImp_Over.R
no_license
aammeenn/Traffic-Accident-Severity
R
false
false
785
r
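A possible follow-up sketch for inspecting one of the importance vectors; it only assumes the objects defined above and relies on rpart's $variable.importance being a named numeric vector, so it can be sorted and plotted directly.
imp_orig <- CARTIMP(setorig, 55)
barplot(sort(imp_orig, decreasing = TRUE), las = 2,
        main = "CART variable importance (original training data)")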