column         dtype          values
content        large_string   lengths 0 to 6.46M
path           large_string   lengths 3 to 331
license_type   large_string   2 values
repo_name      large_string   lengths 5 to 125
language       large_string   1 value
is_vendor      bool           2 classes
is_generated   bool           2 classes
length_bytes   int64          4 to 6.46M
extension      large_string   75 values
text           string         lengths 0 to 6.46M
library(chemspiderapi)

context("check_dataSources")

test_that("check_dataSources() fails if more than 20 data sources are provided.", {
  expect_error(
    .check_dataSources(dataSources = c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k",
                                       "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v"))
  )
})

test_that("check_dataSources() is silent if a single data source is provided.", {
  expect_silent(
    .check_dataSources(dataSources = "PubChem")
  )
})

test_that("check_dataSources() stays silent if two data sources are provided.", {
  expect_silent(
    .check_dataSources(dataSources = c("PubChem", "ChEBI"))
  )
})
path: /tests/testthat/test-check_dataSources.R | license_type: permissive | repo_name: mattoslmp/chemspiderapi | language: R | is_vendor: false | is_generated: false | length_bytes: 635 | extension: r
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pollen.equiv.R
\docType{data}
\name{pollen.equiv}
\alias{pollen.equiv}
\title{A table to convert the pollen taxa identified by investigators to standardized lists.}
\format{a \code{data.frame} object}
\usage{
translate.table
}
\description{
A list of standardized (published) taxonomies from the literature to help
standardize taxonomies for synthesis work.
}
\details{
Taxon conversion table (readable).
}
\author{
Simon J. Goring \email{simon.j.goring@gmail.com}; Jeremiah Marsicek
}
path: /man/pollen.equiv.Rd | license_type: no_license | repo_name: parthasen/neotoma | language: R | is_vendor: false | is_generated: false | length_bytes: 573 | extension: rd
CalculateTaxableIncome <- function(Data, Periods = 1) {
  Data[, ':=' (P_Income_Taxable    = pmax(0, P_Income_PrivateTaxable + P_Benefits_All_Taxable),
               P_Income_NonTaxable = P_Income_PrivateNonTaxable + P_Benefits_All_NonTaxable)]
  Data[, P_Income_Total := P_Income_PrivateTaxable + P_Benefits_All_Taxable + P_Income_NonTaxable]
}

attr(CalculateTaxableIncome, "output") <- c("P_Income_Taxable", "P_Income_NonTaxable", "P_Income_Total")
attr(CalculateTaxableIncome, "input") <- c("P_Income_PrivateTaxable", "P_Benefits_All_Taxable",
                                           "P_Income_PrivateNonTaxable", "P_Benefits_All_NonTaxable")
path: /procedures/CalculateTaxableIncome.R | license_type: permissive | repo_name: kevinxperese/TAWA_microsimulation_model | language: R | is_vendor: false | is_generated: false | length_bytes: 827 | extension: r
# apply.R
# `apply` is used to evaluate a function (often an anonymous one)
# over the margins of an array
# - it is most often used to apply a function to the rows or columns of a matrix
# - it can be used with general arrays, e.g., taking the average of an array of matrices
# - it is not really faster than writing a loop, but it works in one line!

# > apply
# function (X, MARGIN, FUN, ..., simplify = TRUE)
# {
#     FUN <- match.fun(FUN)
#     simplify <- isTRUE(simplify)
#     dl <- length(dim(X))
#     if (!dl)
#         stop("dim(X) must have a positive length")
#     if (is.object(X))
#         X <- if (dl == 2L) as.matrix(X) else as.array(X)
#     d <- dim(X)
#     dn <- dimnames(X)
#     ds <- seq_len(dl)
#     if (is.character(MARGIN)) {
#         if (is.null(dnn <- names(dn)))
#             stop("'X' must have named dimnames")
#         MARGIN <- match(MARGIN, dnn)
#         if (anyNA(MARGIN))
#             stop("not all elements of 'MARGIN' are names of dimensions")
#     }
#     d.call <- d[-MARGIN]
#     d.ans <- d[MARGIN]
#     if (anyNA(d.call) || anyNA(d.ans))
#         stop("'MARGIN' does not match dim(X)")
#     s.call <- ds[-MARGIN]
#     s.ans <- ds[MARGIN]
#     dn.call <- dn[-MARGIN]
#     dn.ans <- dn[MARGIN]
#     d2 <- prod(d.ans)
#     if (d2 == 0L) {
#         newX <- array(vector(typeof(X), 1L), dim = c(prod(d.call), 1L))
#         ans <- forceAndCall(1, FUN, if (length(d.call) < 2L) newX[, 1] else array(newX[, 1L], d.call, dn.call), ...)
#         return(if (is.null(ans)) ans else if (length(d.ans) < 2L) ans[1L][-1L] else array(ans, d.ans, dn.ans))
#     }
#     newX <- aperm(X, c(s.call, s.ans))
#     dim(newX) <- c(prod(d.call), d2)
#     ans <- vector("list", d2)
#     if (length(d.call) < 2L) {
#         if (length(dn.call))
#             dimnames(newX) <- c(dn.call, list(NULL))
#         for (i in 1L:d2) {
#             tmp <- forceAndCall(1, FUN, newX[, i], ...)
#             if (!is.null(tmp))
#                 ans[[i]] <- tmp
#         }
#     }
#     else for (i in 1L:d2) {
#         tmp <- forceAndCall(1, FUN, array(newX[, i], d.call, dn.call), ...)
#         if (!is.null(tmp))
#             ans[[i]] <- tmp
#     }
#     ans.list <- !simplify || is.recursive(ans[[1L]])
#     l.ans <- length(ans[[1L]])
#     ans.names <- names(ans[[1L]])
#     if (!ans.list)
#         ans.list <- any(lengths(ans) != l.ans)
#     if (!ans.list && length(ans.names)) {
#         all.same <- vapply(ans, function(x) identical(names(x), ans.names), NA)
#         if (!all(all.same))
#             ans.names <- NULL
#     }
#     len.a <- if (ans.list) d2 else length(ans <- unlist(ans, recursive = FALSE))
#     if (length(MARGIN) == 1L && len.a == d2) {
#         names(ans) <- if (length(dn.ans[[1L]])) dn.ans[[1L]]
#         ans
#     }
#     else if (len.a == d2)
#         array(ans, d.ans, dn.ans)
#     else if (len.a && len.a%%d2 == 0L) {
#         if (is.null(dn.ans))
#             dn.ans <- vector(mode = "list", length(d.ans))
#         dn1 <- list(ans.names)
#         if (length(dn.call) && !is.null(n1 <- names(dn <- dn.call[1])) && nzchar(n1) && length(ans.names) == length(dn[[1]]))
#             names(dn1) <- n1
#         dn.ans <- c(dn1, dn.ans)
#         array(ans, c(len.a%/%d2, d.ans), if (!is.null(names(dn.ans)) || !all(vapply(dn.ans, is.null, NA))) dn.ans)
#     }
#     else ans
# }

x <- matrix(rnorm(200), 20, 10)
x
apply(x, 2, mean)   # applying function mean to dimension 2: cols
apply(x, 1, sum)    # applying function sum to dimension 1: rows

# for sums and means of matrix dimensions, we have optimized shortcuts
rowSums(x)   # same as apply(x, 1, sum)
rowMeans(x)  # same as apply(x, 1, mean)
colSums(x)   # same as apply(x, 2, sum)
colMeans(x)  # same as apply(x, 2, mean)

# quantiles of the rows of a matrix
x <- matrix(rnorm(200), 20, 10)
apply(x, 1, quantile, probs = c(0.25, 0.75))

# average matrix in an array
a <- array(rnorm(2 * 2 * 10), c(2, 2, 10))
apply(a, c(1, 2), mean)
rowMeans(a, dims = 2)
path: /src/loopfunctions/apply.R | license_type: no_license | repo_name: positronn/coursera-datascience-lab | language: R | is_vendor: false | is_generated: false | length_bytes: 4,438 | extension: r
library(symbolicDA)

### Name: interscal.SDA
### Title: Multidimensional scaling for symbolic interval data - InterScal
###   algorithm
### Aliases: interscal.SDA
### Keywords: MDS Multidimensional scaling

### ** Examples

# LONG RUNNING - UNCOMMENT TO RUN
#sda <- parse.SO("samochody")
#data <- sda$indivIC
#mds <- interscal.SDA(data, d=2, calculateDist=TRUE)
path: /data/genthat_extracted_code/symbolicDA/examples/interscal.sda.rd.R | license_type: no_license | repo_name: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 360 | extension: r
testlist <- list(
  A = structure(c(2.31584292726466e+77, 9.53818252170339e+295, 1.22810536108214e+146,
                  4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)),
  B = structure(0, .Dim = c(1L, 1L))
)
result <- do.call(multivariance:::match_rows, testlist)
str(result)
path: /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613109665-test.R | license_type: no_license | repo_name: akhikolla/updatedatatype-list3 | language: R | is_vendor: false | is_generated: false | length_bytes: 257 | extension: r
#' @importFrom utils object.size
format_size <- function(size) format(size, standard = "auto", unit = "auto", digits = 2L)
path: /R/format_size.R | license_type: no_license | repo_name: alastairrushworth/inspectdf | language: R | is_vendor: false | is_generated: false | length_bytes: 122 | extension: r
library(readxl)

load.spec.defaults = function() {
  spec.param = list(
    mort = data.matrix(read_excel('Data/nathist-params-defaults.xlsx', range="B4:I10",
                                  sheet="Estimates", col_names=sprintf("X%d", 1:8))),
    dist = data.matrix(read_excel('Data/nathist-params-defaults.xlsx', range="B25:I31",
                                  sheet="Estimates", col_names=sprintf("X%d", 1:8))),
    prog = data.matrix(read_excel('Data/nathist-params-defaults.xlsx', range="B15:I20",
                                  sheet="Estimates", col_names=sprintf("X%d", 1:8))))
  spec.param = lapply(spec.param, function(par) {
    stages = cd4.names[1:nrow(par)]
    arr = array(dim=c(nrow(par), 4, 2), dimnames=list(stages, age.names, sex.names))
    arr[,,1] = par[,1:4]
    arr[,,2] = par[,5:8]
    return(arr)
  })
  spec.param$dist = 0.01 * spec.param$dist # Convert from probabilities to proportions
  spec.param$art = data.matrix(read_excel('Data/nathist-params-defaults.xlsx', range="B34:B34",
                                          sheet="Estimates", col_names="X1"))
  return(spec.param)
}
path: /Utils/load-spec-defaults.R | license_type: permissive | repo_name: rlglaubius/NaturalHistorySynthesis | language: R | is_vendor: false | is_generated: false | length_bytes: 985 | extension: r
########################
#load packages
library(Seurat, lib.loc = "/software/R-3.6.1-el7-x86_64/lib64/R/library") # Seurat 3 version
library(magrittr)
library(cowplot)
rm(list=ls())

########################
#settings
batch_effects <- "small"
projectName = "seurat3"
filter_genes = F
filter_cells = F
normData = F
Datascaling = T
min_cells = 10 #5
min_genes = 300
norm_method = "LogNormalize"
scale_factor = 10000
visualize = T
outfile_prefix = "broad_pbmc1_10x_v2_A_simulation"
save_obj = F
nhvg = 5000
numVG = 300
regressUMI = F
selection_method = "vst"

src_dir = "/home/qizhan/scratch-midway2/others/dmatch_review/things_done_by_me/july17th-july23rd2020/broad_PBMC_10x_simulate_batch_effects/seurat3/"
working_dir = paste0("/project2/mengjiechen/qizhan/qizhan/dmatch/dmatch_response_to_review/others/broad_PBMC_10x_batch_effects_simulation_and_correction/corrected_data/pbmc1_10x_v2_A/", batch_effects, "/")
read_dir = paste0("/project2/mengjiechen/qizhan/qizhan/dmatch/dmatch_response_to_review/others/broad_PBMC_10x_batch_effects_simulation_and_correction/simulated_data/pbmc1_10x_v2_A/", batch_effects, "/")

ratio_names <- c("1_6", "1_3", "1_2")

for (i in 1:10) {
  for (j in 1:3) {
    ratio_name <- ratio_names[j]
    file <- paste0("pbmc1_10x_v2_A_", ratio_name, "_rep", i)
    print(file)
    load(paste0(read_dir, file))

    ########################
    # load data
    sample1 <- sim_list[[1]]
    sample2 <- sim_list[[2]]
    meta1 <- sim_list[[3]]
    meta2 <- sim_list[[4]]
    expr_mat <- cbind(sample1, sample2)
    rownames(expr_mat) <- toupper(rownames(expr_mat))
    batch <- c(rep(1, ncol(sample1)), rep(2, ncol(sample2)))
    celltype <- c(meta1, meta2)
    metadata <- data.frame("batch"=batch, "celltype"=celltype)
    rownames(metadata) <- colnames(expr_mat)

    ########################
    # settings for running seurat3
    umapplot_filename = "_seurat3_umap"
    dmatch_out_filename = "_seurat3_out"
    metadata_out_filename = "_seurat3_metadata_out"
    pca_filename = "_seurat3_pca"
    batch_label = "batch"
    celltype_label = "celltype"
    npcs = 30
    saveout_dir = paste0("/project2/mengjiechen/qizhan/qizhan/dmatch/dmatch_response_to_review/others/broad_PBMC_10x_batch_effects_simulation_and_correction/corrected_data/pbmc1_10x_v2_A/seurat3/", batch_effects, "/results/")
    dmatch_out_filename = "_samples"
    dmatch_out_pcs_filename = "_samples_pcs"
    plotout_dir <- paste0("/project2/mengjiechen/qizhan/qizhan/dmatch/dmatch_response_to_review/others/broad_PBMC_10x_batch_effects_simulation_and_correction/corrected_data/pbmc1_10x_v2_A/seurat3/", batch_effects, "/plots/")
    umapplot_filename <- "_umap_plot"
    heatmap_filename <- "_heatmap"
    pcaplot_filename <- "_pcaplot"
    outfilename_prefix <- file
    visualize = T
    save_obj = F

    source(paste0(src_dir, 'call_seurat_3.R'))

    batch_list = seurat3_preprocess(
      expr_mat, metadata,
      normData = normData, Datascaling = Datascaling, regressUMI = regressUMI,
      min_cells = min_cells, min_genes = min_genes,
      norm_method = norm_method, scale_factor = scale_factor,
      numVG = numVG, nhvg = nhvg,
      batch_label = batch_label, celltype_label = celltype_label)

    batches = call_seurat3(batch_list, batch_label, celltype_label, npcs,
                           plotout_dir = plotout_dir, saveout_dir = saveout_dir,
                           outfilename_prefix = outfilename_prefix,
                           visualize = visualize, save_obj = save_obj)
  }
}
path: /runDifferentMethods/seurat3/run_seurat3_small.R | license_type: no_license | repo_name: qzhan321/Dmatch-data-code-rep | language: R | is_vendor: false | is_generated: false | length_bytes: 3,521 | extension: r
## plot 1

# Read the data from the text file
table <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    na.strings = "?", numerals = c("no.loss"))

# Subset the required data
subtable <- subset(table, (table$Date == "1/2/2007") | (table$Date == "2/2/2007"))

# Create a new DateTime column by merging date & time
subtable$DateTime <- paste(subtable$Date, subtable$Time)
subtable$DateTime <- strptime(subtable$DateTime, "%d/%m/%Y %H:%M:%S")

# Store in plot1.png
png("figure/plot1.png", width = 480, height = 480)
par(mfrow = c(1,1))
par(mar = c(5,4,2,2))
hist(subtable$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.off()
path: /plot1.R | license_type: no_license | repo_name: mchinnas/ExData_Plotting1 | language: R | is_vendor: false | is_generated: false | length_bytes: 735 | extension: r
# plot4.R

# set folder of project
setwd("C:/EDA_PROJECT1")

# if file not exist, download and unzip in subfolder "/data"
if(!file.exists("data/household_power_consumption.txt")) {
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(url, destfile="data.zip")
  unzip(zipfile="data.zip", exdir="data")
}

# load file to workspace
data1 <- read.table("C:/EDA_PROJECT1/data/household_power_consumption.txt",
                    header=TRUE, sep=";", dec=".", stringsAsFactors=FALSE)

# filter days 1 and 2 february 2007
data2 <- subset(data1, (data1$Date == "1/2/2007" | data1$Date == "2/2/2007"))

# create variable DateTime (the format string belongs inside as.POSIXct)
data2 <- transform(data2, DateTime=as.POSIXct(paste(Date, Time), format="%d/%m/%Y %H:%M:%S"))

# convert character to numeric
data2$Sub_metering_1 <- as.numeric(as.character(data2$Sub_metering_1))
data2$Sub_metering_2 <- as.numeric(as.character(data2$Sub_metering_2))
data2$Sub_metering_3 <- as.numeric(as.character(data2$Sub_metering_3))

attach(data2) # use the variables of data2

# 4 graphics in 2 x 2
par(mfrow = c(2, 2))

# generate graphic top-left
plot(DateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")

# generate graphic top-right
plot(DateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage")

# generate graphic bottom-left
plot(DateTime, Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(DateTime, Sub_metering_2, type="l", col="red")
lines(DateTime, Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=1, col=c("black", "red", "blue"))

# generate graphic bottom-right
plot(DateTime, Global_reactive_power, type = "l", col = "black",
     xlab = "datetime", ylab = colnames(data2)[4])

# generate output ("width", not "with")
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()

detach(data2)
path: /plot4.R | license_type: no_license | repo_name: CSoaresF/ExData_Plotting1 | language: R | is_vendor: false | is_generated: false | length_bytes: 2,055 | extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Regression_Description.R
\docType{package}
\name{Regression}
\alias{Regression}
\alias{Regression-package}
\title{Regression: A package for computing a linear regression model.}
\description{
The Regression package includes two classes, one of which is used for plotting.
}
\section{linreg function}{
Calculates all the needed variables and saves them in an object called formulas.
}
path: /man/Regression.Rd | license_type: no_license | repo_name: akilahmd/Rdata | language: R | is_vendor: false | is_generated: true | length_bytes: 465 | extension: rd
path: /rCharts/gr1/Granat_Merkel_lab5/Granat_Merkel_TWD_lab5.R | license_type: no_license | repo_name: Siemashko/TechnikiWizualizacjiDanych2018 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,119 | extension: r
# Question 1: Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
# Using the base plotting system, make a plot showing the total PM2.5 emission from all sources
# for each of the years 1999, 2002, 2005, and 2008.

# Needs plyr library
library(plyr)

# read datafiles
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

## Calculate total PM2.5 emissions by year
TotalPM25ByYear <- tapply(NEI$Emissions, NEI$year, sum)

## Create line graph
plot(TotalPM25ByYear, x = rownames(TotalPM25ByYear), type = "n", axes = FALSE,
     ylab = expression("Total PM"[2.5] * " Emission (in tons)"), xlab = "Year",
     main = expression("Total PM"[2.5] * " Emission (1999 - 2008)"))
points(TotalPM25ByYear, x = rownames(TotalPM25ByYear), pch = 16, col = "black")
lines(TotalPM25ByYear, x = rownames(TotalPM25ByYear), col = "blue")
axis(2)
axis(side = 1, at = seq(1999, 2008, by = 3))
box()

# save graph in a png file
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
path: /plot 1.R | license_type: no_license | repo_name: KrupaR/Exploratory-data-analysis-project-2 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,045 | extension: r
#' Plot American Football Field
#'
#' This function produces a plot of an American football field using ggplot2
#' objects. Originally, this function was created for the NFL Big Data Bowl 2021
#' on Kaggle (\url{https://www.kaggle.com/c/nfl-big-data-bowl-2021}). To keep
#' with the conventions of the datasets provided, the plotted field spans from
#' 0 - 120 in the x-direction and 0 - 53.3 in the y-direction. It is expected
#' that users will add other ggplot2 objects (e.g., points representing players,
#' text annotations) to produce complete visualizations.
#'
#' @param left_endzone_color Color of left end zone, specified in quotes
#' @param right_endzone_color Color of right end zone, specified in quotes
#' @param field_color Color of field (not including end zones), specified
#' in quotes
#' @param field_alpha Opacity of field color (not including end zones),
#' specified as numeric between 0.0 and 1.0
#' @param top_buffer Empty space provided on top of plot, specified as numeric
#' @param bottom_buffer Empty space provided at bottom of plot, specified as
#' numeric
#' @param left_buffer Empty space provided to left of plot, specified as numeric
#' @param right_buffer Empty space provided to right of plot, specified as
#' numeric
#' @param five_yd_lines Boolean value indicating whether to include white
#' vertical lines at each five yard increment
#' @param ydline_labels Boolean value indicating whether to include yard line
#' labels every ten yards
#' @param ydline_label_size Size of text used for yard line labels, specified
#' as numeric
#' @param outer_hash Boolean value indicating whether to include hash marks
#' outside of yard line labels (near sidelines)
#' @param inner_hash Boolean value indicating whether to include hash marks
#' near middle of field
#'
#' @return The output will be a plot of an American football field
#'
#' @examples
#' ggfootball()
#' ggfootball(left_endzone = "red", right_endzone = "blue",
#'            field_alpha = 0.7)
#' ggfootball() + geom_point(data =
#'   data.frame(x = c(10, 20), y = c(20, 30)),
#'   aes(x = x, y = y))
#'
#' @export
ggfootball <- function(left_endzone_color = "gray90", right_endzone_color = "gray90",
                       field_color = "green4", field_alpha = 0.85,
                       top_buffer = 1, bottom_buffer = 1, left_buffer = 1, right_buffer = 1,
                       five_yd_lines = TRUE, ydline_labels = TRUE, ydline_label_size = 4,
                       outer_hash = TRUE, inner_hash = FALSE) {

  # Make middle of field green
  gplot <- ggplot2::ggplot() +
    ggplot2::geom_rect(data = NULL, ggplot2::aes(xmin = 10, xmax = 110, ymin = 0, ymax = 53.3),
                       fill = field_color, color = "black", alpha = field_alpha) +
    # Add endzones
    ggplot2::geom_rect(data = NULL, ggplot2::aes(xmin = 0, xmax = 10, ymin = 0, ymax = 53.3),
                       fill = left_endzone_color, color = "black") +
    ggplot2::geom_rect(data = NULL, ggplot2::aes(xmin = 110, xmax = 120, ymin = 0, ymax = 53.3),
                       fill = right_endzone_color, color = "black") +
    # Format gridlines, tick marks, tick labels, and border of plot window
    ggplot2::theme_bw() +
    ggplot2::theme(panel.grid.minor = ggplot2::element_blank(),
                   panel.grid.major = ggplot2::element_blank(),
                   panel.border = ggplot2::element_blank(),
                   axis.text = ggplot2::element_blank(),
                   axis.ticks = ggplot2::element_blank(),
                   axis.title = ggplot2::element_blank(),
                   text = ggplot2::element_text(size = 16), #,
                   legend.position = "none" # Optional hiding of legend
    ) +
    # Add x and y axis limits
    ggplot2::lims(x = c(0 - left_buffer, 120 + right_buffer),
                  y = c(0 - bottom_buffer, 53.3 + top_buffer))

  # Add vertical lines at each 5-yard increment
  if(five_yd_lines) {
    # Create data frame with necessary x and y coordinates
    five_yard_df <- data.frame(x = seq(from = 15, to = 105, by = 5))

    # Add to existing plot
    gplot <- gplot +
      ggplot2::geom_segment(data = five_yard_df,
                            mapping = ggplot2::aes(x = x, xend = x, y = -Inf, yend = 53.3),
                            color = "white")
  }

  # Add yardline labels
  if(ydline_labels) {
    # Create data frame with labels and coordinates
    yard_labels_df <- data.frame(x = seq(from = 20, to = 100, by = 10),
                                 y = rep(x = 4, times = 9),
                                 digits = c(seq(from = 10, to = 50, by = 10),
                                            seq(from = 40, to = 10, by = -10)))

    # Add to existing plot
    gplot <- gplot +
      ggplot2::geom_text(data = yard_labels_df,
                         mapping = ggplot2::aes(x = x, y = y, label = digits),
                         color = "white", size = ydline_label_size)
    gplot <- gplot +
      ggplot2::geom_text(data = yard_labels_df,
                         mapping = ggplot2::aes(x = x, y = 53.3 - y, label = digits),
                         color = "white", angle = 180, size = ydline_label_size)
  }

  # Add outer hash marks to field
  if(outer_hash) {
    # Create data frame with hash mark x-coordinates
    hash_df <- data.frame(x = 11:109)

    # Add to existing plot
    gplot <- gplot +
      ggplot2::geom_segment(data = hash_df,
                            mapping = ggplot2::aes(x = x, xend = x, y = 0.5, yend = 1.5),
                            color = "white") +
      ggplot2::geom_segment(data = hash_df,
                            mapping = ggplot2::aes(x = x, xend = x, y = 51.8, yend = 52.8),
                            color = "white")
  }

  # Add inner hash marks to field
  if(inner_hash) {
    # Create data frame with hash mark x-coordinates
    hash_df <- data.frame(x = 11:109)

    # Add to existing plot
    gplot <- gplot +
      ggplot2::geom_segment(data = hash_df,
                            mapping = ggplot2::aes(x = x, xend = x, y = 17.8, yend = 18.8),
                            color = "white") +
      ggplot2::geom_segment(data = hash_df,
                            mapping = ggplot2::aes(x = x, xend = x, y = 34.6, yend = 35.6),
                            color = "white")
  }

  # Create final solid black outlines for the field
  gplot <- gplot +
    ggplot2::geom_rect(data = NULL, ggplot2::aes(xmin = 10, xmax = 110, ymin = 0, ymax = 53.3),
                       fill = NA, color = "black") +
    ggplot2::geom_rect(data = NULL, ggplot2::aes(xmin = 0, xmax = 120, ymin = 0, ymax = 53.3),
                       fill = NA, color = "black")

  # Return plot
  gplot
}
path: /R/ggfootball.R | license_type: permissive | repo_name: robkravec/ggfootball | language: R | is_vendor: false | is_generated: false | length_bytes: 7,281 | extension: r
library(shiny)
library(tidyverse)
library(ggplot2)
library(shinythemes)

# Input Data
totpay <- readRDS("TotalPay.rds")
highp <- readRDS("HighPay.rds")
meanp <- readRDS("MeanPay.rds")
medp <- readRDS("MedianPay.rds")
cost <- readRDS("HighCost.rds")

# UI part
ui <- navbarPage(
  title = "LA Employee Payroll",
  theme = shinytheme("flatly"),
  tabPanel("Total Payroll",
           titlePanel("Total payroll by LA city"),
           plotOutput(outputId = "Totalpay")
  ),
  tabPanel("Who Earned Most",
           titlePanel("Who Earned Most?"),
           sidebarLayout(
             sidebarPanel(
               numericInput(inputId = "num1", label = "Number of highest paid employees:", value = 10),
               selectInput(inputId = "Year1", label = "Year:",
                           choices = c("2017", "2016", "2015", "2014", "2013"), selected = "2017")
             ),
             mainPanel(tableOutput(outputId = "Highpay"))
           )
  ),
  tabPanel("Which Departments Earned Most",
           titlePanel("Which Departments Earned Most?"),
           sidebarLayout(
             sidebarPanel(
               numericInput(inputId = "num2", label = "Number of department:", value = 5),
               selectInput(inputId = "Year2", label = "Year:",
                           choices = c("2017", "2016", "2015", "2014", "2013"), selected = "2017"),
               radioButtons(inputId = "method", label = "Methods:",
                            choices = c("Mean", "Median"), "Median")
             ),
             mainPanel(tableOutput(outputId = "Mpay"))
           )
  ),
  tabPanel("Which Departments Cost Most",
           titlePanel("Which Departments Cost Most?"),
           sidebarLayout(
             sidebarPanel(
               numericInput(inputId = "num3", label = "Number of department:", value = 5),
               selectInput(inputId = "Year3", label = "Year:",
                           choices = c("2017", "2016", "2015", "2014", "2013"), selected = "2017")
             ),
             mainPanel(tableOutput(outputId = "Highcost"))
           )
  )
)

# server part
server <- function(input, output) {
  # For question 1
  output$Totalpay <- renderPlot({
    ggplot(data = totpay, aes(x = Year, y = Payment/1000000, fill = Type)) +
      geom_col() +
      labs(x = "Year", y = "Pay(million)") +
      scale_fill_manual(values = c("#D55E00", "#009E73", "#0072B2"),
                        name = "Type of Pay",
                        breaks = c("Tbase", "Tover", "Tother"),
                        labels = c("Total Basepay", "Total Overtimepay", "Total Otherpay"))
  })

  # For question 2
  output$Highpay <- renderTable({
    highp %>%
      filter(Year == input$Year1) %>%
      head(input$num1)
  })

  # Which departments earn most
  output$Mpay <- renderTable({
    if (input$method == "Mean") {
      meanp %>%
        filter(Year == input$Year2) %>%
        head(input$num2)
    } else {
      medp %>%
        filter(Year == input$Year2) %>%
        head(input$num2)
    }
  })

  ## Which departments cost most?
  output$Highcost <- renderTable({
    cost %>%
      filter(Year == input$Year3) %>%
      head(input$num3)
  })
}

shinyApp(ui = ui, server = server)
path: /hw3/app.R | license_type: no_license | repo_name: EudoraHan/biostat-m280-2019-winter | language: R | is_vendor: false | is_generated: false | length_bytes: 4,492 | extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recplot2.R
\name{enve.recplot2.extractWindows}
\alias{enve.recplot2.extractWindows}
\title{Enveomics: Recruitment Plot (2) Extract Windows}
\usage{
enve.recplot2.extractWindows(
  rp,
  peak,
  lower.tail = TRUE,
  significance = 0.05,
  seq.names = FALSE
)
}
\arguments{
\item{rp}{Recruitment plot, a \code{\link{enve.RecPlot2}} object.}

\item{peak}{Peak, an \code{\link{enve.RecPlot2.Peak}} object. If list, it is assumed
to be a list of \code{\link{enve.RecPlot2.Peak}} objects, in which case the core
peak is used (see \code{\link{enve.recplot2.corePeak}}).}

\item{lower.tail}{If \code{FALSE}, it returns windows significantly above the peak
in sequencing depth.}

\item{significance}{Significance threshold (alpha) to select windows.}

\item{seq.names}{Returns subject sequence names instead of a vector of Booleans.
If the recruitment plot was generated with named position bins (e.g, using
\code{pos.breaks=0} or a two-column \code{pos.breaks.tsv}), it returns a vector of
characters (the sequence identifiers), otherwise it returns a data.frame with a
name column and two columns of coordinates.}
}
\value{
Returns a vector of logicals if \code{seq.names = FALSE}. If \code{seq.names = TRUE},
it returns a data.frame with five columns: \code{name.from}, \code{name.to},
\code{pos.from}, \code{pos.to}, and \code{seq.name}
(see \code{\link{enve.recplot2.coordinates}}).
}
\description{
Extract windows significantly below (or above) the peak in sequencing depth.
}
\author{
Luis M. Rodriguez-R [aut, cre]
}
path: /enveomics.R/man/enve.recplot2.extractWindows.Rd | license_type: permissive | repo_name: lmrodriguezr/enveomics | language: R | is_vendor: false | is_generated: true | length_bytes: 1,594 | extension: rd
#' @title Referral matrix selfloops
#'
#' @description Provides a list of initiators and completers of redo selfloops
#'
#' @inherit activity_frequency params references seealso return
#' @seealso \code{\link{number_of_selfloops}}
#'
#' @concept metrics_repetition
#'
#' @export
#'
redo_selfloops_referral_matrix <- function(log, eventlog = deprecated()) {
  UseMethod("redo_selfloops_referral_matrix")
}

#' @describeIn redo_selfloops_referral_matrix Compute matrix for eventlog
#' @export
redo_selfloops_referral_matrix.eventlog <- function(log, eventlog = deprecated()) {
  first_resource <- NULL
  last_resource <- NULL

  if(lifecycle::is_present(eventlog)) {
    lifecycle::deprecate_warn(
      when = "0.9.0",
      what = "redo_selfloops_referral_matrix(eventlog)",
      with = "redo_selfloops_referral_matrix(log)")
    log <- eventlog
  }

  # Use the (possibly reassigned) log argument rather than the deprecated eventlog argument
  log %>%
    redo_selfloops() %>%
    group_by(first_resource, last_resource) %>%
    summarize(absolute = n()) -> output

  class(output) <- c("referral_matrix", class(output))
  attr(output, "type") <- "selfloop"
  attr(output, "mapping") <- mapping(log)

  return(output)
}

#' @describeIn redo_selfloops_referral_matrix Compute matrix for activitylog
#' @export
redo_selfloops_referral_matrix.activitylog <- function(log, eventlog = deprecated()) {

  if(lifecycle::is_present(eventlog)) {
    lifecycle::deprecate_warn(
      when = "0.9.0",
      what = "redo_selfloops_referral_matrix(eventlog)",
      with = "redo_selfloops_referral_matrix(log)")
    log <- eventlog
  }

  log %>%
    to_eventlog() %>%
    redo_selfloops_referral_matrix()
}
path: /R/redo_selfloops_referral_matrix.R | license_type: no_license | repo_name: cran/edeaR | language: R | is_vendor: false | is_generated: false | length_bytes: 1,634 | extension: r
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Generate filename in the right format
  ids_filenames <- paste(directory, "/", formatC(id, width = 3, flag = "0"), ".csv", sep = "")

  # Setup column classes to improve read.csv speed
  colCl <- c("Date", "numeric", "numeric", "numeric")

  # Create empty data frame
  air_pol_df <- data.frame()

  # Iterate for all ids
  for(file in ids_filenames) {
    # Bind new data to data frame
    air_pol_df <- rbind(air_pol_df, read.csv(file, colClasses = colCl, comment.char = ""))
  }

  # Filter NA cases and return mean
  good_rows <- complete.cases(air_pol_df)
  filtered_airpol_df <- air_pol_df[good_rows, ]
  mean(filtered_airpol_df[, pollutant])
}
path: /r_prog/assign_1/pollutantmean.R | license_type: no_license | repo_name: jsousafl/R_datascience_spec | language: R | is_vendor: false | is_generated: false | length_bytes: 690 | extension: r
#' @export
makeRLearner.classif.lightgbm = function() {
  makeRLearnerClassif(
    cl = "classif.lightgbm",
    package = "lightgbm",
    par.set = makeParamSet(
      makeUntypedLearnerParam("early.stopping.data"),
      makeIntegerLearnerParam("nrounds", lower = 1, default = 10),
      makeDiscreteLearnerParam("metric", values = c("map", "auc", "binary_logloss",
        "binary_error", "multi_logloss", "multi_error")),
      makeIntegerLearnerParam("verbose", lower = -1, upper = 1, tunable = FALSE),
      makeLogicalLearnerParam("record", default = TRUE, tunable = FALSE),
      makeIntegerLearnerParam("eval_freq", lower = 1, tunable = FALSE, requires = quote(verbose > 0)),
      makeUntypedLearnerParam("init_model"),
      makeIntegerLearnerParam("early_stopping_rounds", lower = 1),
      makeDiscreteLearnerParam("boosting", values = c("gbdt", "dart", "rf"), default = "gbdt",
        requires = quote(boosting != "rf" || bagging_freq > 0 && bagging_fraction < 1 && feature_fraction < 1)),
      makeNumericLearnerParam("learning_rate", lower = 0, upper = 1, default = 0.1),
      makeIntegerLearnerParam("num_leaves", lower = 1, default = 31),
      makeDiscreteLearnerParam("tree_learner", values = c("serial", "feature", "data", "voting"), default = "serial"),
      makeIntegerLearnerParam("num_threads", lower = 1),
      makeDiscreteLearnerParam("device", values = c("cpu", "gpu"), default = "cpu"),
      makeIntegerLearnerParam("max_depth", lower = -1, default = -1),
      makeNumericLearnerParam("min_sum_hessian_in_leaf", lower = 0, default = 1e-3),
      makeNumericLearnerParam("feature_fraction", lower = 0, upper = 1, default = 1),
      makeNumericLearnerParam("bagging_fraction", lower = 0, upper = 1, default = 1),
      makeIntegerLearnerParam("bagging_freq", lower = 0, default = 0),
      makeNumericLearnerParam("lambda_l1", lower = 0, default = 0),
      makeNumericLearnerParam("lambda_l2", lower = 0, default = 0),
      makeNumericLearnerParam("min_split_gain", lower = 0, default = 0),
      makeNumericLearnerParam("drop_rate", lower = 0, upper = 1, default = 0.1, requires = quote(boosting == "dart")),
      makeNumericLearnerParam("skip_drop", lower = 0, upper = 1, default = 0.5, requires = quote(boosting == "dart")),
      makeIntegerLearnerParam("max_drop", lower = 1, default = 50, requires = quote(boosting == "dart")),
      makeLogicalLearnerParam("xgboost_dart_mode", default = FALSE),
      makeIntegerLearnerParam("max_cat_threshold", lower = 0, default = 32),
      makeNumericLearnerParam("cat_l2", lower = 0, default = 10),
      makeIntegerLearnerParam("max_cat_to_onehot", lower = 0, default = 4)
    ),
    properties = c("twoclass", "multiclass", "numerics", "factors", "prob", "missings"),
    name = "Light Gradient Boosting Machine",
    short.name = "lightgbm",
    note = ""
  )
}

#' @export
trainLearner.classif.lightgbm = function(.learner, .task, .subset, .weights = NULL,
                                         early.stopping.data = NULL, metric, ...) {
  pv = list(...)
  nc = length(getTaskDesc(.task)$class.levels)
  train = getTaskData(.task, .subset, target.extra = TRUE)
  feat.cols = colnames(train$data)[vlapply(train$data, is.factor)]
  prep = lightgbm::lgb.prepare_rules(train$data)
  pv$data = lightgbm::lgb.Dataset.construct(
    lightgbm::lgb.Dataset(data.matrix(prep$data), label = as.numeric(train$target) - 1,
                          categorical_feature = feat.cols))
  if (!is.null(early.stopping.data))
    pv$valids = list(test = lightgbm::lgb.Dataset.create.valid(pv$data,
      data.matrix(early.stopping.data$data), label = as.numeric(early.stopping.data$target) - 1))
  pv$metric = coalesce(metric, "")
  if (nc == 2) {
    pv$objective = "binary"
    pv$num_class = 1
  } else {
    pv$objective = "multiclass"
    pv$num_class = nc
  }
  mod = do.call(lightgbm::lgb.train, pv)
  return(list(mod = mod, rules = prep$rules))
}

#' @export
predictLearner.classif.lightgbm = function(.learner, .model, .newdata, ...) {
  td = .model$task.desc
  m = .model$learner.model
  cls = td$class.levels
  nc = length(cls)
  .newdata = data.matrix(lightgbm::lgb.prepare_rules(.newdata, rules = m$rules)$data)
  p = predict(m$mod, .newdata)
  if (nc == 2) {
    y = matrix(0, ncol = 2, nrow = nrow(.newdata))
    colnames(y) = cls
    y[, 1L] = 1 - p
    y[, 2L] = p
    if (.learner$predict.type == "prob") {
      return(y)
    } else {
      p = colnames(y)[max.col(y)]
      names(p) = NULL
      p = factor(p, levels = colnames(y))
      return(p)
    }
  } else {
    p = matrix(p, nrow = length(p) / nc, ncol = nc, byrow = TRUE)
    colnames(p) = cls
    if (.learner$predict.type == "prob") {
      return(p)
    } else {
      ind = max.col(p)
      cns = colnames(p)
      return(factor(cns[ind], levels = cns))
    }
  }
}
/Backup/classif.lightgbm_MLR_implementation.R
no_license
falrrema/Santander_Value_Prediction_Proyect
R
false
false
5,138
r
#' @export makeRLearner.classif.lightgbm = function() { makeRLearnerClassif( cl = "classif.lightgbm", package = "lightgbm", par.set = makeParamSet( makeUntypedLearnerParam("early.stopping.data"), makeIntegerLearnerParam("nrounds", lower = 1, default = 10), makeDiscreteLearnerParam("metric", values = c("map", "auc", "binary_logloss", "binary_error", "multi_logloss", "multi_error")), makeIntegerLearnerParam("verbose", lower = -1, upper = 1, tunable = FALSE), makeLogicalLearnerParam("record", default = TRUE, tunable = FALSE), makeIntegerLearnerParam("eval_freq", lower = 1, tunable = FALSE, requires = quote(verbose > 0)), makeUntypedLearnerParam("init_model"), makeIntegerLearnerParam("early_stopping_rounds", lower = 1), makeDiscreteLearnerParam("boosting", values = c("gbdt", "dart", "rf"), default = "gbdt", requires = quote(boosting != "rf" || bagging_freq > 0 && bagging_fraction < 1 && feature_fraction < 1)), makeNumericLearnerParam("learning_rate", lower = 0, upper = 1, default = 0.1), makeIntegerLearnerParam("num_leaves", lower = 1, default = 31), makeDiscreteLearnerParam("tree_learner", values = c("serial", "feature", "data", "voting"), default = "serial"), makeIntegerLearnerParam("num_threads", lower = 1), makeDiscreteLearnerParam("device", values = c("cpu", "gpu"), default = "cpu"), makeIntegerLearnerParam("max_depth", lower = -1, default = -1), makeNumericLearnerParam("min_sum_hessian_in_leaf", lower = 0, default = 1e-3), makeNumericLearnerParam("feature_fraction", lower = 0, upper = 1, default = 1), makeNumericLearnerParam("bagging_fraction", lower = 0, upper = 1, default = 1), makeIntegerLearnerParam("bagging_freq", lower = 0, default = 0), makeNumericLearnerParam("lambda_l1", lower = 0, default = 0), makeNumericLearnerParam("lambda_l2", lower = 0, default = 0), makeNumericLearnerParam("min_split_gain", lower = 0, default = 0), makeNumericLearnerParam("drop_rate", lower = 0, upper = 1, default = 0.1, requires = quote(boosting == "dart")), makeNumericLearnerParam("skip_drop", lower = 0, upper = 1, default = 0.5, requires = quote(boosting == "dart")), makeIntegerLearnerParam("max_drop", lower = 1, default = 50, requires = quote(boosting == "dart")), makeLogicalLearnerParam("xgboost_dart_mode", default = FALSE), makeIntegerLearnerParam("max_cat_threshold", lower = 0, default = 32), makeNumericLearnerParam("cat_l2", lower = 0, default = 10), makeIntegerLearnerParam("max_cat_to_onehot", lower = 0, default = 4) ), properties = c("twoclass", "multiclass", "numerics", "factors", "prob", "missings", "factors"), name = "Light Gradient Boosting Machine", short.name = "lightgbm", note = "" ) } #' @export trainLearner.classif.lightgbm = function(.learner, .task, .subset, .weights = NULL, early.stopping.data = NULL, metric, ...) { pv = list(...) 
nc = length(getTaskDesc(.task)$class.levels) train = getTaskData(.task, .subset, target.extra = TRUE) feat.cols = colnames(train$data)[vlapply(train$data, is.factor)] prep = lightgbm::lgb.prepare_rules(train$data) pv$data = lightgbm::lgb.Dataset.construct(lightgbm::lgb.Dataset(data.matrix(prep$data), label = as.numeric(train$target) - 1, categorical_feature = feat.cols)) if (!is.null(early.stopping.data)) pv$valids = list(test = lightgbm::lgb.Dataset.create.valid(pv$data, data.matrix(early.stopping.data$data), label = as.numeric(early.stopping.data$target) - 1)) pv$metric = coalesce(metric, "") if(nc == 2) { pv$objective = "binary" pv$num_class = 1 } else { pv$objective = "multiclass" pv$num_class = nc } mod = do.call(lightgbm::lgb.train, pv) return(list(mod = mod, rules = prep$rules)) } #' @export predictLearner.classif.lightgbm = function(.learner, .model, .newdata, ...) { td = .model$task.desc m = .model$learner.model cls = td$class.levels nc = length(cls) .newdata = data.matrix(lightgbm::lgb.prepare_rules(.newdata, rules = m$rules)$data) p = predict(m$mod, .newdata) if (nc == 2) { y = matrix(0, ncol = 2, nrow = nrow(.newdata)) colnames(y) = cls y[, 1L] = 1 - p y[, 2L] = p if (.learner$predict.type == "prob") { return(y) } else { p = colnames(y)[max.col(y)] names(p) = NULL p = factor(p, levels = colnames(y)) return(p) } } else { p = matrix(p, nrow = length(p) / nc, ncol = nc, byrow = TRUE) colnames(p) = cls if (.learner$predict.type == "prob") { return(p) } else { ind = max.col(p) cns = colnames(p) return(factor(cns[ind], levels = cns)) } } }
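A hedged usage sketch for the custom mlr learner defined above; it assumes the learner definitions have been sourced into the session and that both mlr and lightgbm are installed (the tuning values are arbitrary):

library(mlr)
library(lightgbm)
lrn <- makeLearner("classif.lightgbm", predict.type = "prob", nrounds = 50, num_leaves = 31)
mod <- train(lrn, iris.task)           # iris.task ships with mlr
pred <- predict(mod, iris.task)
performance(pred, measures = mmce)     # mean misclassification error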
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(climr)

# Define the server logic required to draw the climate fit plot
shinyServer(function(input, output) {

  output$distPlot <- renderPlot({
    data_climr = load_clim(input$data)
    fit_climr = fit(data_climr, data_type = input$scale, fit_type = input$smoother)
    plot(fit_climr, time_grid = pretty(fit_climr$data$x, n = input$grid))
  })

})
/w9/climr_shiny/server.R
no_license
iant04128591/STAT40830
R
false
false
597
r
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(climr)

# Define the server logic required to draw the climate fit plot
shinyServer(function(input, output) {

  output$distPlot <- renderPlot({
    data_climr = load_clim(input$data)
    fit_climr = fit(data_climr, data_type = input$scale, fit_type = input$smoother)
    plot(fit_climr, time_grid = pretty(fit_climr$data$x, n = input$grid))
  })

})
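The record above contains only server.R; a minimal sketch of a matching ui.R, where the input ids mirror what the server reads (input$data, input$scale, input$smoother, input$grid) and the choice values are assumptions rather than the climr package's actual options:

library(shiny)
shinyUI(fluidPage(
  titlePanel("climr explorer"),
  sidebarLayout(
    sidebarPanel(
      selectInput("data", "Data set", choices = c("GLB", "NH", "SH")),
      selectInput("scale", "Time scale", choices = c("yearly", "quarterly", "monthly")),
      selectInput("smoother", "Smoother", choices = c("loess", "smooth.spline")),
      numericInput("grid", "Grid points", value = 100, min = 10)
    ),
    mainPanel(plotOutput("distPlot"))
  )
))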
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_dv.R
\name{summary_dv}
\alias{summary_dv}
\title{summary_dv}
\usage{
summary_dv(site)
}
\arguments{
\item{site}{character USGS site ID}
}
\description{
Calculate the minimum and maximum day of year from the daily values for a USGS site.
}
/man/summary_dv.Rd
no_license
rbcarvin/demoPackage
R
false
true
270
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_dv.R
\name{summary_dv}
\alias{summary_dv}
\title{summary_dv}
\usage{
summary_dv(site)
}
\arguments{
\item{site}{character USGS site ID}
}
\description{
Calculate the minimum and maximum day of year from the daily values for a USGS site.
}
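The Rd above documents no example; a short call sketch (the gage number is only an illustrative USGS site id):

summary_dv("05427718")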
for (f in list.files("Rcode", full.names = TRUE)) {
  source(f)
}
files = commandArgs(TRUE)
l = graphInput(files)
cat(txtToLp(l[[1]], l[[2]]))
/other/autoRun.R
no_license
antongt/DenseProject
R
false
false
139
r
for (f in list.files("Rcode", full.names = TRUE)) {
  source(f)
}
files = commandArgs(TRUE)
l = graphInput(files)
cat(txtToLp(l[[1]], l[[2]]))
testlist <- list(b = c(-1667457875L, -1667457892L, 571280653L, 571280653L, -1835887972L))
result <- do.call(mcga:::ByteVectorToDoubles, testlist)
str(result)
/mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613106663-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
157
r
testlist <- list(b = c(-1667457875L, -1667457892L, 571280653L, 571280653L, -1835887972L))
result <- do.call(mcga:::ByteVectorToDoubles, testlist)
str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geomUpdateUtils.R \name{remGeoSurplus} \alias{remGeoSurplus} \title{Remove Surplus Geometries} \usage{ remGeoSurplus( input_file, collection_files = c("geomEurope", "geomSmallCountries") ) } \arguments{ \item{input_file}{In file} \item{collection_files}{File(s) with geoms to remove} } \value{ input_file Updated input file } \description{ Some geography files are collections of many countries that overlap with other files that have their own higher-quality data. The remGeoSurplus function ensures each country is only present once in the dataset. } \examples{ \dontrun{ data("geomGlobal") localcovid19now:::remGeoSurplus(geomGlobal) } } \keyword{internal}
/man/remGeoSurplus.Rd
permissive
sjbeckett/localcovid19now
R
false
true
743
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geomUpdateUtils.R \name{remGeoSurplus} \alias{remGeoSurplus} \title{Remove Surplus Geometries} \usage{ remGeoSurplus( input_file, collection_files = c("geomEurope", "geomSmallCountries") ) } \arguments{ \item{input_file}{In file} \item{collection_files}{File(s) with geoms to remove} } \value{ input_file Updated input file } \description{ Some geography files are collections of many countries that overlap with other files that have their own higher-quality data. The remGeoSurplus function ensures each country is only present once in the dataset. } \examples{ \dontrun{ data("geomGlobal") localcovid19now:::remGeoSurplus(geomGlobal) } } \keyword{internal}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R \name{templates_post_scripts} \alias{templates_post_scripts} \title{Create a Script Template} \usage{ templates_post_scripts( script_id, name, note = NULL, ui_report_id = NULL, archived = NULL, hidden = NULL ) } \arguments{ \item{script_id}{integer required. The id of the script that this template uses.} \item{name}{string required. The name of the template.} \item{note}{string optional. A note describing what this template is used for; custom scripts created off this template will display this description.} \item{ui_report_id}{integer optional. The id of the report that this template uses.} \item{archived}{boolean optional. Whether the template has been archived.} \item{hidden}{boolean optional. The hidden status of the item.} } \value{ A list containing the following elements: \item{id}{integer, } \item{public}{boolean, If the template is public or not.} \item{scriptId}{integer, The id of the script that this template uses.} \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} \item{userContext}{string, The user context of the script that this template uses.} \item{params}{array, An array containing the following fields: \itemize{ \item name string, The variable's name as used within your code. \item label string, The label to present to users when asking them for the value. \item description string, A short sentence or fragment describing this parameter to the end user. \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom \item required boolean, Whether this param is required. \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` }} \item{name}{string, The name of the template.} \item{category}{string, The category of this template.} \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} \item{createdAt}{string, } \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{uiReportId}{integer, The id of the report that this template uses.} \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} \item{hidden}{boolean, The hidden status of the item.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. \item name string, This user's name. \item username string, This user's username. \item initials string, This user's initials. \item online boolean, Whether this user is online. }} \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Create a Script Template }
/man/templates_post_scripts.Rd
no_license
cran/civis
R
false
true
3,557
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R \name{templates_post_scripts} \alias{templates_post_scripts} \title{Create a Script Template} \usage{ templates_post_scripts( script_id, name, note = NULL, ui_report_id = NULL, archived = NULL, hidden = NULL ) } \arguments{ \item{script_id}{integer required. The id of the script that this template uses.} \item{name}{string required. The name of the template.} \item{note}{string optional. A note describing what this template is used for; custom scripts created off this template will display this description.} \item{ui_report_id}{integer optional. The id of the report that this template uses.} \item{archived}{boolean optional. Whether the template has been archived.} \item{hidden}{boolean optional. The hidden status of the item.} } \value{ A list containing the following elements: \item{id}{integer, } \item{public}{boolean, If the template is public or not.} \item{scriptId}{integer, The id of the script that this template uses.} \item{scriptType}{string, The type of the template's backing script (e.g SQL, Container, Python, R, JavaScript)} \item{userContext}{string, The user context of the script that this template uses.} \item{params}{array, An array containing the following fields: \itemize{ \item name string, The variable's name as used within your code. \item label string, The label to present to users when asking them for the value. \item description string, A short sentence or fragment describing this parameter to the end user. \item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom \item required boolean, Whether this param is required. \item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param. \item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type. \item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}` }} \item{name}{string, The name of the template.} \item{category}{string, The category of this template.} \item{note}{string, A note describing what this template is used for; custom scripts created off this template will display this description.} \item{createdAt}{string, } \item{updatedAt}{string, } \item{useCount}{integer, The number of uses of this template.} \item{uiReportId}{integer, The id of the report that this template uses.} \item{techReviewed}{boolean, Whether this template has been audited by Civis for security vulnerability and correctness.} \item{archived}{boolean, Whether the template has been archived.} \item{hidden}{boolean, The hidden status of the item.} \item{author}{list, A list containing the following elements: \itemize{ \item id integer, The ID of this user. \item name string, This user's name. \item username string, This user's username. \item initials string, This user's initials. \item online boolean, Whether this user is online. }} \item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".} } \description{ Create a Script Template }
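The Rd above carries no \examples section; a hedged call sketch with placeholder values (the script id and names are not real Civis objects):

library(civis)
tmpl <- templates_post_scripts(script_id = 123,
                               name = "Weekly scoring template",
                               note = "Wraps the weekly scoring script")
tmpl$id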
# Created by Karina Halevy
# Last edited: August 14, 2020

# Problem 1: you have four siblings - Alex, Blake, Charlie, and Diane.
# They each randomly pull a number from a hat between 1 and 39, inclusive.
# Their birth months are each random numbers between 1 - 12, inclusive.
# Create and print a data frame with these three columns of information.
# Print the first row of this data frame.
# Name the columns "name", "hat", and "month", and print the "hat" column using its name.

# Your code for Problem 1 here

# Problem 2: continuing with the data frame from Problem 1, compute and print all the information for the people who are born between January and June, inclusive.

# Your code for Problem 2 here

# Problem 3: similar to the multiplication table created in class, create an addition table for the numbers 1:7.
# Then, compute and print the mean, median, minimum, and maximum of the whole table.
# Compute and print the mean, median, minimum, and maximum of the fifth column of the table.
# Compute and print the mean, median, minimum, and maximum of each row of the table, using 4 lines of code.

# Your code for Problem 3 here

# Problem 4: generate two vectors of length 100 each, where each entry is a number between 1 and 100, inclusive.
# Plot a scatterplot of the two vectors (one of them should be the x-axis, and the other one should be the y-axis).
/Workshop/Workshop_4.R
no_license
lucaspingpao/RShiny-Tutorials
R
false
false
1,388
r
# Created by Karina Halevy
# Last edited: August 14, 2020

# Problem 1: you have four siblings - Alex, Blake, Charlie, and Diane.
# They each randomly pull a number from a hat between 1 and 39, inclusive.
# Their birth months are each random numbers between 1 - 12, inclusive.
# Create and print a data frame with these three columns of information.
# Print the first row of this data frame.
# Name the columns "name", "hat", and "month", and print the "hat" column using its name.

# Your code for Problem 1 here

# Problem 2: continuing with the data frame from Problem 1, compute and print all the information for the people who are born between January and June, inclusive.

# Your code for Problem 2 here

# Problem 3: similar to the multiplication table created in class, create an addition table for the numbers 1:7.
# Then, compute and print the mean, median, minimum, and maximum of the whole table.
# Compute and print the mean, median, minimum, and maximum of the fifth column of the table.
# Compute and print the mean, median, minimum, and maximum of each row of the table, using 4 lines of code.

# Your code for Problem 3 here

# Problem 4: generate two vectors of length 100 each, where each entry is a number between 1 and 100, inclusive.
# Plot a scatterplot of the two vectors (one of them should be the x-axis, and the other one should be the y-axis).
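One possible sketch for Problem 1 above (not part of the worksheet, and only one of many valid answers):

name <- c("Alex", "Blake", "Charlie", "Diane")
hat <- sample(1:39, 4)
month <- sample(1:12, 4, replace = TRUE)
siblings <- data.frame(name, hat, month)
print(siblings)        # the full data frame
print(siblings[1, ])   # the first row
print(siblings$hat)    # the "hat" column, accessed by name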
#' Get a url. #' #' @section RFC2616: #' The GET method means retrieve whatever information (in the form of an #' entity) is identified by the Request-URI. If the Request-URI refers to a #' data-producing process, it is the produced data which shall be returned as #' the entity in the response and not the source text of the process, unless #' that text happens to be the output of the process. #' #' The semantics of the GET method change to a "conditional GET" if the #' request message includes an If-Modified-Since, If-Unmodified-Since, #' If-Match, If-None-Match, or If-Range header field. A conditional GET method #' requests that the entity be transferred only under the circumstances #' described by the conditional header field(s). The conditional GET method is #' intended to reduce unnecessary network usage by allowing cached entities to #' be refreshed without requiring multiple requests or transferring data #' already held by the client. #' #' The semantics of the GET method change to a "partial GET" if the request #' message includes a Range header field. A partial GET requests that only #' part of the entity be transferred, as described in \url{http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35} #' The partial GET method is intended to reduce unnecessary network usage by #' allowing partially-retrieved entities to be completed without transferring #' data already held by the client. #' #' @param url the url of the page to retrieve #' @param ... Further parameters, such as \code{query}, \code{path}, etc, #' passed on to \code{\link{modify_url}}. These parameters must be named. #' @param config Additional configuration settings such as http #' authentication (\code{\link{authenticate}}), additional headers #' (\code{\link{add_headers}}), cookies (\code{\link{set_cookies}}) etc. #' See \code{\link{config}} for full details and list of helpers. #' @param handle The handle to use with this request. If not #' supplied, will be retrieved and reused from the \code{\link{handle_pool}} #' based on the scheme, hostname and port of the url. By default \pkg{httr} # automatically reuses the same http connection (aka handle) for mulitple #' requests to the same scheme/host/port combo. This substantially reduces #' connection time, and ensures that cookies are maintained over multiple #' requests to the same host. See \code{\link{handle_pool}} for more #' details. #' #' @family http methods #' @export #' @examples #' GET("http://google.com/") #' GET("http://google.com/", path = "search") #' GET("http://google.com/", path = "search", query = c(q = "ham")) #' #' # See what GET is doing with requestb.in #' b <- new_bin() #' GET(b) #' GET(b, add_headers(a = 1, b = 2)) #' GET(b, set_cookies(a = 1, b = 2)) #' GET(b, authenticate("username", "password")) #' GET(b, verbose()) #' #' # You might want to manually specify the handle so you can have multiple #' # independent logins to the same website. #' google <- handle("http://google.com") #' GET(handle = google, path = "/") #' GET(handle = google, path = "search") GET <- function(url = NULL, config = list(), ..., handle = NULL) { hu <- handle_url(handle, url, ...) make_request("get", hu$handle, hu$url, config = config) } get_config <- function() { config() }
/R/http-get.r
no_license
jiho/httr
R
false
false
3,292
r
#' Get a url. #' #' @section RFC2616: #' The GET method means retrieve whatever information (in the form of an #' entity) is identified by the Request-URI. If the Request-URI refers to a #' data-producing process, it is the produced data which shall be returned as #' the entity in the response and not the source text of the process, unless #' that text happens to be the output of the process. #' #' The semantics of the GET method change to a "conditional GET" if the #' request message includes an If-Modified-Since, If-Unmodified-Since, #' If-Match, If-None-Match, or If-Range header field. A conditional GET method #' requests that the entity be transferred only under the circumstances #' described by the conditional header field(s). The conditional GET method is #' intended to reduce unnecessary network usage by allowing cached entities to #' be refreshed without requiring multiple requests or transferring data #' already held by the client. #' #' The semantics of the GET method change to a "partial GET" if the request #' message includes a Range header field. A partial GET requests that only #' part of the entity be transferred, as described in \url{http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35} #' The partial GET method is intended to reduce unnecessary network usage by #' allowing partially-retrieved entities to be completed without transferring #' data already held by the client. #' #' @param url the url of the page to retrieve #' @param ... Further parameters, such as \code{query}, \code{path}, etc, #' passed on to \code{\link{modify_url}}. These parameters must be named. #' @param config Additional configuration settings such as http #' authentication (\code{\link{authenticate}}), additional headers #' (\code{\link{add_headers}}), cookies (\code{\link{set_cookies}}) etc. #' See \code{\link{config}} for full details and list of helpers. #' @param handle The handle to use with this request. If not #' supplied, will be retrieved and reused from the \code{\link{handle_pool}} #' based on the scheme, hostname and port of the url. By default \pkg{httr} # automatically reuses the same http connection (aka handle) for mulitple #' requests to the same scheme/host/port combo. This substantially reduces #' connection time, and ensures that cookies are maintained over multiple #' requests to the same host. See \code{\link{handle_pool}} for more #' details. #' #' @family http methods #' @export #' @examples #' GET("http://google.com/") #' GET("http://google.com/", path = "search") #' GET("http://google.com/", path = "search", query = c(q = "ham")) #' #' # See what GET is doing with requestb.in #' b <- new_bin() #' GET(b) #' GET(b, add_headers(a = 1, b = 2)) #' GET(b, set_cookies(a = 1, b = 2)) #' GET(b, authenticate("username", "password")) #' GET(b, verbose()) #' #' # You might want to manually specify the handle so you can have multiple #' # independent logins to the same website. #' google <- handle("http://google.com") #' GET(handle = google, path = "/") #' GET(handle = google, path = "search") GET <- function(url = NULL, config = list(), ..., handle = NULL) { hu <- handle_url(handle, url, ...) make_request("get", hu$handle, hu$url, config = config) } get_config <- function() { config() }
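The conditional-GET behaviour described above can be exercised by attaching the relevant header with add_headers(); a small sketch (the URL and timestamp are placeholders):

library(httr)
r <- GET("http://example.com/resource",
         add_headers(`If-Modified-Since` = "Sat, 29 Oct 1994 19:43:31 GMT"))
status_code(r)   # 304 if unchanged since that date, 200 otherwise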
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/issue_class.R
\name{assign_user}
\alias{assign_user}
\title{Assign a user to an issue}
\usage{
assign_user(issue, user, jira_url = getOption("jira_url"),
  jira_user = getOption("jira_user"),
  jira_password = getOption("jira_password"),
  verbose = getOption("jira_verbose"))
}
\arguments{
\item{issue}{An existing issue id or key.}

\item{user}{The username of the person to assign to the issue.}

\item{jira_url}{base url to JIRA. Defaults to 'jira/'}

\item{jira_user}{username for authentication}

\item{jira_password}{password for authentication}

\item{verbose}{logical; defaults to FALSE}
}
\value{
POST results
}
\description{
Assign a user to an issue
}
/man/assign_user.Rd
no_license
patsulda/Rjira
R
false
false
665
rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/issue_class.R
\name{assign_user}
\alias{assign_user}
\title{Assign a user to an issue}
\usage{
assign_user(issue, user, jira_url = getOption("jira_url"),
  jira_user = getOption("jira_user"),
  jira_password = getOption("jira_password"),
  verbose = getOption("jira_verbose"))
}
\arguments{
\item{issue}{An existing issue id or key.}

\item{user}{The username of the person to assign to the issue.}

\item{jira_url}{base url to JIRA. Defaults to 'jira/'}

\item{jira_user}{username for authentication}

\item{jira_password}{password for authentication}

\item{verbose}{logical; defaults to FALSE}
}
\value{
POST results
}
\description{
Assign a user to an issue
}
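No example accompanies the Rd above; a hedged call sketch with placeholder credentials and issue key (assuming the package is loaded as Rjira):

library(Rjira)
assign_user(issue = "PROJ-123", user = "jdoe",
            jira_url = "https://jira.example.com/",
            jira_user = "apiuser", jira_password = "secret")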
outcome_frames <- Stack() push_outcome_frame <- function(env = parent.frame()) outcome_frames$push(env) as_outcome_fn <- function(expr, env, args = NULL) { if(is.null(expr)) return(as.function(list(NULL), envir = env)) expr <- substitute({ get("push_outcome_frame", envir = asNamespace("tfautograph"), inherits = FALSE)() expr }, list(expr = expr)) fn <- as.function.default(c(args, list(expr)), envir = env) function(...) { returned <- withVisible(fn(...)) outcome_env <- outcome_frames$pop() modified <- as.list(outcome_env, all.names = TRUE) modified <- prune_nested_unmodified(modified, env) out <- drop_empty(list( modified = modified, returned = returned$value, visible = returned$visible )) # splice out outcome_env from closures out <- rapply(list(out), function(x) { if (identical(environment(x), outcome_env)) environment(x) <- env x }, classes = c("function", "formula"), how = "replace")[[1]] out } } export_modified <- function(modified, env) { for (nm in names(modified)) { if (is_named_list(modified[[nm]]) && is_named_list(preexisting <- get0(nm, envir = env))) modified[[nm]] <- modify_list(preexisting, modified[[nm]]) else if (is_undef(modified[[nm]])) { if (exists(nm, envir = env, inherits = FALSE)) rm(list = nm, envir = env) makeActiveBinding(nm, modified[[nm]], env) modified[[nm]] <- NULL } } if(length(modified)) list2env(modified, envir = env) } prune_nested_unmodified <- function(modified, env) { for (nm in names(modified)) { obj <- modified[[nm]] if(is_undef(obj) || is_undef(nm, env) || !is_named_list(obj)) next orig <- get0(nm, env) pruned_obj <- prune_identical(obj, orig)[[1]] modified[[nm]] <- pruned_obj } drop_empty(modified) } prune_identical <- function(x, y) { if(anyDuplicated(names(x)) || anyDuplicated(names(y))) stop("names can't be duplicated") # TODO: need to handle unnamed lists better both in the case of recursing past # them and calling prune_identical on unnamed list elements, as well as being # able to prune unnamed elements. A hacky unsatisfactory fix is to autoname # them with some cryptic name, like .__ag_autoname_id_01 then unnaming them # later. for (nm in intersect(names(x), names(y))) { if (identical(x[[nm]], y[[nm]])) x[[nm]] <- y[[nm]] <- NULL else if (is.list(x[[nm]]) && is.list(y[[nm]])) { res <- prune_identical(x[[nm]], y[[nm]]) x[[nm]] <- res[[1]] y[[nm]] <- res[[2]] } } list(x, y) } modify_list <- function (x, y) { stopifnot(is_named_list(x), is_named_list(y)) y_names <- names(y) if(is.null(y_names) || anyNA(y_names) || anyDuplicated(y_names) || any(!nzchar(y_names))) stop("Invalid names. Lists must either have no names or all elements must have unique names") for (nm in y_names) { x[[nm]] <- if (is_named_list(x[[nm]]) && is_named_list(y[[nm]])) modify_list(x[[nm]], y[[nm]]) else y[[nm]] } x }
/R/outcome.R
no_license
cran/tfautograph
R
false
false
3,122
r
outcome_frames <- Stack() push_outcome_frame <- function(env = parent.frame()) outcome_frames$push(env) as_outcome_fn <- function(expr, env, args = NULL) { if(is.null(expr)) return(as.function(list(NULL), envir = env)) expr <- substitute({ get("push_outcome_frame", envir = asNamespace("tfautograph"), inherits = FALSE)() expr }, list(expr = expr)) fn <- as.function.default(c(args, list(expr)), envir = env) function(...) { returned <- withVisible(fn(...)) outcome_env <- outcome_frames$pop() modified <- as.list(outcome_env, all.names = TRUE) modified <- prune_nested_unmodified(modified, env) out <- drop_empty(list( modified = modified, returned = returned$value, visible = returned$visible )) # splice out outcome_env from closures out <- rapply(list(out), function(x) { if (identical(environment(x), outcome_env)) environment(x) <- env x }, classes = c("function", "formula"), how = "replace")[[1]] out } } export_modified <- function(modified, env) { for (nm in names(modified)) { if (is_named_list(modified[[nm]]) && is_named_list(preexisting <- get0(nm, envir = env))) modified[[nm]] <- modify_list(preexisting, modified[[nm]]) else if (is_undef(modified[[nm]])) { if (exists(nm, envir = env, inherits = FALSE)) rm(list = nm, envir = env) makeActiveBinding(nm, modified[[nm]], env) modified[[nm]] <- NULL } } if(length(modified)) list2env(modified, envir = env) } prune_nested_unmodified <- function(modified, env) { for (nm in names(modified)) { obj <- modified[[nm]] if(is_undef(obj) || is_undef(nm, env) || !is_named_list(obj)) next orig <- get0(nm, env) pruned_obj <- prune_identical(obj, orig)[[1]] modified[[nm]] <- pruned_obj } drop_empty(modified) } prune_identical <- function(x, y) { if(anyDuplicated(names(x)) || anyDuplicated(names(y))) stop("names can't be duplicated") # TODO: need to handle unnamed lists better both in the case of recursing past # them and calling prune_identical on unnamed list elements, as well as being # able to prune unnamed elements. A hacky unsatisfactory fix is to autoname # them with some cryptic name, like .__ag_autoname_id_01 then unnaming them # later. for (nm in intersect(names(x), names(y))) { if (identical(x[[nm]], y[[nm]])) x[[nm]] <- y[[nm]] <- NULL else if (is.list(x[[nm]]) && is.list(y[[nm]])) { res <- prune_identical(x[[nm]], y[[nm]]) x[[nm]] <- res[[1]] y[[nm]] <- res[[2]] } } list(x, y) } modify_list <- function (x, y) { stopifnot(is_named_list(x), is_named_list(y)) y_names <- names(y) if(is.null(y_names) || anyNA(y_names) || anyDuplicated(y_names) || any(!nzchar(y_names))) stop("Invalid names. Lists must either have no names or all elements must have unique names") for (nm in y_names) { x[[nm]] <- if (is_named_list(x[[nm]]) && is_named_list(y[[nm]])) modify_list(x[[nm]], y[[nm]]) else y[[nm]] } x }
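A small illustration of the recursive merge that modify_list() above performs; the helper is internal to the package, so this only sketches the expected behaviour rather than calling the namespace directly:

x <- list(a = 1, b = list(c = 2, d = 3))
y <- list(b = list(d = 30, e = 40))
# modify_list(x, y) would return list(a = 1, b = list(c = 2, d = 30, e = 40)):
# named sublists shared by x and y are merged recursively, and y's leaves win.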
#' @include RevigoR.R #' @include AllGenerics-export.R NULL #' @name plotRevigo #' #' @title Plotting ReViGo data #' #' @description #' plotting stuff #' #' @param x a RevigoRData object #' @param xType one of the following values: #' \code{"alphabetical","pvalue","uniqueness","X","Y"} #' @param yType one of the following values: #' \code{"pvalue","uniqueness","X","Y"} #' @param pointSizeType one of the following values: #' \code{"same","point_size","pvalue","uniqueness","X","Y"} #' @param colourType one of the following values: #' \code{"same","unique","pvalue","uniqueness","X","Y"} #' @param colour a colour, which ggplot understands #' @param size a positive numeric value #' @param showAllLabels optional logical value: should all labels be shown? #' (default: \code{showAllLabels = FALSE}) #' #' @return a ggplot2 plot #' #' @importFrom colorRamps matlab.like2 #' @importFrom ggthemes solarized_pal #' #' @examples #' data("rdinput", package = "RevigoR") #' rd <- getRevigo(rdinput) #' plotRevigoScatter(rd) #' plotRevigoTreemap(rd) NULL #' @rdname plotRevigo #' @export setMethod( f = "plotRevigoScatter", signature = signature(x = "RevigoRData"), definition = function(x, xType, yType, pointSizeType, colourType, colour, size, showAllLabels){ requireNamespace("ggplot2", quietly = TRUE) # input check checkX <- c("alphabetical","pvalue","uniqueness","X","Y") checkY <- c("pvalue","uniqueness","X","Y") checkPoint <- c("same","point_size","pvalue","uniqueness","X","Y") checkColour <- c("same","unique","pvalue","uniqueness","X","Y") labelNames <- c("description" = "Alphabetical", "plot_size" = "Point size", "log10pvalue" = "-log10(p)", "uniqueness" = "uniqueness", "plot_X" = "Semantic space X", "plot_Y" = "Semantic space Y") .checkValueValidity(xType, checkX) xType <- .convert_types_to_colnames(xType) .checkValueValidity(yType, checkY) yType <- .convert_types_to_colnames(yType) .checkValueValidity(pointSizeType, checkPoint) pointSizeType <- .convert_types_to_colnames(pointSizeType) .checkValueValidity(colourType, checkColour) colourType <- .convert_types_to_colnames(colourType) if(!.is_a_string(colour)){ stop("'colour' must be a single character value.", call. = FALSE) } if(!.is_numeric_string(size) || size <= 0){ stop("'size' must be a positive numeric value.", call. = FALSE) } if(!.is_a_bool(showAllLabels)){ stop("'showAllLabels' must be TRUE or FALSE.", call. 
= FALSE) } # get data and subset data <- getTableData(x) data <- data[!is.na(data$plot_X) | !is.na(data$plot_Y),] data$log10pvalue <- abs(data$log10pvalue) if(showAllLabels){ ex <- data } else{ ex <- data[data$dispensability < 0.15,] } # plot data plot <- ggplot(data = data) # point and colour options if(pointSizeType == "same" && colourType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType), colour = colour, size = size, alpha = I(0.6)) } else if(pointSizeType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, colour = colourType), size = size, alpha = I(0.6)) } else if(colourType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, size = pointSizeType), colour = colour, alpha = I(0.6)) } else if(colourType == "unique"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, size = pointSizeType, colour = "description"), alpha = I(0.6)) } else { plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, colour = colourType, size = pointSizeType), alpha = I(0.6)) } # if(pointSizeType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType), size = size, shape = 21, fill = "transparent", colour = I(alpha("black", 0.6))) } else { plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, size = pointSizeType), shape = 21, fill = "transparent", colour = I(alpha("black", 0.6))) } # if(xType %in% c("log10pvalue","uniqueness","plot_X","plot_Y")){ x_range = max(data[,xType]) - min(data[,xType]) plot <- plot + xlim(min(data[,xType]) - x_range/10, max(data[,xType]) + x_range/10) } if(yType %in% c("log10pvalue","uniqueness","plot_X","plot_Y")){ y_range = max(data[,yType]) - min(data[,yType]) plot <- plot + ylim(min(data[,yType]) - y_range/10, max(data[,yType]) + y_range/10) } # remaining plot stuff plot <- plot + theme_bw() + theme(legend.key = element_blank()) + labs(x = labelNames[xType], y = labelNames[yType]) + geom_text(data = ex, aes_string(x = xType, y = yType, label = "description"), colour = I(alpha("black", 0.85)), size = 3) # if(colourType == "unique"){ plot <- plot + scale_colour_manual(name = "GO Terms", values = grDevices::colorRampPalette(ggthemes::solarized_pal()(8))(nrow(data))) } else { plot <- plot + scale_colour_gradientn(name = labelNames[colourType], colours = colorRamps::matlab.like2(100), limits = c(0,max(data$log10pvalue)) ) } # if(pointSizeType == "same"){ plot <- plot + scale_size(name = "one size", range = c(10,10), guide = FALSE) } else { plot <- plot + scale_size(name = labelNames[pointSizeType], range = c(5, 30)) } return(plot) } ) # .convert_types_to_colnames <- function(type){ if(type %in% c("X","Y")){ type <- paste0("plot_",type) } if(type == "pvalue"){ type <- "log10pvalue" } if(type == "alphabetical"){ type <- "description" } if(type == "point_size"){ type <- "plot_size" } return(type) } #' @rdname plotRevigo #' #' @param type one of \code{"frequency"}, \code{"pvalue"}, \code{"uniqueness"} #' or \code{"dispensability"} #' @param palette name of a colour palette, which #' \code{ggplot2::scale_fill_brewer} accepts #' #' @import treemapify #' @export setMethod( f = "plotRevigoTreemap", signature = signature(x = "RevigoRData"), definition = function(x, type, palette){ requireNamespace("ggplot2", quietly = TRUE) requireNamespace("treemapify", quietly = TRUE) cols <- c("frequency" = "frequency", "pvalue" = "log10pvalue", "uniqueness" = "uniqueness", "dispensability" = "dispensability") # input check 
if(!.is_non_empty_string(type)){ stop("'type' must be a non-empty single character value.", call. = FALSE) } .checkValueValidity(type, names(cols)) # get data and make values positive data <- getTreemapData(x) data$log10pvalue <- abs(data$log10pvalue) # plot data plot <- ggplot(data, mapping = aes_string(area = cols[type], fill = "representative", label = "description", subgroup = "representative")) + geom_treemap() + geom_treemap_subgroup_border() + geom_treemap_subgroup_text(place = "centre", grow = TRUE, colour = "black", fontface = "bold", min.size = 0) + geom_treemap_text(colour = "black", place = "topleft", fontface = "italic", reflow = TRUE) + scale_fill_brewer(palette = palette) + labs(title = "REVIGO Gene Ontology treemap") + guides(fill = FALSE) return(plot) } )
/R/RevigoR-plot.R
no_license
FelixErnst/RevigoR
R
false
false
9,358
r
#' @include RevigoR.R #' @include AllGenerics-export.R NULL #' @name plotRevigo #' #' @title Plotting ReViGo data #' #' @description #' plotting stuff #' #' @param x a RevigoRData object #' @param xType one of the following values: #' \code{"alphabetical","pvalue","uniqueness","X","Y"} #' @param yType one of the following values: #' \code{"pvalue","uniqueness","X","Y"} #' @param pointSizeType one of the following values: #' \code{"same","point_size","pvalue","uniqueness","X","Y"} #' @param colourType one of the following values: #' \code{"same","unique","pvalue","uniqueness","X","Y"} #' @param colour a colour, which ggplot understands #' @param size a positive numeric value #' @param showAllLabels optional logical value: should all labels be shown? #' (default: \code{showAllLabels = FALSE}) #' #' @return a ggplot2 plot #' #' @importFrom colorRamps matlab.like2 #' @importFrom ggthemes solarized_pal #' #' @examples #' data("rdinput", package = "RevigoR") #' rd <- getRevigo(rdinput) #' plotRevigoScatter(rd) #' plotRevigoTreemap(rd) NULL #' @rdname plotRevigo #' @export setMethod( f = "plotRevigoScatter", signature = signature(x = "RevigoRData"), definition = function(x, xType, yType, pointSizeType, colourType, colour, size, showAllLabels){ requireNamespace("ggplot2", quietly = TRUE) # input check checkX <- c("alphabetical","pvalue","uniqueness","X","Y") checkY <- c("pvalue","uniqueness","X","Y") checkPoint <- c("same","point_size","pvalue","uniqueness","X","Y") checkColour <- c("same","unique","pvalue","uniqueness","X","Y") labelNames <- c("description" = "Alphabetical", "plot_size" = "Point size", "log10pvalue" = "-log10(p)", "uniqueness" = "uniqueness", "plot_X" = "Semantic space X", "plot_Y" = "Semantic space Y") .checkValueValidity(xType, checkX) xType <- .convert_types_to_colnames(xType) .checkValueValidity(yType, checkY) yType <- .convert_types_to_colnames(yType) .checkValueValidity(pointSizeType, checkPoint) pointSizeType <- .convert_types_to_colnames(pointSizeType) .checkValueValidity(colourType, checkColour) colourType <- .convert_types_to_colnames(colourType) if(!.is_a_string(colour)){ stop("'colour' must be a single character value.", call. = FALSE) } if(!.is_numeric_string(size) || size <= 0){ stop("'size' must be a positive numeric value.", call. = FALSE) } if(!.is_a_bool(showAllLabels)){ stop("'showAllLabels' must be TRUE or FALSE.", call. 
= FALSE) } # get data and subset data <- getTableData(x) data <- data[!is.na(data$plot_X) | !is.na(data$plot_Y),] data$log10pvalue <- abs(data$log10pvalue) if(showAllLabels){ ex <- data } else{ ex <- data[data$dispensability < 0.15,] } # plot data plot <- ggplot(data = data) # point and colour options if(pointSizeType == "same" && colourType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType), colour = colour, size = size, alpha = I(0.6)) } else if(pointSizeType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, colour = colourType), size = size, alpha = I(0.6)) } else if(colourType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, size = pointSizeType), colour = colour, alpha = I(0.6)) } else if(colourType == "unique"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, size = pointSizeType, colour = "description"), alpha = I(0.6)) } else { plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, colour = colourType, size = pointSizeType), alpha = I(0.6)) } # if(pointSizeType == "same"){ plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType), size = size, shape = 21, fill = "transparent", colour = I(alpha("black", 0.6))) } else { plot <- plot + geom_point(mapping = aes_string(x = xType, y = yType, size = pointSizeType), shape = 21, fill = "transparent", colour = I(alpha("black", 0.6))) } # if(xType %in% c("log10pvalue","uniqueness","plot_X","plot_Y")){ x_range = max(data[,xType]) - min(data[,xType]) plot <- plot + xlim(min(data[,xType]) - x_range/10, max(data[,xType]) + x_range/10) } if(yType %in% c("log10pvalue","uniqueness","plot_X","plot_Y")){ y_range = max(data[,yType]) - min(data[,yType]) plot <- plot + ylim(min(data[,yType]) - y_range/10, max(data[,yType]) + y_range/10) } # remaining plot stuff plot <- plot + theme_bw() + theme(legend.key = element_blank()) + labs(x = labelNames[xType], y = labelNames[yType]) + geom_text(data = ex, aes_string(x = xType, y = yType, label = "description"), colour = I(alpha("black", 0.85)), size = 3) # if(colourType == "unique"){ plot <- plot + scale_colour_manual(name = "GO Terms", values = grDevices::colorRampPalette(ggthemes::solarized_pal()(8))(nrow(data))) } else { plot <- plot + scale_colour_gradientn(name = labelNames[colourType], colours = colorRamps::matlab.like2(100), limits = c(0,max(data$log10pvalue)) ) } # if(pointSizeType == "same"){ plot <- plot + scale_size(name = "one size", range = c(10,10), guide = FALSE) } else { plot <- plot + scale_size(name = labelNames[pointSizeType], range = c(5, 30)) } return(plot) } ) # .convert_types_to_colnames <- function(type){ if(type %in% c("X","Y")){ type <- paste0("plot_",type) } if(type == "pvalue"){ type <- "log10pvalue" } if(type == "alphabetical"){ type <- "description" } if(type == "point_size"){ type <- "plot_size" } return(type) } #' @rdname plotRevigo #' #' @param type one of \code{"frequency"}, \code{"pvalue"}, \code{"uniqueness"} #' or \code{"dispensability"} #' @param palette name of a colour palette, which #' \code{ggplot2::scale_fill_brewer} accepts #' #' @import treemapify #' @export setMethod( f = "plotRevigoTreemap", signature = signature(x = "RevigoRData"), definition = function(x, type, palette){ requireNamespace("ggplot2", quietly = TRUE) requireNamespace("treemapify", quietly = TRUE) cols <- c("frequency" = "frequency", "pvalue" = "log10pvalue", "uniqueness" = "uniqueness", "dispensability" = "dispensability") # input check 
if(!.is_non_empty_string(type)){ stop("'type' must be a non-empty single character value.", call. = FALSE) } .checkValueValidity(type, names(cols)) # get data and make values positive data <- getTreemapData(x) data$log10pvalue <- abs(data$log10pvalue) # plot data plot <- ggplot(data, mapping = aes_string(area = cols[type], fill = "representative", label = "description", subgroup = "representative")) + geom_treemap() + geom_treemap_subgroup_border() + geom_treemap_subgroup_text(place = "centre", grow = TRUE, colour = "black", fontface = "bold", min.size = 0) + geom_treemap_text(colour = "black", place = "topleft", fontface = "italic", reflow = TRUE) + scale_fill_brewer(palette = palette) + labs(title = "REVIGO Gene Ontology treemap") + guides(fill = FALSE) return(plot) } )
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.R \name{sagemakerruntime} \alias{sagemakerruntime} \title{Amazon SageMaker Runtime} \usage{ sagemakerruntime(config = list()) } \arguments{ \item{config}{Optional configuration of credentials, endpoint, and/or region. \itemize{ \item{\strong{access_key_id}:} {AWS access key ID} \item{\strong{secret_access_key}:} {AWS secret access key} \item{\strong{session_token}:} {AWS temporary session token} \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} \item{\strong{anonymous}:} {Set anonymous credentials.} \item{\strong{endpoint}:} {The complete URL to use for the constructed client.} \item{\strong{region}:} {The AWS Region used in instantiating the client.} \item{\strong{close_connection}:} {Immediately close all HTTP connections.} \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.} \item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.} }} } \value{ A client for the service. You can call the service's operations using syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned to the client. The available operations are listed in the Operations section. } \description{ The Amazon SageMaker runtime API. } \section{Service syntax}{ \if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- sagemakerruntime( config = list( credentials = list( creds = list( access_key_id = "string", secret_access_key = "string", session_token = "string" ), profile = "string", anonymous = "logical" ), endpoint = "string", region = "string", close_connection = "logical", timeout = "numeric", s3_force_path_style = "logical" ) ) }\if{html}{\out{</div>}} } \section{Operations}{ \tabular{ll}{ \link[paws.machine.learning:sagemakerruntime_invoke_endpoint]{invoke_endpoint} \tab After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint\cr \link[paws.machine.learning:sagemakerruntime_invoke_endpoint_async]{invoke_endpoint_async} \tab After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint in an asynchronous manner } } \examples{ \dontrun{ svc <- sagemakerruntime() svc$invoke_endpoint( Foo = 123 ) } }
/man/sagemakerruntime.Rd
no_license
cran/paws
R
false
true
2,712
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.R \name{sagemakerruntime} \alias{sagemakerruntime} \title{Amazon SageMaker Runtime} \usage{ sagemakerruntime(config = list()) } \arguments{ \item{config}{Optional configuration of credentials, endpoint, and/or region. \itemize{ \item{\strong{access_key_id}:} {AWS access key ID} \item{\strong{secret_access_key}:} {AWS secret access key} \item{\strong{session_token}:} {AWS temporary session token} \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} \item{\strong{anonymous}:} {Set anonymous credentials.} \item{\strong{endpoint}:} {The complete URL to use for the constructed client.} \item{\strong{region}:} {The AWS Region used in instantiating the client.} \item{\strong{close_connection}:} {Immediately close all HTTP connections.} \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.} \item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.} }} } \value{ A client for the service. You can call the service's operations using syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned to the client. The available operations are listed in the Operations section. } \description{ The Amazon SageMaker runtime API. } \section{Service syntax}{ \if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- sagemakerruntime( config = list( credentials = list( creds = list( access_key_id = "string", secret_access_key = "string", session_token = "string" ), profile = "string", anonymous = "logical" ), endpoint = "string", region = "string", close_connection = "logical", timeout = "numeric", s3_force_path_style = "logical" ) ) }\if{html}{\out{</div>}} } \section{Operations}{ \tabular{ll}{ \link[paws.machine.learning:sagemakerruntime_invoke_endpoint]{invoke_endpoint} \tab After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint\cr \link[paws.machine.learning:sagemakerruntime_invoke_endpoint_async]{invoke_endpoint_async} \tab After you deploy a model into production using Amazon SageMaker hosting services, your client applications use this API to get inferences from the model hosted at the specified endpoint in an asynchronous manner } } \examples{ \dontrun{ svc <- sagemakerruntime() svc$invoke_endpoint( Foo = 123 ) } }
# blank/ transparent theme for mapping
theme_map <- function(){
  theme(
    axis.text = element_blank(),
    axis.ticks = element_blank(),
    panel.grid.major = element_line(color = NA),
    panel.background = element_blank(),
    plot.background = element_rect(fill = "transparent", color = NA),
    legend.background = element_rect(fill = "transparent", color = NA)
  )
}

# custom fill function
custom_fill <- function(pal = "YlGnBu", name, guide = guide_colorbar(ticks = F), direction = 1, na.value = "gray50"){
  scale_fill_distiller(palette = pal, name = name, guide = guide, direction = direction, na.value = na.value)
}
/functions/map_style.R
no_license
bransonf/map_help
R
false
false
630
r
# blank/ transparent theme for mapping
theme_map <- function(){
  theme(
    axis.text = element_blank(),
    axis.ticks = element_blank(),
    panel.grid.major = element_line(color = NA),
    panel.background = element_blank(),
    plot.background = element_rect(fill = "transparent", color = NA),
    legend.background = element_rect(fill = "transparent", color = NA)
  )
}

# custom fill function
custom_fill <- function(pal = "YlGnBu", name, guide = guide_colorbar(ticks = F), direction = 1, na.value = "gray50"){
  scale_fill_distiller(palette = pal, name = name, guide = guide, direction = direction, na.value = na.value)
}
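theme_map() and custom_fill() above are meant to be layered onto a ggplot; a hedged usage sketch in which stl_tracts is a placeholder for an sf data frame with a numeric value column:

library(ggplot2)
library(sf)
ggplot(stl_tracts) +
  geom_sf(aes(fill = value), color = NA) +
  custom_fill(name = "Value") +
  theme_map()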
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SegFunctions.R \name{ACEDuncanPolyK} \alias{ACEDuncanPolyK} \title{A function to compute Duncan's Constrained Absolute Centralisation Index} \usage{ ACEDuncanPolyK(x, dc = NULL, K = NULL, kdist = NULL, center = 1, spatobj = NULL, folder = NULL, shape = NULL) } \arguments{ \item{x}{- an object of class matrix (or which can be coerced to that class), where each column represents the distribution of a group within spatial units. The number of columns should be greater than 1 (at least 2 groups are required). You should not include a column with total population, because this will be interpreted as a group.} \item{dc}{- a numeric matrix/vector containing the distances between spatial units centroids and the central spatial unit(s).} \item{K}{- the number of neighbourhoods under the influence of a center} \item{kdist}{- the maximal distance that defines the neighbourhoods influenced by a center} \item{center}{- a numeric vector giving the number of the spatial units that represent the centers in the table} \item{spatobj}{- a spatial object (SpatialPolygonsDataFrame) with geographic information} \item{folder}{- a character vector with the folder (directory) name indicating where the shapefile is located on the drive} \item{shape}{- a character vector with the name of the shapefile (without the .shp extension).} } \value{ a matrix containing relative centralisation index values } \description{ Constrained (local) version of Duncan's centralization index. The function can be used in two ways: to provide a matrix containing the distances between spatial/organizational unit centroids or a external geographic information source (spatial object or shape file). } \examples{ x <- segdata@data[ ,1:2] foldername <- system.file('extdata', package = 'OasisR') shapename <- 'segdata' ACEDuncanPolyK(x, spatobj = segdata, center = c(28, 83)) ACEDuncanPolyK(x, folder = foldername, shape = shapename, center = c(28, 83), K = 3) center <- c(28, 83) polydist <- matrix(data = NA, nrow = nrow(x), ncol = length(center)) for (i in 1:ncol(polydist)) polydist[,i] <- distcenter(spatobj = segdata, center = center[i]) ACEDuncanPolyK(x, dc = polydist, kdist = 2) } \references{ Duncan O. D. and Duncan B. (1955) \emph{A Methodological Analysis of Segregation Indexes}. American Sociological Review 41, pp. 210-217 Folch D.C and Rey S. J (2016) \emph{The centralization index: A measure of local spatial segregation}. Papers in Regional Science 95 (3), pp. 555-576 } \seealso{ \code{\link{ACEDuncan}}, \code{\link{ACEDuncanPoly}}, \code{\link{RCE}}, \code{\link{RCEPoly}}, \code{\link{RCEPolyK}} \code{\link{ACE}}, \code{\link{ACEPoly}} }
/man/ACEDuncanPolyK.Rd
no_license
cran/OasisR
R
false
true
2,842
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SegFunctions.R \name{ACEDuncanPolyK} \alias{ACEDuncanPolyK} \title{A function to compute Duncan's Constrained Absolute Centralisation Index} \usage{ ACEDuncanPolyK(x, dc = NULL, K = NULL, kdist = NULL, center = 1, spatobj = NULL, folder = NULL, shape = NULL) } \arguments{ \item{x}{- an object of class matrix (or which can be coerced to that class), where each column represents the distribution of a group within spatial units. The number of columns should be greater than 1 (at least 2 groups are required). You should not include a column with total population, because this will be interpreted as a group.} \item{dc}{- a numeric matrix/vector containing the distances between spatial units centroids and the central spatial unit(s).} \item{K}{- the number of neighbourhoods under the influence of a center} \item{kdist}{- the maximal distance that defines the neighbourhoods influenced by a center} \item{center}{- a numeric vector giving the number of the spatial units that represent the centers in the table} \item{spatobj}{- a spatial object (SpatialPolygonsDataFrame) with geographic information} \item{folder}{- a character vector with the folder (directory) name indicating where the shapefile is located on the drive} \item{shape}{- a character vector with the name of the shapefile (without the .shp extension).} } \value{ a matrix containing relative centralisation index values } \description{ Constrained (local) version of Duncan's centralization index. The function can be used in two ways: to provide a matrix containing the distances between spatial/organizational unit centroids or a external geographic information source (spatial object or shape file). } \examples{ x <- segdata@data[ ,1:2] foldername <- system.file('extdata', package = 'OasisR') shapename <- 'segdata' ACEDuncanPolyK(x, spatobj = segdata, center = c(28, 83)) ACEDuncanPolyK(x, folder = foldername, shape = shapename, center = c(28, 83), K = 3) center <- c(28, 83) polydist <- matrix(data = NA, nrow = nrow(x), ncol = length(center)) for (i in 1:ncol(polydist)) polydist[,i] <- distcenter(spatobj = segdata, center = center[i]) ACEDuncanPolyK(x, dc = polydist, kdist = 2) } \references{ Duncan O. D. and Duncan B. (1955) \emph{A Methodological Analysis of Segregation Indexes}. American Sociological Review 41, pp. 210-217 Folch D.C and Rey S. J (2016) \emph{The centralization index: A measure of local spatial segregation}. Papers in Regional Science 95 (3), pp. 555-576 } \seealso{ \code{\link{ACEDuncan}}, \code{\link{ACEDuncanPoly}}, \code{\link{RCE}}, \code{\link{RCEPoly}}, \code{\link{RCEPolyK}} \code{\link{ACE}}, \code{\link{ACEPoly}} }
#Reading in the leaf wetness data
library(gdata)

#Reading in the wet leaf wetness data
wet1 = read.xls('/home/adam/Dropbox/sapflow/leafwet/pww/pww_leaf_2014-08-15_date_corrected.xls',sheet=1)
wet1 = wet1[-c(1,2),]
wet2 = read.xls('/home/adam/Dropbox/sapflow/leafwet/pww/pww_leaf_2014-12-08.xls',sheet=1)
wet2 = wet2[-c(1,2),]
wet = rbind(wet1,wet2)
wet = wet[,c(1,2,5,8,11,14)] #450 threshold
#wet6 = wet[,c(1,3,6,9,12,15)] #460 threshold
wet[,1] = as.POSIXct(wet[,1],format = '%m/%d/%Y %I:%M %p')
wet[,2] = as.numeric(as.matrix(wet[,2]))
wet[,3] = as.numeric(as.matrix(wet[,3]))
wet[,4] = as.numeric(as.matrix(wet[,4]))
wet[,5] = as.numeric(as.matrix(wet[,5]))
wet[,6] = as.numeric(as.matrix(wet[,6]))
colnames(wet)[1] = 'time'

#Month index
mID = sort(unique(substr(wet$time,6,7)))
#Hour index
hID = sort(unique(substr(wet$time,12,13)))
ID = expand.grid(hID,mID)

wetave = mat.or.vec(nrow(ID),5)
for(i in 1:nrow(ID)){
  tempID = which(substr(wet$time,12,13) == ID[i,1] & substr(wet$time,6,7) == ID[i,2])
  wetave[i,] = apply(wet[tempID,2:6]/10,2,mean)
}

#Take mean of sensors and plot by month
#Get rid of July bc there is only 1 day of data there
pdat = rowMeans(wetave)
pdat = pdat[25:144]

#DO IT AGAIN FOR LAU

#Reading in the wet leaf wetness data
wet1 = read.xls('/home/adam/Dropbox/sapflow/leafwet/lau/lau_leaf_2014-09-04.xls',sheet=1)
wet1 = wet1[-c(1,2),]
wet2 = read.xls('/home/adam/Dropbox/sapflow/leafwet/lau/lau_leaf_2014-10-27.xls',sheet=1)
wet2 = wet2[-c(1,2),]
wet3 = read.xls('/home/adam/Dropbox/sapflow/leafwet/lau/lau_leaf_2014-12-03.xls',sheet=1)
wet3 = wet3[-c(1,2),]
wet = rbind(wet1,wet2,wet3)
wet = wet[,c(1,2,5,8,11,14)] #450 threshold
#wet6 = wet[,c(1,3,6,9,12,15)] #460 threshold
wet[,1] = as.POSIXct(wet[,1],format = '%m/%d/%Y %I:%M %p')
wet[,2] = as.numeric(as.matrix(wet[,2]))
wet[,3] = as.numeric(as.matrix(wet[,3]))
wet[,4] = as.numeric(as.matrix(wet[,4]))
wet[,5] = as.numeric(as.matrix(wet[,5]))
wet[,6] = as.numeric(as.matrix(wet[,6]))
colnames(wet)[1] = 'time'

#Month index
mID = sort(unique(substr(wet$time,6,7)))
#Hour index
hID = sort(unique(substr(wet$time,12,13)))
ID = expand.grid(hID,mID)

wetave = mat.or.vec(nrow(ID),5)
for(i in 1:nrow(ID)){
  tempID = which(substr(wet$time,12,13) == ID[i,1] & substr(wet$time,6,7) == ID[i,2])
  wetave[i,] = apply(wet[tempID,2:6]/10,2,mean)
}

#The sensors are pretty similar to each other. Take mean and plot by month
pdatlau = rowMeans(wetave)

#Get both pdats on to a 0 to 100 scale
pdat = pdat*100
pdatlau = pdatlau*100

#PLOTTING
jpeg('/home/adam/Dropbox/sapflow/figs/poster/leafwet.jpeg',width = 1000, height = 1000)
par(mfrow = c(2,2),las = 1,cex.main = 3,cex.lab = 2.5,cex.axis=2.5,mar = c(5,6,4,2))

plot(y = pdatlau[1:24], x = 1:24, lwd = 4, ylim = c(0,100), col = colors()[132], ylab = '% of hour leaf is wet', xlab = '', main = 'August', tck = 0.03, cex.axis = 2.2, type = 'l')
lines(y = pdat[1:24], x = 1:24,lwd = 4,col = colors()[149])
legend(x = 13, y = 98,cex=2,legend = c("Laupahoehoe","Pu\'u\ Wa\'awa\'a"),fill = colors()[c(132,149)])

plot(y = pdatlau[25:48], x = 1:24, lwd = 4, ylim = c(0,100), col = colors()[132], ylab = '', xlab = '', main = 'September', tck = 0.03, type = 'l')
lines(y = pdat[25:48], x = 1:24,lwd = 4,col = colors()[149])

plot(y = pdatlau[49:72], x = 1:24, lwd = 4, ylim = c(0,100), col = colors()[132], ylab = '% of hour leaf is wet', xlab = 'Hour of day', main = 'October', tck = 0.03, cex.axis = 2.2, type = 'l')
lines(y = pdat[49:72], x = 1:24,lwd = 4,col = colors()[149])

plot(y = pdatlau[73:96], x = 1:24, lwd = 4, ylim = c(0,100), col = colors()[132], ylab = '', xlab = 'Hour of day', main = 'November', tck = 0.03, type = 'l')
lines(y = pdat[73:96], x = 1:24,lwd = 4,col = colors()[149])

dev.off()
/r_scripts/leaf_wetness_by_month_v2.r
no_license
treelover/climate
R
false
false
3,762
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/oneway.R
\name{onewayid}
\alias{onewayid}
\alias{onewayid.data.frame}
\alias{onewayid.SpatialLines}
\title{Aggregate ods so they become non-directional}
\usage{
onewayid(x, attrib, id1 = names(x)[1], id2 = names(x)[2],
  stplanr.key = od_id_order(x, id1, id2))

\method{onewayid}{data.frame}(x, attrib, id1 = names(x)[1],
  id2 = names(x)[2], stplanr.key = od_id_order(x, id1, id2))

\method{onewayid}{SpatialLines}(x, attrib, id1 = names(x)[1],
  id2 = names(x)[2], stplanr.key = od_id_order(x, id1, id2))
}
\arguments{
\item{x}{A data frame or SpatialLinesDataFrame, representing an OD matrix}

\item{attrib}{A vector of column numbers or names for deciding which
attribute(s) of class numeric to aggregate}

\item{id1}{Optional (it is assumed to be the first column) text string
referring to the name of the variable containing the unique id of the origin}

\item{id2}{Optional (it is assumed to be the second column) text string
referring to the name of the variable containing the unique id of the
destination}

\item{stplanr.key}{A key of unique OD pairs regardless of the order,
autogenerated by \code{\link[=od_id_order]{od_id_order()}}}
}
\value{
\code{onewayid} outputs a data.frame with rows containing results for the
user-selected attribute values that have been aggregated.
}
\description{
For example, sum total travel in both directions.
}
\details{
Flow data often contains movement in two directions: from point A to point B
and then from B to A. This can be problematic for transport planning, because
the magnitude of flow along a route can be masked by flows in the other
direction. If only the largest flow in either direction is captured in an
analysis, for example, the true extent of travel will be heavily
under-estimated for OD pairs which have similar amounts of travel in both
directions. Flows in both directions are often represented by overlapping
lines with identical geometries (see \code{\link[=flowlines]{flowlines()}})
which can be confusing for users and are difficult to plot.
}
\examples{
data(flow)
flow_oneway <- onewayid(flow, attrib = 3)
nrow(flow_oneway) < nrow(flow) # result has fewer rows
sum(flow$All) == sum(flow_oneway$All) # but the same total flow
# using names instead of index for attribute
onewayid(flow, attrib = "All")
# using many attributes to aggregate
attrib <- which(vapply(flow, is.numeric, TRUE))
flow_oneway <- onewayid(flow, attrib = attrib)
colSums(flow_oneway[attrib]) == colSums(flow[attrib]) # test if the colSums are equal
# Demonstrate the results from onewayid and onewaygeo are identical
flow_oneway_geo <- onewaygeo(flowlines, attrib = attrib)
plot(flow_oneway$All, flow_oneway_geo$All)
onewayid(flowlines_sf, "all")
# with spatial data
data(flowlines)
fo <- onewayid(flowlines, attrib = "All")
head(fo@data)
plot(fo)
sum(fo$All) == sum(flowlines$All)
# test results for one line
n <- 3
plot(fo[n, ], lwd = 20, add = TRUE)
f_over_n <- rgeos::gEquals(fo[n, ], flowlines["All"], byid = TRUE)
sum(flowlines$All[f_over_n]) == sum(fo$All[n]) # check aggregation worked
plot(flowlines[which(f_over_n)[1], ], add = TRUE, col = "white", lwd = 10)
plot(flowlines[which(f_over_n)[2], ], add = TRUE, lwd = 5)
}
/man/onewayid.Rd
permissive
seanolondon/stplanr
R
false
true
3,250
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlr3automl-package.R
\docType{package}
\name{mlr3automl-package}
\alias{mlr3automl}
\alias{mlr3automl-package}
\title{mlr3automl: AutoML in mlr3}
\description{
AutoML in mlr3.
}
\seealso{
Useful links:
\itemize{
  \item Report bugs at \url{https://github.com/a-hanf/mlr3automl/issues}
}
}
\author{
\strong{Maintainer}: Alexander Hanf \email{alexanderhanf@gmx.de}
}
\keyword{internal}
/man/mlr3automl-package.Rd
no_license
mb706/mlr3automl
R
false
true
464
rd
\name{parse.default}
\alias{parse.default}
\title{Default parser which simply emits the key and expression.}
\usage{
parse.default(key, rest, srcref)
}
\arguments{
\item{key}{the parsing key}

\item{rest}{the expression to be parsed}

\item{srcref}{srcref providing location of file name and line number}
}
\value{
A list containing the key and expression (possibly null)
}
\description{
Used for elements with optional values (like \code{@export}) where
roclets can do more sophisticated things with \code{NULL}.
}
\seealso{
Other preref parsing functions: \code{\link{parse.name}},
\code{\link{parse.name.description}}, \code{\link{parse.toggle}},
\code{\link{parse.unknown}}, \code{\link{parse.value}}
}
\keyword{internal}
/man/parse.default.Rd
no_license
miraisolutions/roxygen2
R
false
false
753
rd
# Load packages ----------------------------------------------------------------
library(shiny)
library(ggplot2)
library(dplyr)
library(tools)

# Load data --------------------------------------------------------------------
load("movies.RData")

# Define UI --------------------------------------------------------------------
ui <- fluidPage(
  titlePanel("Movie browser"),
  sidebarLayout(
    sidebarPanel(
      selectInput(
        inputId = "y",
        label = "Y-axis:",
        choices = c(
          "IMDB rating" = "imdb_rating",
          "IMDB number of votes" = "imdb_num_votes",
          "Critics Score" = "critics_score",
          "Audience Score" = "audience_score",
          "Runtime" = "runtime"
        ),
        selected = "audience_score"
      ),
      selectInput(
        inputId = "x",
        label = "X-axis:",
        choices = c(
          "IMDB rating" = "imdb_rating",
          "IMDB number of votes" = "imdb_num_votes",
          "Critics Score" = "critics_score",
          "Audience Score" = "audience_score",
          "Runtime" = "runtime"
        ),
        selected = "critics_score"
      ),
      selectInput(
        inputId = "z",
        label = "Color by:",
        choices = c(
          "Title Type" = "title_type",
          "Genre" = "genre",
          "MPAA Rating" = "mpaa_rating",
          "Critics Rating" = "critics_rating",
          "Audience Rating" = "audience_rating"
        ),
        selected = "mpaa_rating"
      ),
      textInput(
        inputId = "plot_title",
        label = "Plot title",
        placeholder = "Enter text for plot title"
      ),
      checkboxGroupInput(
        inputId = "selected_type",
        label = "Select movie type(s):",
        choices = c("Documentary", "Feature Film", "TV Movie"),
        selected = "Feature Film"
      )
    ),
    mainPanel(
      plotOutput(outputId = "scatterplot"),
      textOutput(outputId = "description")
    )
  )
)

# Define server ----------------------------------------------------------------
server <- function(input, output, session) {

  # Create a subset of data filtering for selected title types
  movies_subset <- reactive({
    req(input$selected_type)
    filter(movies, title_type %in% input$selected_type)
  })

  # Convert plot_title toTitleCase
  pretty_plot_title <- reactive({ toTitleCase(input$plot_title) })

  # Create scatterplot object the plotOutput function is expecting
  output$scatterplot <- renderPlot({
    ggplot(
      data = movies_subset(),
      aes_string(x = input$x, y = input$y, color = input$z)
    ) +
      geom_point() +
      labs(title = pretty_plot_title())
  })

  # Create descriptive text
  output$description <- renderText({
    paste0("The plot above titled '", pretty_plot_title(),
           "' visualizes the relationship between ", input$x, " and ", input$y,
           ", conditional on ", input$z, ".")
  })
}

# Create the Shiny app object --------------------------------------------------
shinyApp(ui = ui, server = server)
/5 Solutions/3-2a-debug-reactives-solution.R
no_license
ferkullj/activity13-shiny-apps
R
false
false
3,019
r
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00980775884956e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161)) result <- do.call(meteor:::ET0_Makkink,testlist) str(result)
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615845160-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
736
r
## read all data and filter on the requested dates; change columns for comfortable use.
household <- read.csv("household_power_consumption.txt",sep = ";")
household$Date <- as.Date(household$Date, "%d/%m/%Y")
household <- household[household$Date == "2007-02-01" | household$Date == "2007-02-02",]
full_time <- strptime(paste(household$Date, household$Time), format = "%Y-%m-%d %H:%M:%S")
household$Global_active_power <- as.numeric(household$Global_active_power)
household$Voltage <- as.numeric(as.character(household$Voltage))
household$Sub_metering_1 <- as.numeric(household$Sub_metering_1)
household$Sub_metering_2 <- as.numeric(household$Sub_metering_2)
household$Sub_metering_3 <- as.numeric(household$Sub_metering_3)
household$Global_reactive_power <- as.numeric(as.character(household$Global_reactive_power))

## change plotting device.
graphics.off()
bmp("plot4.bmp")

## creating the plots using plot, axis, lines and legend.
par(mfrow=c(2,2))

plot(full_time,household$Global_active_power, ylab = "Global Active Power (kilowatts)",yaxt='n',xlab = "",type = 'n')
axis(side=2, at=seq(0,3000, 1000), labels=seq(0,6,2))
lines(full_time, household$Global_active_power)

plot(full_time,household$Voltage, ylab = "Voltage",xlab = "",yaxt='n',type = 'n')
axis(side=2, at=c(234,238,242,246))
lines(full_time, household$Voltage)

plot(full_time,household$Sub_metering_1, ylab = "Energy Sub Metering",xlab = "",type = "n",yaxt = "n",ylim = c(0,40))
axis(side=2, at=c(0,10,20,30))
lines(full_time, household$Sub_metering_1)
lines(full_time, household$Sub_metering_2, col = "red")
lines(full_time, household$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue") ,lty = 1,bty="n")

plot(full_time,household$Global_reactive_power, ylab = "Global_reactive_power",xlab = "datetime",type = "n")
lines(full_time, household$Global_reactive_power)

## close plotting device.
graphics.off()
/plot4.R
no_license
cfiros/Coursera-ExploratoryAnalysis
R
false
false
1,963
r
#' Add Raw WoW data to the WoWRaw MongoDB database
#'
#' This function adds raw WoW data to the WoWRaw MongoDB database. If you need assistance please email \email{info@@datamuster.net.au} to seek help or suggest improvements.
#' @name add_wowraw
#' @param data is a single file of a column of weights including the RFID read that is inserted between the weights when it is read
#' @param username if you don't have a username set up using the dmaccess function you can pass a username, if no value added then the function looks for a value from dmaccess via keyring
#' @param password if you include a username you will also need to add a password; contact Lauren O'Connor if you don't have access
#' @return a message that indicates the data has been successfully added
#' @author Dave Swain \email{d.swain@@cqu.edu.au} and Lauren O'Connor \email{l.r.oconnor@@cqu.edu.au}
#' @import mongolite
#' @export

# Just need to add a read.csv object as the data stream e.g. data <- read.csv('20200116_121648_Kraatz_WoW_1.txt')
# The plan is to include some summary statistics as the data is uploaded. For example the RFID location finds where
# the RFID tag occurs in the weight stream. Greg is going to send me details of his algorithm so we can run his weight
# calculation at the same time that we add the data. The function needs to have a wrapper to read multiple
# csv files.

add_wowraw <- function(data, username, password){

  library(mongolite)
  library(jsonlite)
  library(dplyr)

  names(data) <- "data"

  weights <- data%>%filter(data<5000)
  RFID <- data%>%filter(data>5000)

  RFIDlocation <- which(data$data %in% RFID$data, arr.ind=TRUE)

  # print(RFIDlocation[1])
  # print(weights)
  # print(RFID)

  alldata <- toJSON(data$data)
  id <- toJSON(RFID$data)

  pass <- sprintf("mongodb://%s:%s@datamuster-shard-00-00-8mplm.mongodb.net:27017,datamuster-shard-00-01-8mplm.mongodb.net:27017,datamuster-shard-00-02-8mplm.mongodb.net:27017/test?ssl=true&replicaSet=DataMuster-shard-0&authSource=admin", username, password)

  WoWData <- mongo(collection = "WoWData", db = "WoWRaw", url = pass, verbose = T)

  total <- sprintf('{"RawData":%s, "RFID":%s}', alldata, id)

  WoWData$insert(total)

}
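# Editor's note: the helper below is not part of the original file. It is a minimal sketch
# of the multi-file wrapper described in the comments above, added for illustration only.
# The function name, the file pattern and the folder layout are assumptions about the WoW
# logger output; adjust them to match the real files before use.
add_wowraw_batch <- function(folder, username, password, pattern = "\\.txt$"){
  files <- list.files(folder, pattern = pattern, full.names = TRUE)
  for (f in files) {
    # mirrors the single-file example in the comments above
    dat <- read.csv(f)
    add_wowraw(dat, username = username, password = password)
  }
  invisible(files)
}
# Example (hypothetical folder name):
# add_wowraw_batch("wow_downloads", username = "user", password = "pass")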
/R/add_wowraw.R
no_license
PrecisionLivestockManagement/DMMongoDB
R
false
false
2,243
r
library(randomUniformForest) ### Name: randomUniformForest ### Title: Random Uniform Forests for Classification, Regression and ### Unsupervised Learning ### Aliases: randomUniformForest randomUniformForest.default ### randomUniformForest.formula plot.randomUniformForest ### print.randomUniformForest summary.randomUniformForest ### ** Examples ## not run ## NOTE : use option 'threads = 1' (disabling parallel processing) to speed up computing ## for small samples, since parallel processing is useful only for computationally ## intensive tasks ###### PART ONE : QUICK GUIDE #### Classification # data(iris) # iris.ruf <- randomUniformForest(Species ~ ., data = iris, threads = 1) ## Regular companions (from 1 to 18): ## 1 - model, parameters, statistics: # iris.ruf ## or print(iris.ruf) ## 2 - OOB error: # plot(iris.ruf, threads = 1) ## 3 - (global) variable importance, some statistics about trees: # summary(iris.ruf) #### Regression ## NOTE: when formula is used, missing values are automatically deleted and dummies ## are built for categorical features # data(airquality) # ozone.ruf <- randomUniformForest(Ozone ~ ., data = airquality, threads = 1) # ozone.ruf ## plot OOB error: # plot(ozone.ruf, threads = 1) ## 4 - Alternative modelling: ## 4.1 bagging like: # ozone.bagging <- randomUniformForest(Ozone ~ ., data = airquality, # bagging = TRUE, threads = 1) ## 4.2 Ensemble of totally randomized trees, e.g. purely random forest: # ozone.prf <- randomUniformForest(Ozone ~ ., data = airquality, # randomfeature = TRUE, threads = 1) ## 4.3 Extremely randomized trees like: # ozone.ETlike <- randomUniformForest(Ozone ~ ., data = airquality, # subsamplerate = 1, replace = FALSE, bagging = TRUE, mtry = floor((ncol(airquality)-1)/3), # threads = 1) ## Common case: use X, as a matrix or data frame, and Y, as a response vector, #### Classification : iris data, training and testing # data(iris) ## define random training and test sample : ## "Species" is the response vector # set.seed(2015) # iris.train_test <- init_values(iris[,-which(colnames(iris) == "Species")], iris$Species, # sample.size = 1/2) ## training and test samples: # iris.train = iris.train_test$xtrain # species.train = iris.train_test$ytrain # iris.test = iris.train_test$xtest # species.test = iris.train_test$ytest ## 5 - training and test (or validation) modelling: # iris.train_test.ruf <- randomUniformForest(iris.train, species.train, # xtest = iris.test, ytest = species.test, threads = 1) ## 6 - all-in-one results: # iris.train_test.ruf ## 7 - Alternative modelling: imbalanced classes ## balanced sampling (for example): equal sample size for all labels # iris.train_test.balancedsampling.ruf <- randomUniformForest(iris.train, species.train, # xtest = iris.test, ytest = species.test, rebalancedsampling = TRUE, threads = 1) ###### PART TWO : SUMMARIZED CASE STUDY #### Classification : Wine Quality data ## http://archive.ics.uci.edu/ml/datasets/Wine+Quality ## We use 'red wine quality' file : data have 1599 observations, 12 variables and 6 classes. 
# data(wineQualityRed) # wineQualityRed.data = wineQualityRed ## class and observations # Y = wineQualityRed.data[, "quality"] # X = wineQualityRed.data[, -which(colnames(wineQualityRed.data) == "quality")] ## First look : train model with default parameters (and retrieve estimates) # wineQualityRed.std.ruf <- randomUniformForest(X, as.factor(Y)) # wineQualityRed.std.ruf ## (global) Variable Importance: # summary(wineQualityRed.std.ruf) ## But some labels do not have enough observations to assess variable importance ## merging class 3 and 4. Merging class 7 and 8 to get enough labels. # Y[Y == 3] = 4 # Y[Y == 8] = 7 ## make Y as a factor, change names and get a summary # Y = as.factor(Y) # levels(Y) = c("3 or 4", "5", "6", "7 or 8") # table(Y) ## learn a new model to get a better view on variable importance ## NOTE: Y is now a factor, the model will catch the learning task as a classification one # wineQualityRed.new.ruf <- randomUniformForest(X, Y) # wineQualityRed.new.ruf ## global variable importance is more consistent # summary(wineQualityRed.new.ruf) ## plot OOB error (needs some computing) # plot(wineQualityRed.new.ruf) ## 8 - alternative Modelling: use subtrees (small trees, extended then reassembled) ## may change something, depending on data # wineQualityRed.new.ruf <- randomUniformForest(X, Y, usesubtrees = TRUE) ## 9 - deep variable importance: ## 9.1 - interactions are granular: use more for consistency, or less to see primary information ## 9.2 - a table is printed with details # importance.wineQualityRed <- importance(wineQualityRed.new.ruf, Xtest = X, maxInteractions = 6) ## 10 - visualization: ## 10.1 - global importance, interactions, importance based on interactions, ## importance based on labels, partial dependencies for all influential variables ## (loop over the prompt to get others partial dependencies) ## 10.2 - get more points, using option whichOrder = "all", default option. # plot(importance.wineQualityRed, Xtest = X, whichOrder = "first") ## 11 - Cluster analysis: (if quick answers are needed) ## Note: called 'cluster' since it was first designed for clustering ## 11.1 - choose the granularity : components, maximum features, (as) categorical ones ## 11.2 - get a compact view ## 11.3 - see how importance is explaining the data # analysis.wineQualityRed = clusterAnalysis(importance.wineQualityRed, X, components = 3, # maxFeatures = 3, clusteredObject = wineQualityRed.new.ruf, categorical = NULL, OOB = TRUE) ## 11.4 - interpretation: ## Numerical features average: a good wine has much less volatile acidity, ## much more citric acid, ... than a wine of low quality. ## Most influential features: while volatile.acidity seems to be important,... ## (Component frequencies:) ..., all variables must be taken into account, since information ## provided by the most important ones does not enough cover the whole available information. ## 11.5 - Complementarity: ## One does not forget to look plot of importance function. clusterAnalysis( ) ## is a summarized view of the former and should not have contradictory terms ## but, eventually, complementary ones. ## 12 - Partial importance: (local) variable importance per class ## which features for a very good wine (class 7 or 8) ? ## Note: in classification, partial importance is almost the same than "variable importance over ## labels", being more local but they have different interpretations. The former is exclusive. 
# pImportance.wineQualityRed.class7or8 <- partialImportance(X, importance.wineQualityRed, # whichClass = "7 or 8", nLocalFeatures = 6) ## 13 - Partial dependencies: how response relies to each variable or a pair of ones? ## 13.1 - admit options. ## get it feature after feature, recalling partial dependence and considering feature ## at the first order assuming the feature is the most important, ## at least for the class one need to assess. # pDependence.wineQualityRed.totalSulfurDioxide <- partialDependenceOverResponses(X, # importance.wineQualityRed, whichFeature = "total.sulfur.dioxide", # whichOrder = "first", outliersFilter = TRUE) ## 13.2 - Look for the second order (assuming the feature is the second most important, ## at least for the class one need to assess). # pDependence.wineQualityRed.totalSulfurDioxide <- partialDependenceOverResponses(X, # importance.wineQualityRed, whichFeature = "total.sulfur.dioxide", # whichOrder = "second", outliersFilter = TRUE) ## 13.3 - Look at all orders: no assumptions, simply look the average effect # pDependence.wineQualityRed.totalSulfurDioxide <- partialDependenceOverResponses(X, # importance.wineQualityRed, whichFeature = "total.sulfur.dioxide", # whichOrder = "all", outliersFilter = TRUE) ## see what happens then for "alcohol" (more points using option 'whichOrder = "all"') # pDependence.wineQualityRed.alcohol <- partialDependenceOverResponses(X, # importance.wineQualityRed, whichFeature = "alcohol", # whichOrder = "first", outliersFilter = TRUE) ## 13.4 - Translate interactions into dependence : pair of features ## is interaction leading to the same class (underlying structure)? ## is dependence linear ? ## for which values of the pair is the dependence the most effective ? # pDependence.wineQualityRed.sulfatesAndVolatileAcidity <- partialDependenceBetweenPredictors(X, # importance.wineQualityRed, c("sulphates", "volatile.acidity"), # whichOrder = "all", outliersFilter = TRUE) #### Regression : Auto MPG ## http://archive.ics.uci.edu/ml/datasets/Auto+MPG ## 398 observations, 8 variables, ## Variable to predict : "mpg", miles per gallon # data(autoMPG) # autoMPG.data = autoMPG # Y = autoMPG.data[,"mpg"] # X = autoMPG.data[,-which(colnames(autoMPG.data) == "mpg")] ## remove "car name" which is a variable with unique ID (car models) # X = X[, -which(colnames(X) == "car name")] ## train the default model and get OOB evaluation # autoMPG.ruf <- randomUniformForest(X, Y) ## assess variable importance (ask more points with 'maxInteractions' option) ## NOTE: importance strongly depends on 'ntree' and 'mtry' parameters # importance.autoMPG <- importance(autoMPG.ruf, Xtest = X) ## 14 - Dependence on most important predictors: marginal distribution of the response ## over each variable # plot(importance.autoMPG, Xtest = X) ## 15 - Extrapolation: ## recalling partial dependencies and getting points ## NOTE : points are the result of the forest classifier and not of the training responses # pDependence.autoMPG.weight <- partialDependenceOverResponses(X, importance.autoMPG, # whichFeature = "weight", whichOrder = "all", outliersFilter = TRUE) ## 16 - Visualization again: view as discrete values # visualize again 'model year' as a discrete variable and not as a continuous one # pDependence.autoMPG.modelyear <- partialDependenceOverResponses(X, importance.autoMPG, # whichFeature = "model year", whichOrder = "all", maxClasses = 30) ## 16 - Partial importance for regression: see important variables only for a part ## of response values ## what are the features 
that lead to a lower consumption (and high mpg)? # pImportance.autoMPG.high <- partialImportance(X, importance.autoMPG, # threshold = mean(Y), thresholdDirection = "high", nLocalFeatures = 6) ## 17 - Partial dependencies between covariates: ## look at "weight" and "acceleration" dependence # pDependence.autoMPG.weightAndAcceleration <- # partialDependenceBetweenPredictors(X, importance.autoMPG, c("weight", "acceleration"), # whichOrder = "all", perspective = FALSE, outliersFilter = TRUE) ## 18 - More visualization: 3D (looking to the prompt to start animation) ## Note: requires some computation # pDependence.autoMPG.weightAndAcceleration <- # partialDependenceBetweenPredictors(X, importance.autoMPG, c("weight", "acceleration"), # whichOrder = "all", perspective = TRUE, outliersFilter = FALSE) ##dtFW
/data/genthat_extracted_code/randomUniformForest/examples/randomUniformForest.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
11,024
r
context("Bounding box") # Checks the values in the ratio-of-uniforms bounding box returned by ru() # for some examples in which these values can be found explicitly. # The examples are: # A: d-dimensional normal density for d = 1, 2, 3. # B: 1-dimensional log-normal density under log transformation back to the # normal density # C: 1-dimensional gamma density, with shape parameter not less than 1 # A. d-dimensional normal density normal_box <- function(d, sigma = diag(d), rotate = TRUE, r = 1 / 2) { # # Calculates bounding box values in the case of a zero-mean unnormalized # d-dimensional normal density, with arbitrary covariance structure. # "Unnormalized" means that the function has a maximum of 1 attained # at the origin, which is what is required for comparabilty with the # output from the function ru(), which scales the input function to have # a maximum of 1 and relocates the mode to the origin. # # Args: # d : dimension of target density. # sigma : the covariance matrix of the normal density. # rotate : should the rotation transformation used in the function ru() # be applied? # r : ratio-of-uniforms tuning parameter. # # Returns: # box : a (2 * d + 1) by d + 2 matrix of ratio-of-uniforms bounding box # information with the same structure as object$box returned by # ru(). # # Make sure that sigma is a matrix sigma <- as.matrix(sigma) # If d > 1 and if rotate = TRUE calculate the covariance matrix after # the rotation applied in the function ru. The inverse of the # covariance matrix sigma plays the role of hess_mat in the function ru. if (rotate & d > 1) { l_mat <- t(chol(solve(sigma))) l_mat <- l_mat / det(l_mat) ^ (1/d) sigma <- t(l_mat) %*% sigma %*% l_mat } # Respective values at which bminus and bplus are obtained for each of # the d marginal variables in the N(0, 1), independent components case. val <- sqrt((r * d + 1) / r) # Calculate box ar <- 1 bplus <- val * exp(-1 /2) * sqrt(diag(sigma)) bminus <- -bplus box <- c(ar, bminus, bplus) # Adjustment of val for the marginal variances and for the correlation # between the components. adj_mat <- t(sqrt(diag(sigma)) * stats::cov2cor(sigma)) vals <- rbind(rep(0, d), -val * adj_mat, val * adj_mat) colnames(vals) <- paste("vals", 1:d, sep="") # We hope to get perfect convegence indicators conv <- rep(0, 2 * d + 1) # Create matrix of bounding box information box <- cbind(box, vals, conv) bs <- paste(paste("b", 1:d, sep=""),rep(c("minus", "plus"), each=d), sep="") rownames(box) <- c("a", bs) # return(box) } my_tol <- 1e-5 # 1. 1-dimensional normal # (a) N(0, 1) x1a <- ru(logf = function(x) -x ^ 2 / 2, d = 1, n = 1, init = 0) test_that("N(0,1)", { testthat::expect_equal(x1a$box, normal_box(d = 1), tolerance = my_tol) }) # (b) N(0, 2) sigma <- 2 # Note: sigma means variance here x1b <- ru(logf = function(x) -x ^ 2 / (2 * sigma), d = 1, n = 1, init = 0) test_that("N(0,2)", { testthat::expect_equal(x1b$box, normal_box(d = 1, sigma = 2), tolerance = my_tol) }) # (c) N(1, 2) sigma <- 2 # Note: sigma means variance here x1c <- ru(logf = function(x) -(x - 1) ^ 2 / (2 * sigma), d = 1, n = 1, init = 0) testthat::expect_equal(x1c$box, normal_box(d = 1, sigma = 2), tolerance = my_tol) # 2. 2-dimensional normal # (a) Zero mean, unit variances and independent components # Note: rotating shouldn't make any difference in this example. 
# (i) rotate = TRUE x2ai <- ru(logf = function(x) -(x[1]^2 + x[2]^2) / 2, d = 2, n = 1000, init = c(0, 0), rotate = TRUE) test_that("BVN, rotation", { testthat::expect_equal(x2ai$box, normal_box(d = 2), tolerance = my_tol) }) # (ii) rotate = FALSE x2aii <- ru(logf = function(x) -(x[1]^2 + x[2]^2) / 2, d = 2, n = 1000, init = c(0, 0), rotate = FALSE) test_that("BVN, no rotation", { testthat::expect_equal(x2aii$box, normal_box(d = 2), tolerance = my_tol) }) # Function to evaluate the unnormalized log-density of a d-dimensional # normal distribution log_dmvnorm <- function(x, mean = rep(0, d), sigma = diag(d)) { x <- matrix(x, ncol = length(x)) d <- ncol(x) - 0.5 * (x - mean) %*% solve(sigma) %*% t(x - mean) } # (b) Zero mean, unit variances with positive association rho <- 0.9 covmat <- matrix(c(1, rho, rho, 1), 2, 2) # (i) rotate = FALSE x2bi <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = FALSE) test_that("BVN, rho = 0.9, no rotation", { testthat::expect_equal(x2bi$box, normal_box(d = 2, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x2bii <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = TRUE) test_that("BVN, rho = 0.9, rotation", { testthat::expect_equal(x2bii$box, normal_box(d = 2, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # (c) Zero mean, different variances with positive association covmat <- matrix(c(10, 3, 3, 2), 2, 2) # (i) rotate = FALSE x2ci <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = FALSE) test_that("BVN, general Sigma, no rotation", { testthat::expect_equal(x2ci$box, normal_box(d = 2, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x2cii <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = TRUE) test_that("BVN, general Sigma, rotation", { testthat::expect_equal(x2cii$box, normal_box(d = 2, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # (d) Mean (1,2), different variances with negative association covmat <- matrix(c(10, -3, -3, 2), 2, 2) # (i) rotate = FALSE x2di <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = FALSE, mean = c(1, 2)) test_that("BVN, non-zero mu, general Sigma, no rotation", { testthat::expect_equal(x2di$box, normal_box(d = 2, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x2dii <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = TRUE, mean = c(1, 2)) test_that("BVN, non-zero mu, general Sigma, rotation", { testthat::expect_equal(x2dii$box, normal_box(d = 2, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # 3. 3-dimensional normal # (a) Zero mean, unit variances with positive association covmat <- matrix(rho, 3, 3) + diag(1 - rho, 3) # (i) rotate = FALSE x3ai <- ru(logf = log_dmvnorm, sigma = covmat, d = 3, n = 1, init = c(0, 0, 0), rotate = FALSE) test_that("TVN, rho = 0.9, no rotation", { testthat::expect_equal(x3ai$box, normal_box(d = 3, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x3aii <- ru(logf = log_dmvnorm, sigma = covmat, d = 3, n = 1, init = c(0, 0, 0), rotate = TRUE) test_that("TVN, rho = 0.9, rotation", { testthat::expect_equal(x3aii$box, normal_box(d = 3, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # B. 
1-dimensional log-normal density # Check that using a log transformation takes us back to the standard # normal case lambda <- 0 x <- ru(logf = stats::dlnorm, log = TRUE, d = 1, n = 1, init = 0.1, trans = "BC", lambda = lambda) test_that("Log-normal", { testthat::expect_equal(x$box, normal_box(d = 1), tolerance = my_tol) }) # C: 1-dimensional gamma density, with shape parameter not less than 1 gamma_box <- function(shape = 1, rate = 1, r = 1 / 2) { # # Calculates bounding box values in the case of a zero-mean unnormalized # d-dimensional normal density, with arbitrary covariance structure. # "Unnormalized" means that the function has a maximum of 1 attained # at the origin, which is what is required for comparabilty with the # output from the function ru(), which scales the input function to have # a maximum of 1 and relocates the mode to the origin. # # Args: # shape : gamma shape parameter. # rate : gamma rate parameter. # r : ratio-of-uniforms tuning parameter. # # Returns: # box : a 3 by 3 matrix of ratio-of-uniforms bounding box information # with the same structure as object$box returned by ru(). # if (shape < 1) { stop("A ratio-of-uniforms bounding box cannot be found when shape < 1") } # Mode of the gamma(shape, rate) density x_mode <- (shape - 1) / rate # Value of the (normalized) density at the mode x_max <- stats::dgamma(x_mode, shape = shape, rate = rate) # # Calculate box ar <- 1 a_quad <- rate * r / (r + 1) b_quad <- -(1 + r * (shape - 1) / (r + 1) - a_quad * x_mode) c_quad <- -x_mode b_vals <- Re(polyroot(c(c_quad, b_quad, a_quad))) vals1 <- c(0, b_vals) b_fun <- function(x) { x * (stats::dgamma(x + x_mode, shape = shape) / x_max) ^ (r / (r + 1)) } box <- c(ar, b_fun(b_vals)) # Create matrix of bounding box information conv <- rep(0, 3) box <- cbind(box, vals1, conv) bs <- paste(paste("b", 1, sep=""),rep(c("minus", "plus"), each=1), sep="") rownames(box) <- c("a", bs) # return(box) } # Shape = 1 x1 <- suppressWarnings(ru(logf = dgamma, shape = 1, log = TRUE, d = 1, n = 1, lower = 0, init = 1)) test_that("Gamma(1, 1)", { testthat::expect_equal(x1$box, gamma_box(shape = 1), tolerance = my_tol) }) # Shape = 10 x2 <- ru(logf = dgamma, shape = 10, log = TRUE, d = 1, n = 1, lower = 0, init = 10) test_that("Gamma(10, 1)", { testthat::expect_equal(x2$box, gamma_box(shape = 10), tolerance = my_tol) })
/fuzzedpackages/rust/tests/testthat/test-ru.R
no_license
akhikolla/testpackages
R
false
false
10,447
r
context("Bounding box") # Checks the values in the ratio-of-uniforms bounding box returned by ru() # for some examples in which these values can be found explicitly. # The examples are: # A: d-dimensional normal density for d = 1, 2, 3. # B: 1-dimensional log-normal density under log transformation back to the # normal density # C: 1-dimensional gamma density, with shape parameter not less than 1 # A. d-dimensional normal density normal_box <- function(d, sigma = diag(d), rotate = TRUE, r = 1 / 2) { # # Calculates bounding box values in the case of a zero-mean unnormalized # d-dimensional normal density, with arbitrary covariance structure. # "Unnormalized" means that the function has a maximum of 1 attained # at the origin, which is what is required for comparabilty with the # output from the function ru(), which scales the input function to have # a maximum of 1 and relocates the mode to the origin. # # Args: # d : dimension of target density. # sigma : the covariance matrix of the normal density. # rotate : should the rotation transformation used in the function ru() # be applied? # r : ratio-of-uniforms tuning parameter. # # Returns: # box : a (2 * d + 1) by d + 2 matrix of ratio-of-uniforms bounding box # information with the same structure as object$box returned by # ru(). # # Make sure that sigma is a matrix sigma <- as.matrix(sigma) # If d > 1 and if rotate = TRUE calculate the covariance matrix after # the rotation applied in the function ru. The inverse of the # covariance matrix sigma plays the role of hess_mat in the function ru. if (rotate & d > 1) { l_mat <- t(chol(solve(sigma))) l_mat <- l_mat / det(l_mat) ^ (1/d) sigma <- t(l_mat) %*% sigma %*% l_mat } # Respective values at which bminus and bplus are obtained for each of # the d marginal variables in the N(0, 1), independent components case. val <- sqrt((r * d + 1) / r) # Calculate box ar <- 1 bplus <- val * exp(-1 /2) * sqrt(diag(sigma)) bminus <- -bplus box <- c(ar, bminus, bplus) # Adjustment of val for the marginal variances and for the correlation # between the components. adj_mat <- t(sqrt(diag(sigma)) * stats::cov2cor(sigma)) vals <- rbind(rep(0, d), -val * adj_mat, val * adj_mat) colnames(vals) <- paste("vals", 1:d, sep="") # We hope to get perfect convegence indicators conv <- rep(0, 2 * d + 1) # Create matrix of bounding box information box <- cbind(box, vals, conv) bs <- paste(paste("b", 1:d, sep=""),rep(c("minus", "plus"), each=d), sep="") rownames(box) <- c("a", bs) # return(box) } my_tol <- 1e-5 # 1. 1-dimensional normal # (a) N(0, 1) x1a <- ru(logf = function(x) -x ^ 2 / 2, d = 1, n = 1, init = 0) test_that("N(0,1)", { testthat::expect_equal(x1a$box, normal_box(d = 1), tolerance = my_tol) }) # (b) N(0, 2) sigma <- 2 # Note: sigma means variance here x1b <- ru(logf = function(x) -x ^ 2 / (2 * sigma), d = 1, n = 1, init = 0) test_that("N(0,2)", { testthat::expect_equal(x1b$box, normal_box(d = 1, sigma = 2), tolerance = my_tol) }) # (c) N(1, 2) sigma <- 2 # Note: sigma means variance here x1c <- ru(logf = function(x) -(x - 1) ^ 2 / (2 * sigma), d = 1, n = 1, init = 0) testthat::expect_equal(x1c$box, normal_box(d = 1, sigma = 2), tolerance = my_tol) # 2. 2-dimensional normal # (a) Zero mean, unit variances and independent components # Note: rotating shouldn't make any difference in this example. 
# (i) rotate = TRUE x2ai <- ru(logf = function(x) -(x[1]^2 + x[2]^2) / 2, d = 2, n = 1000, init = c(0, 0), rotate = TRUE) test_that("BVN, rotation", { testthat::expect_equal(x2ai$box, normal_box(d = 2), tolerance = my_tol) }) # (ii) rotate = FALSE x2aii <- ru(logf = function(x) -(x[1]^2 + x[2]^2) / 2, d = 2, n = 1000, init = c(0, 0), rotate = FALSE) test_that("BVN, no rotation", { testthat::expect_equal(x2aii$box, normal_box(d = 2), tolerance = my_tol) }) # Function to evaluate the unnormalized log-density of a d-dimensional # normal distribution log_dmvnorm <- function(x, mean = rep(0, d), sigma = diag(d)) { x <- matrix(x, ncol = length(x)) d <- ncol(x) - 0.5 * (x - mean) %*% solve(sigma) %*% t(x - mean) } # (b) Zero mean, unit variances with positive association rho <- 0.9 covmat <- matrix(c(1, rho, rho, 1), 2, 2) # (i) rotate = FALSE x2bi <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = FALSE) test_that("BVN, rho = 0.9, no rotation", { testthat::expect_equal(x2bi$box, normal_box(d = 2, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x2bii <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = TRUE) test_that("BVN, rho = 0.9, rotation", { testthat::expect_equal(x2bii$box, normal_box(d = 2, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # (c) Zero mean, different variances with positive association covmat <- matrix(c(10, 3, 3, 2), 2, 2) # (i) rotate = FALSE x2ci <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = FALSE) test_that("BVN, general Sigma, no rotation", { testthat::expect_equal(x2ci$box, normal_box(d = 2, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x2cii <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = TRUE) test_that("BVN, general Sigma, rotation", { testthat::expect_equal(x2cii$box, normal_box(d = 2, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # (d) Mean (1,2), different variances with negative association covmat <- matrix(c(10, -3, -3, 2), 2, 2) # (i) rotate = FALSE x2di <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = FALSE, mean = c(1, 2)) test_that("BVN, non-zero mu, general Sigma, no rotation", { testthat::expect_equal(x2di$box, normal_box(d = 2, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x2dii <- ru(logf = log_dmvnorm, sigma = covmat, d = 2, n = 1, init = c(0, 0), rotate = TRUE, mean = c(1, 2)) test_that("BVN, non-zero mu, general Sigma, rotation", { testthat::expect_equal(x2dii$box, normal_box(d = 2, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # 3. 3-dimensional normal # (a) Zero mean, unit variances with positive association covmat <- matrix(rho, 3, 3) + diag(1 - rho, 3) # (i) rotate = FALSE x3ai <- ru(logf = log_dmvnorm, sigma = covmat, d = 3, n = 1, init = c(0, 0, 0), rotate = FALSE) test_that("TVN, rho = 0.9, no rotation", { testthat::expect_equal(x3ai$box, normal_box(d = 3, sigma = covmat, rotate = FALSE), tolerance = my_tol) }) # (ii) rotate = TRUE x3aii <- ru(logf = log_dmvnorm, sigma = covmat, d = 3, n = 1, init = c(0, 0, 0), rotate = TRUE) test_that("TVN, rho = 0.9, rotation", { testthat::expect_equal(x3aii$box, normal_box(d = 3, sigma = covmat, rotate = TRUE), tolerance = my_tol) }) # B. 
1-dimensional log-normal density # Check that using a log transformation takes us back to the standard # normal case lambda <- 0 x <- ru(logf = stats::dlnorm, log = TRUE, d = 1, n = 1, init = 0.1, trans = "BC", lambda = lambda) test_that("Log-normal", { testthat::expect_equal(x$box, normal_box(d = 1), tolerance = my_tol) }) # C: 1-dimensional gamma density, with shape parameter not less than 1 gamma_box <- function(shape = 1, rate = 1, r = 1 / 2) { # # Calculates bounding box values in the case of a zero-mean unnormalized # d-dimensional normal density, with arbitrary covariance structure. # "Unnormalized" means that the function has a maximum of 1 attained # at the origin, which is what is required for comparabilty with the # output from the function ru(), which scales the input function to have # a maximum of 1 and relocates the mode to the origin. # # Args: # shape : gamma shape parameter. # rate : gamma rate parameter. # r : ratio-of-uniforms tuning parameter. # # Returns: # box : a 3 by 3 matrix of ratio-of-uniforms bounding box information # with the same structure as object$box returned by ru(). # if (shape < 1) { stop("A ratio-of-uniforms bounding box cannot be found when shape < 1") } # Mode of the gamma(shape, rate) density x_mode <- (shape - 1) / rate # Value of the (normalized) density at the mode x_max <- stats::dgamma(x_mode, shape = shape, rate = rate) # # Calculate box ar <- 1 a_quad <- rate * r / (r + 1) b_quad <- -(1 + r * (shape - 1) / (r + 1) - a_quad * x_mode) c_quad <- -x_mode b_vals <- Re(polyroot(c(c_quad, b_quad, a_quad))) vals1 <- c(0, b_vals) b_fun <- function(x) { x * (stats::dgamma(x + x_mode, shape = shape) / x_max) ^ (r / (r + 1)) } box <- c(ar, b_fun(b_vals)) # Create matrix of bounding box information conv <- rep(0, 3) box <- cbind(box, vals1, conv) bs <- paste(paste("b", 1, sep=""),rep(c("minus", "plus"), each=1), sep="") rownames(box) <- c("a", bs) # return(box) } # Shape = 1 x1 <- suppressWarnings(ru(logf = dgamma, shape = 1, log = TRUE, d = 1, n = 1, lower = 0, init = 1)) test_that("Gamma(1, 1)", { testthat::expect_equal(x1$box, gamma_box(shape = 1), tolerance = my_tol) }) # Shape = 10 x2 <- ru(logf = dgamma, shape = 10, log = TRUE, d = 1, n = 1, lower = 0, init = 10) test_that("Gamma(10, 1)", { testthat::expect_equal(x2$box, gamma_box(shape = 10), tolerance = my_tol) })
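## A quick numerical check of the closed form used in normal_box() above: with the
## default r = 1/2 and d = 1, the "b1plus" edge of the N(0, 1) bounding box is
## sqrt((r + 1) / r) * exp(-1 / 2), roughly 1.05.
r <- 1 / 2
sqrt((r + 1) / r) * exp(-1 / 2)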
## After first checking if the inverse for the matrix has not already been ## calculated and cached, the cacheSolve() function will calculate the inverse ## of a matrix created with the makeCacheMatrix() function and cache the value. ## If cacheSolve() is called again for the same matrix, then the cached inverse ## value will be returned ## makeCacheMatrix() creates a special matrix that is a list storing the ## matrix, its cached inverse, and functions to set and get its inverse. makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinv <- function(inverse) inv <<- inverse getinv <- function() inv list(set = set, get = get, setinv = setinv, getinv = getinv) } ## cacheSolve() checks for a cached inverse of the matrix created with ## makeCacheMatrix() and returns it if stored. Else, cacheSolve() calculates the ## inverse of the matrix and saves the value in the special matrix. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getinv() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() inv <- solve(data, ...) x$setinv(inv) inv }
/cachematrix.R
no_license
namdnguyen/ProgrammingAssignment2
R
false
false
1,291
r
## After first checking if the inverse for the matrix has not already been ## calculated and cached, the cacheSolve() function will calculate the inverse ## of a matrix created with the makeCacheMatrix() function and cache the value. ## If cacheSolve() is called again for the same matrix, then the cached inverse ## value will be returned ## makeCacheMatrix() creates a special matrix that is a list storing the ## matrix, its cached inverse, and functions to set and get its inverse. makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinv <- function(inverse) inv <<- inverse getinv <- function() inv list(set = set, get = get, setinv = setinv, getinv = getinv) } ## cacheSolve() checks for a cached inverse of the matrix created with ## makeCacheMatrix() and returns it if stored. Else, cacheSolve() calculates the ## inverse of the matrix and saves the value in the special matrix. cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getinv() if(!is.null(inv)) { message("getting cached data") return(inv) } data <- x$get() inv <- solve(data, ...) x$setinv(inv) inv }
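## A short usage sketch for the two functions above (the 2 x 2 matrix is an arbitrary
## invertible example):
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)   # first call computes and caches the inverse (diag(0.5, 2))
cacheSolve(m)   # second call prints "getting cached data" and returns the cached value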
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/densitylik.R
\name{pi.x}
\alias{pi.x}
\title{One form of modelling the density gradient}
\usage{
pi.x(x, sigma, beta, w)
}
\arguments{
\item{x}{the perpendicular distance}
\item{sigma}{a Gaussian standard deviation}
\item{beta}{a multiplicative constant, helping to improve the fit;
note that if beta > 0, animals avoid the linear structure, and if beta < 0, they prefer it}
\item{w}{truncation distance}
}
\value{
component of the likelihood
}
\description{
Not to be called by the user; support function for \code{likdensity}
}
/man/pi.x.Rd
no_license
DistanceDevelopment/densgrad
R
false
true
592
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/densitylik.R
\name{pi.x}
\alias{pi.x}
\title{One form of modelling the density gradient}
\usage{
pi.x(x, sigma, beta, w)
}
\arguments{
\item{x}{the perpendicular distance}
\item{sigma}{a Gaussian standard deviation}
\item{beta}{a multiplicative constant, helping to improve the fit;
note that if beta > 0, animals avoid the linear structure, and if beta < 0, they prefer it}
\item{w}{truncation distance}
}
\value{
component of the likelihood
}
\description{
Not to be called by the user; support function for \code{likdensity}
}
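## A hedged usage sketch for the documented signature pi.x(x, sigma, beta, w): the parameter
## values below are arbitrary, and the normalisation step assumes pi.x() returns an
## unnormalised, non-negative weight over perpendicular distances in [0, w]; it is not taken
## from the densgrad source.
library(densgrad)
norm_const <- integrate(function(x) sapply(x, pi.x, sigma = 1, beta = 0.5, w = 2),
                        lower = 0, upper = 2)$value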
library(phonTools) ### Name: voweldata ### Title: Information about Vowel Data Sets ### Aliases: voweldata ### ** Examples data(voweldata)
/data/genthat_extracted_code/phonTools/examples/voweldata.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
146
r
library(phonTools) ### Name: voweldata ### Title: Information about Vowel Data Sets ### Aliases: voweldata ### ** Examples data(voweldata)
###################################
## ICDR
###################################

## packages
library(tidyverse)
library(sf)
library(tmap)
library(tmaptools)

## download
tictoc::tic()
times <- map(unique(tigris::fips_codes$state)[1:51],
             ~lehdr::grab_lodes(state = .x,
                                year = 2015,
                                lodes_type = 'od',
                                job_type = 'JT00',
                                segment = 'S000',
                                agg_geo = 'BG',
                                state_part = "aux",
                                download_dir = "~/Desktop/R/git/networks/data/lodes/",
                                use_cache = TRUE))

## load
lodes <- map_dfr(fs::dir_ls("~/Desktop/R/git/networks/data/lodes/"),
                 ~vroom::vroom(.x,
                               col_select = c("w_geocode", "h_geocode", "S000"),
                               col_types = cols(w_geocode = col_character(),
                                                h_geocode = col_character(),
                                                S000 = col_integer())) %>%
                   filter(S000 > 1))

## block groups for country
blocks <- reduce(
  map(unique(tigris::fips_codes$state)[1:51],
      ~tigris::block_groups(state = .x, cb = TRUE, year = 2015)),
  rbind)

## move alaska and hawaii
blocks <- tigris::shift_geometry(blocks, geoid_column = "STATEFP")
centroids <- st_centroid(blocks)

plot(st_geometry(centroids))

## generate origin-destination shape
lines <- od::od_to_sf(lodes %>%
                        mutate(w_geocode = str_sub(w_geocode, 1, 12),
                               h_geocode = str_sub(h_geocode, 1, 12)) %>%
                        group_by(w_geocode, h_geocode) %>%
                        summarise(flow = sum(S000)),
                      select(centroids, GEOID))

## dissolve the background
border <- blocks %>%
  st_union() %>%
  st_combine()

## map it
commutes <-
  tm_shape(border) +
  tm_borders(col = '#7c7c7c', lwd = 0.5, lty = 1) +
  tm_shape(lines) +
  tm_lines(col = '#000000', lwd = "flow", alpha = 0.5, scale = 2) +
  tm_layout(title = "COMMUTES",
            frame = FALSE,
            legend.position = c("RIGHT", "BOTTOM"))

tictoc::tic()
tmap_save(commutes, height = 10, dpi = 300)
tictoc::toc()
/R/lodes.R
no_license
asrenninger/tinkering
R
false
false
2,202
r
###################################
## ICDR
###################################

## packages
library(tidyverse)
library(sf)
library(tmap)
library(tmaptools)

## download
tictoc::tic()
times <- map(unique(tigris::fips_codes$state)[1:51],
             ~lehdr::grab_lodes(state = .x,
                                year = 2015,
                                lodes_type = 'od',
                                job_type = 'JT00',
                                segment = 'S000',
                                agg_geo = 'BG',
                                state_part = "aux",
                                download_dir = "~/Desktop/R/git/networks/data/lodes/",
                                use_cache = TRUE))

## load
lodes <- map_dfr(fs::dir_ls("~/Desktop/R/git/networks/data/lodes/"),
                 ~vroom::vroom(.x,
                               col_select = c("w_geocode", "h_geocode", "S000"),
                               col_types = cols(w_geocode = col_character(),
                                                h_geocode = col_character(),
                                                S000 = col_integer())) %>%
                   filter(S000 > 1))

## block groups for country
blocks <- reduce(
  map(unique(tigris::fips_codes$state)[1:51],
      ~tigris::block_groups(state = .x, cb = TRUE, year = 2015)),
  rbind)

## move alaska and hawaii
blocks <- tigris::shift_geometry(blocks, geoid_column = "STATEFP")
centroids <- st_centroid(blocks)

plot(st_geometry(centroids))

## generate origin-destination shape
lines <- od::od_to_sf(lodes %>%
                        mutate(w_geocode = str_sub(w_geocode, 1, 12),
                               h_geocode = str_sub(h_geocode, 1, 12)) %>%
                        group_by(w_geocode, h_geocode) %>%
                        summarise(flow = sum(S000)),
                      select(centroids, GEOID))

## dissolve the background
border <- blocks %>%
  st_union() %>%
  st_combine()

## map it
commutes <-
  tm_shape(border) +
  tm_borders(col = '#7c7c7c', lwd = 0.5, lty = 1) +
  tm_shape(lines) +
  tm_lines(col = '#000000', lwd = "flow", alpha = 0.5, scale = 2) +
  tm_layout(title = "COMMUTES",
            frame = FALSE,
            legend.position = c("RIGHT", "BOTTOM"))

tictoc::tic()
tmap_save(commutes, height = 10, dpi = 300)
tictoc::toc()
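## One optional refinement before mapping: with every block-group pair included, the line
## layer can be very heavy, so keeping only the largest flows (the cut-off here is arbitrary)
## makes the plot far lighter:
heavy_lines <- lines %>% filter(flow >= quantile(flow, 0.99))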
# iPlots testing source('~/R Work/GDAwithR/code/StartLibraries.R') library(iplots) # Mosaic Plot library(MASS) data("Cars93") head(Cars93) imosaic(Cars93[, c("AirBags", "Cylinders", "Origin")]) # Barcharts ibar(Cars93$Cylinders) # Parallel Coordinate Plots ipcp(Cars93[c(4:8,12:15,17,19:25)]) # Parallel Box Plot ibox(Cars93[4:6]) # Boxplot y by x ibox(Cars93$Horsepower, Cars93$Cylinders) # Scatterplot iplot(Cars93$Horsepower, Cars93$MPG.city) l <- lowess(Cars93$Horsepower,Cars93$ MPG.city) ilines(l) iobj.rm() l <- lowess(Cars93$Horsepower,Cars93$ MPG.city, f = 0.5) ilines(l) iplot.opt(ptDiam=5, col=unclass(Cars93$Cylinders)) # Histograms ihist(Cars93$Horsepower) # Selections iset.selected() sum(sign(iset.selected()))/length(Cars93$ Horsepower) iset.select(Cars93$Horsepower >= 240) Cars93[iset.selected(),] iset.select(Cars93$Horsepower > 0) # Color Brush ibar(Cars93$Cylinders) iplot(Cars93$MPG.city, Cars93$MPG.highway) # iObjects iplot(Cars93$EngineSize, Cars93$Horsepower) # select some points subs <- iset.selected() iabline(lm(Horsepower~EngineSize, data = Cars93[subs,])) # Conventions
/code/iPlots Samples.R
no_license
jfortuny/GDAwithR
R
false
false
1,114
r
# iPlots testing source('~/R Work/GDAwithR/code/StartLibraries.R') library(iplots) # Mosaic Plot library(MASS) data("Cars93") head(Cars93) imosaic(Cars93[, c("AirBags", "Cylinders", "Origin")]) # Barcharts ibar(Cars93$Cylinders) # Parallel Coordinate Plots ipcp(Cars93[c(4:8,12:15,17,19:25)]) # Parallel Box Plot ibox(Cars93[4:6]) # Boxplot y by x ibox(Cars93$Horsepower, Cars93$Cylinders) # Scatterplot iplot(Cars93$Horsepower, Cars93$MPG.city) l <- lowess(Cars93$Horsepower,Cars93$ MPG.city) ilines(l) iobj.rm() l <- lowess(Cars93$Horsepower,Cars93$ MPG.city, f = 0.5) ilines(l) iplot.opt(ptDiam=5, col=unclass(Cars93$Cylinders)) # Histograms ihist(Cars93$Horsepower) # Selections iset.selected() sum(sign(iset.selected()))/length(Cars93$ Horsepower) iset.select(Cars93$Horsepower >= 240) Cars93[iset.selected(),] iset.select(Cars93$Horsepower > 0) # Color Brush ibar(Cars93$Cylinders) iplot(Cars93$MPG.city, Cars93$MPG.highway) # iObjects iplot(Cars93$EngineSize, Cars93$Horsepower) # select some points subs <- iset.selected() iabline(lm(Horsepower~EngineSize, data = Cars93[subs,])) # Conventions
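## One more linked-selection example in the same spirit (the weight threshold is arbitrary);
## the selection propagates to every open iplot/ibar/ihist window.
iset.select(Cars93$Weight > 3500)
length(iset.selected()) / nrow(Cars93)   # share of cars currently highlighted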
# Download and unzip data if (!file.exists('PowerConsumption.zip')) { filetodownload <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip' download.file(filetodownload,destfile = 'PowerConsumption.zip') } unzip('PowerConsumption.zip',exdir = './Class4Week1') ##Load packages library(dplyr) ##Load and Filter Data readings<-read.table(file = './Class4Week1/household_power_consumption.txt', header = TRUE, sep = ";",na.strings = "?",stringsAsFactors = FALSE) readings2<-filter(readings, Date %in% c("1/2/2007","2/2/2007")) ##object.size(readings) & Remove Original because of size rm(readings) ##Build Chart par(mfrow=c(1,1)) hist(readings2$Global_active_power,col="red", xlab="Global Active Power (kilowatts)",ylab="Frequency",main = "Global Active Power") ##Make png file dev.copy(png, file = "./GitHub/ExData_Plotting1/plot1.png",height=480, width=480) ##Close Device dev.off()
/plot1.R
no_license
dsseifried/ExData_Plotting1
R
false
false
929
r
# Download and unzip data if (!file.exists('PowerConsumption.zip')) { filetodownload <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip' download.file(filetodownload,destfile = 'PowerConsumption.zip') } unzip('PowerConsumption.zip',exdir = './Class4Week1') ##Load packages library(dplyr) ##Load and Filter Data readings<-read.table(file = './Class4Week1/household_power_consumption.txt', header = TRUE, sep = ";",na.strings = "?",stringsAsFactors = FALSE) readings2<-filter(readings, Date %in% c("1/2/2007","2/2/2007")) ##object.size(readings) & Remove Original because of size rm(readings) ##Build Chart par(mfrow=c(1,1)) hist(readings2$Global_active_power,col="red", xlab="Global Active Power (kilowatts)",ylab="Frequency",main = "Global Active Power") ##Make png file dev.copy(png, file = "./GitHub/ExData_Plotting1/plot1.png",height=480, width=480) ##Close Device dev.off()
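## Optional sanity check after the filter step: two full days of one-minute readings should
## give 2 * 24 * 60 = 2880 rows.
nrow(readings2) == 2880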
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_generals.r
\name{get_generals}
\alias{get_generals}
\title{Get general parameters}
\usage{
get_generals(MV, path_matrix)
}
\arguments{
\item{MV}{matrix of manifest variables}
\item{path_matrix}{matrix with path connections}
}
\value{
list with the number and names of observations, MVs and LVs
}
\description{
Internal function not to be called by the user
}
\details{
Internal function. \code{get_generals} is called by \code{plspm}.
This function gets the number and names of: observations, manifest variables, and latent variables
}
\section{Warning}{
Do NOT use this function unless you are ME, a package developer, or a jedi user who really knows what they are doing (seriously!)
}
\keyword{internal}
/man/get_generals.Rd
no_license
kongdd/plspm
R
false
true
787
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_generals.r
\name{get_generals}
\alias{get_generals}
\title{Get general parameters}
\usage{
get_generals(MV, path_matrix)
}
\arguments{
\item{MV}{matrix of manifest variables}
\item{path_matrix}{matrix with path connections}
}
\value{
list with the number and names of observations, MVs and LVs
}
\description{
Internal function not to be called by the user
}
\details{
Internal function. \code{get_generals} is called by \code{plspm}.
This function gets the number and names of: observations, manifest variables, and latent variables
}
\section{Warning}{
Do NOT use this function unless you are ME, a package developer, or a jedi user who really knows what they are doing (seriously!)
}
\keyword{internal}
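## A rough sketch of a function matching the contract documented above (a guess at the shape
## of the returned list; this is not the plspm implementation):
get_generals_sketch <- function(MV, path_matrix) {
  list(obs       = nrow(MV),            # number of observations
       obs_names = rownames(MV),
       mvs       = ncol(MV),            # number of manifest variables
       mvs_names = colnames(MV),
       lvs       = nrow(path_matrix),   # number of latent variables
       lvs_names = rownames(path_matrix))
}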
#!/usr/bin/env Rscript VERSION = "0.0.1" suppressPackageStartupMessages(library("optparse")) suppressWarnings(suppressPackageStartupMessages(library("Biostrings"))) suppressWarnings(suppressPackageStartupMessages(library("DECIPHER"))) option_list <- list( make_option( c("-i", "--infile"), type="character", action="store", help="The input fasta to align (required)." ), make_option( c("-o", "--outfile"), type="character", action="store", help="The output file to write to (required)." ), make_option( "--version", type="logical", action="store_true", default=FALSE, help="Print version and exit.", ) ) parser <- OptionParser( usage = "%prog --infile in.fasta --outfile out.fasta", option_list = option_list ) args <- parse_args(parser) log_stderr <- function(...) { cat(sprintf(...), sep='', file=stderr()) } quit_with_err <- function(...) { log_stderr(...) quit(save = "no", status = 1, runLast = FALSE) } validate_file <- function(path) { if (is.null(path)) { quit_with_err("Please provide required file") } } main <- function(args) { if (args$version) { cat(VERSION, file=stdout()) quit(save = "no", status = 0, runLast = FALSE) } validate_file(args$infile) validate_file(args$outfile) dna <- readDNAStringSet(args$infile) # De-replicate the input sequences. u_dna <- unique(dna) index <- match(dna, u_dna) if (length(u_dna) <= 1) { writeXStringSet(dna, args$outfile) quit(save = "no", status = 0, runLast = FALSE) } # Do alignment of the dereplicated sequences sink(stderr(), type = "output") U_DNA <- AlignSeqs(u_dna, processors=1) # Re-replicate the sequences. DNA <- U_DNA[index] names(DNA) <- names(dna) # Write to file. writeXStringSet(DNA, args$outfile) } main(args)
/bin/run_decipher.R
permissive
darcyabjones/pante
R
false
false
1,900
r
#!/usr/bin/env Rscript VERSION = "0.0.1" suppressPackageStartupMessages(library("optparse")) suppressWarnings(suppressPackageStartupMessages(library("Biostrings"))) suppressWarnings(suppressPackageStartupMessages(library("DECIPHER"))) option_list <- list( make_option( c("-i", "--infile"), type="character", action="store", help="The input fasta to align (required)." ), make_option( c("-o", "--outfile"), type="character", action="store", help="The output file to write to (required)." ), make_option( "--version", type="logical", action="store_true", default=FALSE, help="Print version and exit.", ) ) parser <- OptionParser( usage = "%prog --infile in.fasta --outfile out.fasta", option_list = option_list ) args <- parse_args(parser) log_stderr <- function(...) { cat(sprintf(...), sep='', file=stderr()) } quit_with_err <- function(...) { log_stderr(...) quit(save = "no", status = 1, runLast = FALSE) } validate_file <- function(path) { if (is.null(path)) { quit_with_err("Please provide required file") } } main <- function(args) { if (args$version) { cat(VERSION, file=stdout()) quit(save = "no", status = 0, runLast = FALSE) } validate_file(args$infile) validate_file(args$outfile) dna <- readDNAStringSet(args$infile) # De-replicate the input sequences. u_dna <- unique(dna) index <- match(dna, u_dna) if (length(u_dna) <= 1) { writeXStringSet(dna, args$outfile) quit(save = "no", status = 0, runLast = FALSE) } # Do alignment of the dereplicated sequences sink(stderr(), type = "output") U_DNA <- AlignSeqs(u_dna, processors=1) # Re-replicate the sequences. DNA <- U_DNA[index] names(DNA) <- names(dna) # Write to file. writeXStringSet(DNA, args$outfile) } main(args)
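## Usage sketch for the script above, following its own usage string; the file names are
## placeholders:
##   Rscript run_decipher.R --infile unaligned.fasta --outfile aligned.fasta
## A tiny FASTA for a smoke test can be written with Biostrings:
library(Biostrings)
toy <- DNAStringSet(c(seq1 = "ACGTACGTAC", seq2 = "ACGTTCGTAC", seq3 = "ACGTACGTAC"))
writeXStringSet(toy, filepath = file.path(tempdir(), "unaligned.fasta"))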
#+ presets, echo = FALSE, warning = FALSE, message = FALSE library(winference) library(doMC) library(doRNG) library(dplyr) library(ggthemes) registerDoMC(cores = 6) rm(list = ls()) set.seed(11) setmytheme() # # model target <- get_autoregressive() # # number of observations nobservations <- 100 load(file = "~/Dropbox/ABCD/Results/data/ar1data.RData") obs <- obs[1:nobservations] # now using the embeddings, and Hilbert sort lagvalue <- 1 lag_obs <- create_lagmatrix(matrix(obs, nrow = 1), lagvalue) lag_obs <- lag_obs[,(lagvalue+1):ncol(lag_obs)] eps <- 0.05 # compute regularized transport distance between delayed embeddings of time series compute_d_wasserstein <- function(theta, transportiterations = 100){ fake_rand <- target$generate_randomness(nobservations) fake_obs <- target$robservation(nobservations, theta, target$parameters, fake_rand) lag_fake_obs <- create_lagmatrix(matrix(fake_obs, nrow = target$ydim), lagvalue) C <- cost_matrix_L2(lag_obs, lag_fake_obs[,(lagvalue+1):nobservations,drop=FALSE]) epsilon <- eps * median(C) equalw <- rep(1/(nobservations-lagvalue), (nobservations-lagvalue)) wass <- wasserstein(equalw, equalw, C, epsilon, transportiterations) return(as.numeric(wass$distances)) } proposal <- mixture_proposal() param_algo <- list(nthetas = 1024, nmoves = 5, proposal = proposal, nsteps = 20, minimum_diversity = 0.5, R = 2, maxtrials = 1000) filename <- paste0("~/Dropbox/ABCD/Results/autoregressive/ar1data.n", nobservations, ".wsmc_rhit-wassersteinL2.RData") # results <- wsmc_rhit(compute_d_wasserstein, target, param_algo, savefile = filename) # wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) # nsteps <- max(wsmc.df$step) # save(wsmc.df, results, nsteps, file = filename) load(filename) wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) nsteps <- max(wsmc.df$step) wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) nsteps <- max(wsmc.df$step) g <- ggplot(wsmc.df, aes(x = rho, group = step)) + geom_density(aes(y = ..density..), colour = "darkgrey") g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) g g <- ggplot(wsmc.df, aes(x = logsigma, group = step)) + geom_density(aes(y = ..density..), colour = "darkgrey") g <- g + theme(legend.position = "none") g <- g + xlab(expression(log(sigma))) g g <- ggplot(wsmc.df, aes(x = rho, y = logsigma, colour = step, group = step)) g <- g + geom_point(alpha = 0.5) g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none") g <- g + xlab(expression(rho)) + ylab(expression(log(sigma))) g filename <- paste0("~/Dropbox/ABCD/Results/autoregressive/ar1data.n", nobservations, ".metropolis.RData") load(filename) chainlist_to_dataframe <- function(chains_list){ nchains <- length(chains_list) niterations <- nrow(chains_list[[1]]) chaindf <- foreach (i = 1:nchains, .combine = rbind) %do% { data.frame(ichain = rep(i, niterations), iteration = 1:niterations, X = chains_list[[i]]) } return(chaindf) } chaindf <- chainlist_to_dataframe(mh$chains) # plot chaindf.melt <- melt(chaindf, id.vars = c("ichain", "iteration")) chain.bycomponent.df <- chaindf.melt %>% filter(iteration > 2000) %>% spread(variable, value) # g + geom_density2d(data=chain.bycomponent.df, aes(x = X.1, y = X.2, colour = NULL, group = NULL), # colour = "black") g + geom_point(data=chain.bycomponent.df %>% filter(iteration %% 10 == 1), aes(x = X.1, y = X.2, colour = NULL, group = NULL), colour = "black") g <- ggplot(wsmc.df %>% filter(step > 10), aes(x = rho, y = logsigma, colour = step, group = step)) g 
<- g + geom_point(alpha = 0.5) g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) + ylab(expression(log(sigma))) g + geom_point(data=chain.bycomponent.df %>% filter(iteration %% 10 == 1), aes(x = X.1, y = X.2, colour = NULL, group = NULL), colour = "orange", alpha = 0.1) g <- ggplot(wsmc.df %>% filter(step == 15), aes(x = rho, y = logsigma, colour = step, group = step)) g <- g + geom_density2d() g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) + ylab(expression(log(sigma))) g + geom_density2d(data=chain.bycomponent.df, aes(x = X.1, y = X.2, colour = NULL, group = NULL), colour = "black") g <- ggplot(wsmc.df %>% filter(step == 15), aes(x = rho)) g <- g + geom_histogram(aes(y = ..density..), alpha = 0.5) g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) g <- g + geom_histogram(data=chain.bycomponent.df, aes(x = X.1, y = ..density..), fill = "red", alpha = 0.2) g g <- ggplot(wsmc.df %>% filter(step == 15), aes(x = logsigma)) g <- g + geom_histogram(aes(y = ..density..), alpha = 0.5) g <- g + theme(legend.position = "none") g <- g + xlab(expression(log(sigma))) g <- g + geom_histogram(data=chain.bycomponent.df, aes(x = X.2, y = ..density..), fill = "red", alpha = 0.2) g
/inst/reproduce/autoregressive/ar1_w-smc_rhit-n100-wassersteinL2.R
no_license
alexanderwhatley/winference
R
false
false
4,958
r
#+ presets, echo = FALSE, warning = FALSE, message = FALSE library(winference) library(doMC) library(doRNG) library(dplyr) library(ggthemes) registerDoMC(cores = 6) rm(list = ls()) set.seed(11) setmytheme() # # model target <- get_autoregressive() # # number of observations nobservations <- 100 load(file = "~/Dropbox/ABCD/Results/data/ar1data.RData") obs <- obs[1:nobservations] # now using the embeddings, and Hilbert sort lagvalue <- 1 lag_obs <- create_lagmatrix(matrix(obs, nrow = 1), lagvalue) lag_obs <- lag_obs[,(lagvalue+1):ncol(lag_obs)] eps <- 0.05 # compute regularized transport distance between delayed embeddings of time series compute_d_wasserstein <- function(theta, transportiterations = 100){ fake_rand <- target$generate_randomness(nobservations) fake_obs <- target$robservation(nobservations, theta, target$parameters, fake_rand) lag_fake_obs <- create_lagmatrix(matrix(fake_obs, nrow = target$ydim), lagvalue) C <- cost_matrix_L2(lag_obs, lag_fake_obs[,(lagvalue+1):nobservations,drop=FALSE]) epsilon <- eps * median(C) equalw <- rep(1/(nobservations-lagvalue), (nobservations-lagvalue)) wass <- wasserstein(equalw, equalw, C, epsilon, transportiterations) return(as.numeric(wass$distances)) } proposal <- mixture_proposal() param_algo <- list(nthetas = 1024, nmoves = 5, proposal = proposal, nsteps = 20, minimum_diversity = 0.5, R = 2, maxtrials = 1000) filename <- paste0("~/Dropbox/ABCD/Results/autoregressive/ar1data.n", nobservations, ".wsmc_rhit-wassersteinL2.RData") # results <- wsmc_rhit(compute_d_wasserstein, target, param_algo, savefile = filename) # wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) # nsteps <- max(wsmc.df$step) # save(wsmc.df, results, nsteps, file = filename) load(filename) wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) nsteps <- max(wsmc.df$step) wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) nsteps <- max(wsmc.df$step) g <- ggplot(wsmc.df, aes(x = rho, group = step)) + geom_density(aes(y = ..density..), colour = "darkgrey") g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) g g <- ggplot(wsmc.df, aes(x = logsigma, group = step)) + geom_density(aes(y = ..density..), colour = "darkgrey") g <- g + theme(legend.position = "none") g <- g + xlab(expression(log(sigma))) g g <- ggplot(wsmc.df, aes(x = rho, y = logsigma, colour = step, group = step)) g <- g + geom_point(alpha = 0.5) g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none") g <- g + xlab(expression(rho)) + ylab(expression(log(sigma))) g filename <- paste0("~/Dropbox/ABCD/Results/autoregressive/ar1data.n", nobservations, ".metropolis.RData") load(filename) chainlist_to_dataframe <- function(chains_list){ nchains <- length(chains_list) niterations <- nrow(chains_list[[1]]) chaindf <- foreach (i = 1:nchains, .combine = rbind) %do% { data.frame(ichain = rep(i, niterations), iteration = 1:niterations, X = chains_list[[i]]) } return(chaindf) } chaindf <- chainlist_to_dataframe(mh$chains) # plot chaindf.melt <- melt(chaindf, id.vars = c("ichain", "iteration")) chain.bycomponent.df <- chaindf.melt %>% filter(iteration > 2000) %>% spread(variable, value) # g + geom_density2d(data=chain.bycomponent.df, aes(x = X.1, y = X.2, colour = NULL, group = NULL), # colour = "black") g + geom_point(data=chain.bycomponent.df %>% filter(iteration %% 10 == 1), aes(x = X.1, y = X.2, colour = NULL, group = NULL), colour = "black") g <- ggplot(wsmc.df %>% filter(step > 10), aes(x = rho, y = logsigma, colour = step, group = step)) g 
<- g + geom_point(alpha = 0.5) g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) + ylab(expression(log(sigma))) g + geom_point(data=chain.bycomponent.df %>% filter(iteration %% 10 == 1), aes(x = X.1, y = X.2, colour = NULL, group = NULL), colour = "orange", alpha = 0.1) g <- ggplot(wsmc.df %>% filter(step == 15), aes(x = rho, y = logsigma, colour = step, group = step)) g <- g + geom_density2d() g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) + ylab(expression(log(sigma))) g + geom_density2d(data=chain.bycomponent.df, aes(x = X.1, y = X.2, colour = NULL, group = NULL), colour = "black") g <- ggplot(wsmc.df %>% filter(step == 15), aes(x = rho)) g <- g + geom_histogram(aes(y = ..density..), alpha = 0.5) g <- g + theme(legend.position = "none") g <- g + xlab(expression(rho)) g <- g + geom_histogram(data=chain.bycomponent.df, aes(x = X.1, y = ..density..), fill = "red", alpha = 0.2) g g <- ggplot(wsmc.df %>% filter(step == 15), aes(x = logsigma)) g <- g + geom_histogram(aes(y = ..density..), alpha = 0.5) g <- g + theme(legend.position = "none") g <- g + xlab(expression(log(sigma))) g <- g + geom_histogram(data=chain.bycomponent.df, aes(x = X.2, y = ..density..), fill = "red", alpha = 0.2) g
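## A toy illustration of the lag-1 delay embedding that the distances above are built on
## (the series here is arbitrary; the expected dimensions follow the construction used in
## the script, with one (x_t, x_{t-1}) pair per column):
toy <- rnorm(6)
toy_emb <- create_lagmatrix(matrix(toy, nrow = 1), 1)
toy_emb <- toy_emb[, 2:ncol(toy_emb), drop = FALSE]
dim(toy_emb)   # expected: 2 rows and 5 columns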
# Libraries -------------------------------------------------------------------- library(tidyverse) library(heatmaply) library(GGally) library(corrplot) # Definitions ------------------------------------------------------------------ # Working directory setwd("./BE19_GFPScreens/") getwd() # Results directory resdir <- "results/correlations/" dir.create(resdir) # Import results comblist <- readRDS("results/processing/comblist.RDS") %>% print() combdf.gd <- readRDS("results/processing/combdf_gd.RDS") %>% print() # Replicate correlations (heatmaply with dendrogram) --------------------------- # For pdf file saving to work, needed orca (installed through conda) for (i in 1:length(comblist)) { print(i) readcols <- str_which(names(comblist[[i]]), "^..\\..$") cr <- cor(comblist[[i]][, readcols], method = "pearson", use = "pairwise.complete.obs") heatmaply_cor(cr, limits = c(0, 1), main = names(comblist)[i], hclustfun = hclust, hclust_method = "ward.D", plot_method = "ggplot", dendrogram = "both", show_dendrogram = c(T, F), file = paste0(resdir, names(comblist)[i],"_replicates.pdf")) } dev.off() # Experiment correlations (ggpairs) -------------------------------------------- # Table with p-values gd.p <- select(combdf.gd, str_which(names(combdf.gd), "_q")) %>% print() # Table with ratios gd.r <- select(combdf.gd, str_which(names(combdf.gd), "_log2fc")) gd.r <- replace(gd.r, abs(gd.r) > 5, NA_real_) %>% rename_all(list(~ str_extract(., ".*(?=_log2fc)"))) %>% print() # Table of significant ratios only, rest filled with NA gd.rsig <- gd.r %>% replace(gd.p > 0.05, NA_real_) %>% print() # Pairwise correlations pdf(paste0(resdir, "all_proteins_sigGuides_ggpairs.pdf")) ggpairs(gd.rsig, axisLabels = "none", switch = "y", upper = list(continuous = wrap("smooth", alpha = 0.3, size = 0.2)), lower = list(continuous = wrap("points", alpha = 0.3, size = 0.2))) + theme(panel.grid.major = element_blank(), panel.background = element_rect(fill = "grey95")) dev.off() # Corrplots & corr-p-value histograms ------------------------------------------ correlationPlot <- function(input, filename) { gd.corrs <- cor(input, use = "pairwise.complete.obs", method = "pearson") gd.corrs %>% as_tibble(rownames = "Protein") write_csv(as.data.frame(gd.corrs), paste0(resdir, filename, ".csv")) gd.corrp <- Hmisc::rcorr(as.matrix(input), type = "pearson")$P pdf(paste0(resdir, filename, ".pdf")) corrplot(gd.corrs, p.mat = gd.corrp, type = "lower", diag = F, insig = "p-value", sig.level = "-1") hist(gd.corrs, xlim = c(-1, 1), breaks = 20) hist(gd.corrp, xlim = c(0, 1), breaks = 20) dev.off() } correlationPlot(gd.rsig, "all_proteins_sigGuides_corrplot") # Session info ----------------------------------------------------------------- writeLines(capture.output(sessionInfo()), "code/BE19-05_SessionInfo.txt")
/BE19_GFPScreens/code/BE19-05_Correlations.R
permissive
OlgaTSchubert/ProteinGenetics
R
false
false
3,253
r
# Libraries -------------------------------------------------------------------- library(tidyverse) library(heatmaply) library(GGally) library(corrplot) # Definitions ------------------------------------------------------------------ # Working directory setwd("./BE19_GFPScreens/") getwd() # Results directory resdir <- "results/correlations/" dir.create(resdir) # Import results comblist <- readRDS("results/processing/comblist.RDS") %>% print() combdf.gd <- readRDS("results/processing/combdf_gd.RDS") %>% print() # Replicate correlations (heatmaply with dendrogram) --------------------------- # For pdf file saving to work, needed orca (installed through conda) for (i in 1:length(comblist)) { print(i) readcols <- str_which(names(comblist[[i]]), "^..\\..$") cr <- cor(comblist[[i]][, readcols], method = "pearson", use = "pairwise.complete.obs") heatmaply_cor(cr, limits = c(0, 1), main = names(comblist)[i], hclustfun = hclust, hclust_method = "ward.D", plot_method = "ggplot", dendrogram = "both", show_dendrogram = c(T, F), file = paste0(resdir, names(comblist)[i],"_replicates.pdf")) } dev.off() # Experiment correlations (ggpairs) -------------------------------------------- # Table with p-values gd.p <- select(combdf.gd, str_which(names(combdf.gd), "_q")) %>% print() # Table with ratios gd.r <- select(combdf.gd, str_which(names(combdf.gd), "_log2fc")) gd.r <- replace(gd.r, abs(gd.r) > 5, NA_real_) %>% rename_all(list(~ str_extract(., ".*(?=_log2fc)"))) %>% print() # Table of significant ratios only, rest filled with NA gd.rsig <- gd.r %>% replace(gd.p > 0.05, NA_real_) %>% print() # Pairwise correlations pdf(paste0(resdir, "all_proteins_sigGuides_ggpairs.pdf")) ggpairs(gd.rsig, axisLabels = "none", switch = "y", upper = list(continuous = wrap("smooth", alpha = 0.3, size = 0.2)), lower = list(continuous = wrap("points", alpha = 0.3, size = 0.2))) + theme(panel.grid.major = element_blank(), panel.background = element_rect(fill = "grey95")) dev.off() # Corrplots & corr-p-value histograms ------------------------------------------ correlationPlot <- function(input, filename) { gd.corrs <- cor(input, use = "pairwise.complete.obs", method = "pearson") gd.corrs %>% as_tibble(rownames = "Protein") write_csv(as.data.frame(gd.corrs), paste0(resdir, filename, ".csv")) gd.corrp <- Hmisc::rcorr(as.matrix(input), type = "pearson")$P pdf(paste0(resdir, filename, ".pdf")) corrplot(gd.corrs, p.mat = gd.corrp, type = "lower", diag = F, insig = "p-value", sig.level = "-1") hist(gd.corrs, xlim = c(-1, 1), breaks = 20) hist(gd.corrp, xlim = c(0, 1), breaks = 20) dev.off() } correlationPlot(gd.rsig, "all_proteins_sigGuides_corrplot") # Session info ----------------------------------------------------------------- writeLines(capture.output(sessionInfo()), "code/BE19-05_SessionInfo.txt")
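## The same helper can be reused for all guides, not only the significant ones, by passing
## gd.r instead of gd.rsig (the output file name here is just a suggestion):
correlationPlot(gd.r, "all_proteins_allGuides_corrplot")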
context("pproto") test_that("check that proto objects behave as expected", { # initialize proto <- proto::proto p <- proto(a = 5) p2 <- p$proto(b = 5) # check that fields are correct expect_equal(p$ls(), "a") expect_equal(p$a, 5) expect_equal(p2$ls(), "b") expect_equal(p2$b, 5) # update value p$a <- 10 # check that a in both objects have become 10 expect_equal(p$a, 10) expect_equal(p2$a, 10) }) test_that("create new pproto", { p <- pproto(a = 5) expect_equal(p$ls(), "a") expect_equal(p$a, 5) }) test_that("create inherited proto with s3 fields", { # initialize p <- pproto(a = 5) p2 <- pproto(NULL, p, b = 5) # check that fields are correct expect_equal(p$ls(), "a") expect_equal(p$a, 5) expect_equal(p2$ls(), c("a", "b")) expect_equal(p2$a, 5) expect_equal(p2$b, 5) # update parent proto p$a <- 7 # check that fields are correct expect_equal(p$ls(), "a") expect_equal(p$a, 7) expect_equal(p2$ls(), c("a", "b")) expect_equal(p2$a, 5) expect_equal(p2$b, 5) }) test_that("create inherited proto with proto fields", { # initialize p <- pproto("p", a = 1) p2 <- pproto("p2", p, b = 2, c = p) p3 <- pproto("p3", p, d = 3, f = p2) # check that fields are correct expect_equal(class(p), c("p", "pproto", "proto", "environment")) expect_equal(p$ls(), "a") expect_equal(p$a, 1) expect_equal(class(p2), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p2$ls(), c("a", "b", "c")) expect_equal(p2$a, 1) expect_equal(p2$b, 2) expect_equal(class(p2$c), c("p", "pproto", "proto", "environment")) expect_equal(p2$c$a, 1) expect_equal(class(p3), c("p3", "p", "pproto", "proto", "environment")) expect_equal(p3$ls(), c("a", "d", "f")) expect_equal(p3$a, 1) expect_equal(p3$d, 3) expect_equal(class(p3$f), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p3$f$a, 1) expect_equal(p3$f$b, 2) expect_equal(class(p3$f$c), c("p", "pproto", "proto", "environment")) expect_equal(p3$f$c$a, 1) # change p and check that values are correct p$a <- 4 p2$b <- 5 # check that fields are correct expect_equal(class(p), c("p", "pproto", "proto", "environment")) expect_equal(p$ls(), "a") expect_equal(p$a, 4) expect_equal(class(p2), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p2$ls(), c("a", "b", "c")) expect_equal(p2$a, 1) expect_equal(p2$b, 5) expect_equal(class(p2$c), c("p", "pproto", "proto", "environment")) expect_equal(p2$c$a, 1) expect_equal(class(p3), c("p3", "p", "pproto", "proto", "environment")) expect_equal(p3$ls(), c("a", "d", "f")) expect_equal(p3$a, 1) expect_equal(p3$d, 3) expect_equal(class(p3$f), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p3$f$a, 1) expect_equal(p3$f$b, 2) expect_equal(class(p3$f$c), c("p", "pproto", "proto", "environment")) expect_equal(p3$f$c$a, 1) })
/fuzzedpackages/oppr/tests/testthat/test_pproto.R
no_license
akhikolla/testpackages
R
false
false
2,899
r
context("pproto") test_that("check that proto objects behave as expected", { # initialize proto <- proto::proto p <- proto(a = 5) p2 <- p$proto(b = 5) # check that fields are correct expect_equal(p$ls(), "a") expect_equal(p$a, 5) expect_equal(p2$ls(), "b") expect_equal(p2$b, 5) # update value p$a <- 10 # check that a in both objects have become 10 expect_equal(p$a, 10) expect_equal(p2$a, 10) }) test_that("create new pproto", { p <- pproto(a = 5) expect_equal(p$ls(), "a") expect_equal(p$a, 5) }) test_that("create inherited proto with s3 fields", { # initialize p <- pproto(a = 5) p2 <- pproto(NULL, p, b = 5) # check that fields are correct expect_equal(p$ls(), "a") expect_equal(p$a, 5) expect_equal(p2$ls(), c("a", "b")) expect_equal(p2$a, 5) expect_equal(p2$b, 5) # update parent proto p$a <- 7 # check that fields are correct expect_equal(p$ls(), "a") expect_equal(p$a, 7) expect_equal(p2$ls(), c("a", "b")) expect_equal(p2$a, 5) expect_equal(p2$b, 5) }) test_that("create inherited proto with proto fields", { # initialize p <- pproto("p", a = 1) p2 <- pproto("p2", p, b = 2, c = p) p3 <- pproto("p3", p, d = 3, f = p2) # check that fields are correct expect_equal(class(p), c("p", "pproto", "proto", "environment")) expect_equal(p$ls(), "a") expect_equal(p$a, 1) expect_equal(class(p2), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p2$ls(), c("a", "b", "c")) expect_equal(p2$a, 1) expect_equal(p2$b, 2) expect_equal(class(p2$c), c("p", "pproto", "proto", "environment")) expect_equal(p2$c$a, 1) expect_equal(class(p3), c("p3", "p", "pproto", "proto", "environment")) expect_equal(p3$ls(), c("a", "d", "f")) expect_equal(p3$a, 1) expect_equal(p3$d, 3) expect_equal(class(p3$f), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p3$f$a, 1) expect_equal(p3$f$b, 2) expect_equal(class(p3$f$c), c("p", "pproto", "proto", "environment")) expect_equal(p3$f$c$a, 1) # change p and check that values are correct p$a <- 4 p2$b <- 5 # check that fields are correct expect_equal(class(p), c("p", "pproto", "proto", "environment")) expect_equal(p$ls(), "a") expect_equal(p$a, 4) expect_equal(class(p2), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p2$ls(), c("a", "b", "c")) expect_equal(p2$a, 1) expect_equal(p2$b, 5) expect_equal(class(p2$c), c("p", "pproto", "proto", "environment")) expect_equal(p2$c$a, 1) expect_equal(class(p3), c("p3", "p", "pproto", "proto", "environment")) expect_equal(p3$ls(), c("a", "d", "f")) expect_equal(p3$a, 1) expect_equal(p3$d, 3) expect_equal(class(p3$f), c("p2", "p", "pproto", "proto", "environment")) expect_equal(p3$f$a, 1) expect_equal(p3$f$b, 2) expect_equal(class(p3$f$c), c("p", "pproto", "proto", "environment")) expect_equal(p3$f$c$a, 1) })
## ----setup, include=FALSE----------------------------------------------------- library(knitr) opts_chunk$set(out.extra='style="display:block; margin: auto"', fig.align="center", tidy=FALSE) ## ----gamma, message=F--------------------------------------------------------- library(esaddle) set.seed(4141) x <- rgamma(1000, 2, 1) # Fixing tuning parameter of EES decay <- 0.05 # Evaluating EES at several point xSeq <- seq(-2, 8, length.out = 200) tmp <- dsaddle(y = xSeq, X = x, decay = decay, log = TRUE) # Un-normalized EES tmp2 <- dsaddle(y = xSeq, X = x, decay = decay, # EES normalized by importance sampling normalize = TRUE, control = list("method" = "IS", nNorm = 500), log = TRUE) # Plotting true density, EES and normal approximation plot(xSeq, exp(tmp$llk), type = 'l', ylab = "Density", xlab = "x") lines(xSeq, dgamma(xSeq, 2, 1), col = 3) lines(xSeq, dnorm(xSeq, mean(x), sd(x)), col = 2) lines(xSeq, exp(tmp2$llk), col = 4) suppressWarnings( rug(x) ) legend("topright", c("EES un-norm", "EES normalized", "Truth", "Gaussian"), col = c(1, 4, 3, 2), lty = 1) res <- findMode(x, init = mean(x), decay = decay)$mode abline(v = res, lty = 2, lwd = 1.5) ## ----selGamma, message=F------------------------------------------------------ tmp <- selectDecay(decay = c(5e-4, 1e-3, 5e-3, 0.01, 0.1, 0.5, 5, Inf), # grid of decay values K = 4, simulator = function() x, multicore = T, ncores = 2) ## ----warp, message=F---------------------------------------------------------- dwarp <- function(x, alpha) { lik <- dnorm(x[ , 1], log = TRUE) tmp <- x[ , 1]^2 lik <- lik + dnorm(x[ , 2] - alpha*tmp, log = TRUE) lik } rwarp <- function(n = 1, alpha) { z <- matrix(rnorm(n*2), n, 2) tmp <- z[ , 1]^2 z[ , 2] <- z[ , 2] + alpha*tmp z } ## ----warpGrid, message=F------------------------------------------------------ alpha <- 1 X <- rwarp(2000, alpha = alpha) # Creating 2d grid m <- 50 expansion <- 1 x1 <- seq(-2, 3, length=m)* expansion; x2 <- seq(-3, 3, length=m) * expansion x <- expand.grid(x1, x2) # Evaluating true density on grid alpha <- 1 dw <- exp( dwarp(x, alpha = alpha) ) # Evaluating EES density dwa <- dsaddle(as.matrix(x), X, decay = 0.05, log = FALSE)$llk ## ----warpPlot, message=F------------------------------------------------------ # Plotting true density par(mfrow = c(1, 2)) plot(X, pch=".", col=1, ylim = c(-2, 3), xlim = c(-2, 2), main = "True density", xlab = expression(X[1]), ylab = expression(X[2])) contour(x1, x2, matrix(dw, m, m), levels = quantile(as.vector(dw), seq(0.8, 0.995, length.out = 10)), col=2, add=T) # Plotting EES density plot(X, pch=".",col=1, ylim = c(-2, 3), xlim = c(-2, 2), main = "EES density", xlab = expression(X[1]), ylab = expression(X[2])) contour(x1, x2, matrix(dwa, m, m), levels = quantile(as.vector(dwa), seq(0.8, 0.995, length.out = 10)), col=2, add=T) # Finding mode using EES init <- rnorm(2, 0, sd = c(1, 2)) # random initialization res <- findMode(X = X, init = init, decay = 0.05)$mode points(res[1], res[2], pch = 3, lwd = 2) ## ----warpSelect, message=F---------------------------------------------------- tmp <- selectDecay(decay = c(0.005, 0.01, 0.1, 0.25, 0.5, 1, 5, Inf), K = 4, simulator = function() X, multicore = T, ncores = 2)
/inst/doc/esaddle.R
no_license
cran/esaddle
R
false
false
3,434
r
## ----setup, include=FALSE----------------------------------------------------- library(knitr) opts_chunk$set(out.extra='style="display:block; margin: auto"', fig.align="center", tidy=FALSE) ## ----gamma, message=F--------------------------------------------------------- library(esaddle) set.seed(4141) x <- rgamma(1000, 2, 1) # Fixing tuning parameter of EES decay <- 0.05 # Evaluating EES at several point xSeq <- seq(-2, 8, length.out = 200) tmp <- dsaddle(y = xSeq, X = x, decay = decay, log = TRUE) # Un-normalized EES tmp2 <- dsaddle(y = xSeq, X = x, decay = decay, # EES normalized by importance sampling normalize = TRUE, control = list("method" = "IS", nNorm = 500), log = TRUE) # Plotting true density, EES and normal approximation plot(xSeq, exp(tmp$llk), type = 'l', ylab = "Density", xlab = "x") lines(xSeq, dgamma(xSeq, 2, 1), col = 3) lines(xSeq, dnorm(xSeq, mean(x), sd(x)), col = 2) lines(xSeq, exp(tmp2$llk), col = 4) suppressWarnings( rug(x) ) legend("topright", c("EES un-norm", "EES normalized", "Truth", "Gaussian"), col = c(1, 4, 3, 2), lty = 1) res <- findMode(x, init = mean(x), decay = decay)$mode abline(v = res, lty = 2, lwd = 1.5) ## ----selGamma, message=F------------------------------------------------------ tmp <- selectDecay(decay = c(5e-4, 1e-3, 5e-3, 0.01, 0.1, 0.5, 5, Inf), # grid of decay values K = 4, simulator = function() x, multicore = T, ncores = 2) ## ----warp, message=F---------------------------------------------------------- dwarp <- function(x, alpha) { lik <- dnorm(x[ , 1], log = TRUE) tmp <- x[ , 1]^2 lik <- lik + dnorm(x[ , 2] - alpha*tmp, log = TRUE) lik } rwarp <- function(n = 1, alpha) { z <- matrix(rnorm(n*2), n, 2) tmp <- z[ , 1]^2 z[ , 2] <- z[ , 2] + alpha*tmp z } ## ----warpGrid, message=F------------------------------------------------------ alpha <- 1 X <- rwarp(2000, alpha = alpha) # Creating 2d grid m <- 50 expansion <- 1 x1 <- seq(-2, 3, length=m)* expansion; x2 <- seq(-3, 3, length=m) * expansion x <- expand.grid(x1, x2) # Evaluating true density on grid alpha <- 1 dw <- exp( dwarp(x, alpha = alpha) ) # Evaluating EES density dwa <- dsaddle(as.matrix(x), X, decay = 0.05, log = FALSE)$llk ## ----warpPlot, message=F------------------------------------------------------ # Plotting true density par(mfrow = c(1, 2)) plot(X, pch=".", col=1, ylim = c(-2, 3), xlim = c(-2, 2), main = "True density", xlab = expression(X[1]), ylab = expression(X[2])) contour(x1, x2, matrix(dw, m, m), levels = quantile(as.vector(dw), seq(0.8, 0.995, length.out = 10)), col=2, add=T) # Plotting EES density plot(X, pch=".",col=1, ylim = c(-2, 3), xlim = c(-2, 2), main = "EES density", xlab = expression(X[1]), ylab = expression(X[2])) contour(x1, x2, matrix(dwa, m, m), levels = quantile(as.vector(dwa), seq(0.8, 0.995, length.out = 10)), col=2, add=T) # Finding mode using EES init <- rnorm(2, 0, sd = c(1, 2)) # random initialization res <- findMode(X = X, init = init, decay = 0.05)$mode points(res[1], res[2], pch = 3, lwd = 2) ## ----warpSelect, message=F---------------------------------------------------- tmp <- selectDecay(decay = c(0.005, 0.01, 0.1, 0.25, 0.5, 1, 5, Inf), K = 4, simulator = function() X, multicore = T, ncores = 2)
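## A small follow-up check in the same spirit as the gamma example at the top: the true mode
## of a Gamma(2, 1) density is (shape - 1) / rate = 1, so the EES mode estimate should land
## close to 1 (decay = 0.05 as above; a fresh sample is drawn because x is reused later in
## the vignette).
xg <- rgamma(1000, 2, 1)
findMode(xg, init = mean(xg), decay = 0.05)$mode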
# distance_m -------------------------------------------------------------- context(":::distance_m") test_that("distances", { expect_equal( distance_m(1, 0, 1, 0), 0 ) # Brest - Strasbourg (901 km) expect_equal( round(distance_m(-4.486009, 48.390528, 7.750713, 48.584614) / 1000), 901 ) # Louvre Pyramid side (35.4 m) expect_equal( round(distance_m(2.3357192, 48.8612197, 2.3361712, 48.8611065), 1), 35.4 ) }) # circ -------------------------------------------------------------------- context(":::circ") test_that("circ", { expect_true (circ(c("1", "2", "3", "1"))) expect_false(circ(c("1", "2", "3", "2"))) }) context(":::rotate_circ") test_that("rotates circular way from n positions", { expect_error( rotate_circ(c("1", "2", "3", "2")) ) roundabout <- c("1", "2", "3", "1") expect_equal( rotate_circ(roundabout, 1), c("2", "3", "1", "2") ) expect_equal( rotate_circ(roundabout, 2), c("3", "1", "2", "3") ) expect_equal( rotate_circ(roundabout, 3), roundabout ) }) # rm_following_double ----------------------------------------------------- context(":::rm_following_double") test_that("removes following doubles", { expect_equal( rm_following_double(1:10), 1:10 ) expect_equal( rm_following_double(c(1, 1, 2, 3, 3, 4, 5, 5)), c(1, 2, 3, 4, 5) ) expect_equal( rm_following_double(c(1:5, 5:0, 0:3, 3:-1)), c(1:5, 4:0, 1:3, 2:-1) ) expect_equal( rm_following_double(rep(c(1:5, 5:1), each = 3)), c(1:5, 4:1) ) }) # merge_2_ways ------------------------------------------------------------ context(":::merge_2_ways") test_that("merge 2 ways", { expect_error( merge_2_ways( c("1", "2", "3"), NULL ), "empty" ) expect_error( merge_2_ways( c("1", "2", "3"), c("2", "10", "11") ), "ways not connected by their ends" ) # need 0 reverse expect_equal( merge_2_ways( c("1", "2", "3"), c("3", "4", "5", "6") ), c("1", "2", "3", "4", "5", "6") ) # need 1 reverse (v1) expect_equal( merge_2_ways( c("1", "2", "3"), c("6", "5", "4", "3") ), c("1", "2", "3", "4", "5", "6") ) # need 1 reverse (v2) expect_equal( merge_2_ways( c("3", "2", "1"), c("3", "4", "5", "6") ), c("1", "2", "3", "4", "5", "6") ) # need 2 reverses expect_equal( merge_2_ways( c("3", "2", "1"), c("6", "5", "4", "3") ), c("1", "2", "3", "4", "5", "6") ) }) # roundabout_part --------------------------------------------------------- context(":::roundabout_part") test_that("simple_case", { expect_equal( roundabout_part( c("2", "4"), c("2", "5", "1", "3", "2"), c("1", "6") ), c("2", "5", "1") ) # -- 3 -- # / \ # / \ # 4 --- *2* 1 ---- 6 # \ / # \ / # -- 5 -- }) test_that("with rotation", { expect_equal( roundabout_part( c("2", "4"), c("3", "2", "5", "1", "3"), c("1", "6") ), c("2", "5", "1") ) # < *3* < # / \ # / \ # 4 -<- 2 1 ->- 6 # \ / # \ / # -> 5 -> expect_equal( roundabout_part( c("2", "4"), c("1", "3", "2", "5", "1"), c("1", "6") ), c("2", "5", "1") ) # <- 3 -- # / \ # / \ # 4 ---- 2 *1* ---- 6 # \ / # \ / # -- 5 -> expect_equal( roundabout_part( c("2", "4"), c("5", "1", "3", "2", "5"), c("1", "6") ), c("2", "5", "1") ) # <- 3 -- # / \ # / \ # 4 ---- 2 1 ---- 6 # \ / # \ / # --*5*-> }) test_that("entrance same as exit", { expect_equal( roundabout_part( c("2", "4"), c("3", "2", "5", "1", "3") ), c("2", "5", "1", "3", "2") ) expect_equal( roundabout_part( c("1", "6"), c("5", "1", "3", "2", "5") ), c("1", "3", "2", "5", "1") ) }) test_that("anti_clockwise", { expect_equal( roundabout_part( c("2", "4"), rev(c("3", "2", "5", "1", "3")), c("1", "6") ), c("2", "3", "1") ) # > *3* > # / \ # / \ # 4 -<- 2 1 ->- 6 # \ / # \ / # -< 5 -< }) # merge_ways 
-------------------------------------------------------------- # < *3* < # / \ # / \ # 4 -<- 2 1 ->- 6 -<- 7 -<- 8 ->- 9 ->- 10 # \ / # \ / # -> 5 -> test_that("first or last way is not circular", { expect_error( merge_ways( list( c("3", "2", "5", "1", "3"), # (roundabout) c("1", "6"), c("8", "7", "6") ) ), "first or last way cannot be circular" ) }) test_that("straight line", { expect_equal( merge_ways( list( c("1", "6"), c("8", "7", "6"), c("8", "9", "10") ) ), c("1", "6", "7", "8", "9", "10") ) }) test_that("including roudabout", { expect_equal( merge_ways( list( c("2", "4"), c("3", "2", "5", "1", "3"), # (roundabout) c("1", "6"), c("8", "7", "6"), c("8", "9", "10") ) ), c("4", "2", "5", "1", "6", "7", "8", "9", "10") ) })
/tests/testthat/test_util_geom.R
no_license
py-b/osmbus
R
false
false
5,882
r
# distance_m -------------------------------------------------------------- context(":::distance_m") test_that("distances", { expect_equal( distance_m(1, 0, 1, 0), 0 ) # Brest - Strasbourg (901 km) expect_equal( round(distance_m(-4.486009, 48.390528, 7.750713, 48.584614) / 1000), 901 ) # Louvre Pyramid side (35.4 m) expect_equal( round(distance_m(2.3357192, 48.8612197, 2.3361712, 48.8611065), 1), 35.4 ) }) # circ -------------------------------------------------------------------- context(":::circ") test_that("circ", { expect_true (circ(c("1", "2", "3", "1"))) expect_false(circ(c("1", "2", "3", "2"))) }) context(":::rotate_circ") test_that("rotates circular way from n positions", { expect_error( rotate_circ(c("1", "2", "3", "2")) ) roundabout <- c("1", "2", "3", "1") expect_equal( rotate_circ(roundabout, 1), c("2", "3", "1", "2") ) expect_equal( rotate_circ(roundabout, 2), c("3", "1", "2", "3") ) expect_equal( rotate_circ(roundabout, 3), roundabout ) }) # rm_following_double ----------------------------------------------------- context(":::rm_following_double") test_that("removes following doubles", { expect_equal( rm_following_double(1:10), 1:10 ) expect_equal( rm_following_double(c(1, 1, 2, 3, 3, 4, 5, 5)), c(1, 2, 3, 4, 5) ) expect_equal( rm_following_double(c(1:5, 5:0, 0:3, 3:-1)), c(1:5, 4:0, 1:3, 2:-1) ) expect_equal( rm_following_double(rep(c(1:5, 5:1), each = 3)), c(1:5, 4:1) ) }) # merge_2_ways ------------------------------------------------------------ context(":::merge_2_ways") test_that("merge 2 ways", { expect_error( merge_2_ways( c("1", "2", "3"), NULL ), "empty" ) expect_error( merge_2_ways( c("1", "2", "3"), c("2", "10", "11") ), "ways not connected by their ends" ) # need 0 reverse expect_equal( merge_2_ways( c("1", "2", "3"), c("3", "4", "5", "6") ), c("1", "2", "3", "4", "5", "6") ) # need 1 reverse (v1) expect_equal( merge_2_ways( c("1", "2", "3"), c("6", "5", "4", "3") ), c("1", "2", "3", "4", "5", "6") ) # need 1 reverse (v2) expect_equal( merge_2_ways( c("3", "2", "1"), c("3", "4", "5", "6") ), c("1", "2", "3", "4", "5", "6") ) # need 2 reverses expect_equal( merge_2_ways( c("3", "2", "1"), c("6", "5", "4", "3") ), c("1", "2", "3", "4", "5", "6") ) }) # roundabout_part --------------------------------------------------------- context(":::roundabout_part") test_that("simple_case", { expect_equal( roundabout_part( c("2", "4"), c("2", "5", "1", "3", "2"), c("1", "6") ), c("2", "5", "1") ) # -- 3 -- # / \ # / \ # 4 --- *2* 1 ---- 6 # \ / # \ / # -- 5 -- }) test_that("with rotation", { expect_equal( roundabout_part( c("2", "4"), c("3", "2", "5", "1", "3"), c("1", "6") ), c("2", "5", "1") ) # < *3* < # / \ # / \ # 4 -<- 2 1 ->- 6 # \ / # \ / # -> 5 -> expect_equal( roundabout_part( c("2", "4"), c("1", "3", "2", "5", "1"), c("1", "6") ), c("2", "5", "1") ) # <- 3 -- # / \ # / \ # 4 ---- 2 *1* ---- 6 # \ / # \ / # -- 5 -> expect_equal( roundabout_part( c("2", "4"), c("5", "1", "3", "2", "5"), c("1", "6") ), c("2", "5", "1") ) # <- 3 -- # / \ # / \ # 4 ---- 2 1 ---- 6 # \ / # \ / # --*5*-> }) test_that("entrance same as exit", { expect_equal( roundabout_part( c("2", "4"), c("3", "2", "5", "1", "3") ), c("2", "5", "1", "3", "2") ) expect_equal( roundabout_part( c("1", "6"), c("5", "1", "3", "2", "5") ), c("1", "3", "2", "5", "1") ) }) test_that("anti_clockwise", { expect_equal( roundabout_part( c("2", "4"), rev(c("3", "2", "5", "1", "3")), c("1", "6") ), c("2", "3", "1") ) # > *3* > # / \ # / \ # 4 -<- 2 1 ->- 6 # \ / # \ / # -< 5 -< }) # merge_ways 
-------------------------------------------------------------- # < *3* < # / \ # / \ # 4 -<- 2 1 ->- 6 -<- 7 -<- 8 ->- 9 ->- 10 # \ / # \ / # -> 5 -> test_that("first or last way is not circular", { expect_error( merge_ways( list( c("3", "2", "5", "1", "3"), # (roundabout) c("1", "6"), c("8", "7", "6") ) ), "first or last way cannot be circular" ) }) test_that("straight line", { expect_equal( merge_ways( list( c("1", "6"), c("8", "7", "6"), c("8", "9", "10") ) ), c("1", "6", "7", "8", "9", "10") ) }) test_that("including roudabout", { expect_equal( merge_ways( list( c("2", "4"), c("3", "2", "5", "1", "3"), # (roundabout) c("1", "6"), c("8", "7", "6"), c("8", "9", "10") ) ), c("4", "2", "5", "1", "6", "7", "8", "9", "10") ) })
library(e1071) trainingB <- read.csv(file="D:\\drive\\JUNIOR\\CSC240\\CSC240 Course Project\\Test\\SendHelp\\SetB.csv", header=TRUE, sep=",") trainingB modelB <- naiveBayes(ColorID ~ ., data = trainingB) modelB as.data.frame(trainingB)[11,] predict(modelB, as.data.frame(trainingB)[11,], type="raw")
/TrainingB.R
no_license
benjaminhechen/naive-bayes-and-logreg-setup
R
false
false
319
r
library(e1071) trainingB <- read.csv(file="D:\\drive\\JUNIOR\\CSC240\\CSC240 Course Project\\Test\\SendHelp\\SetB.csv", header=TRUE, sep=",") trainingB modelB <- naiveBayes(ColorID ~ ., data = trainingB) modelB as.data.frame(trainingB)[11,] predict(modelB, as.data.frame(trainingB)[11,], type="raw")
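A possible follow-up to the TrainingB.R script above, not in the original file: hard class predictions over the whole training set and a simple confusion table, assuming modelB and trainingB as defined there.

predB <- predict(modelB, as.data.frame(trainingB), type = "class")
table(predicted = predB, actual = trainingB$ColorID)   # rows: predicted, columns: observed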
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.svyContTable.R \name{print.svyContTable} \alias{print.svyContTable} \title{Format and print \code{svyContTable} class objects} \usage{ \method{print}{svyContTable}( x, digits = 2, pDigits = 3, quote = FALSE, missing = FALSE, explain = TRUE, printToggle = TRUE, noSpaces = FALSE, nonnormal = NULL, minMax = FALSE, insertLevel = FALSE, test = TRUE, smd = FALSE, formatOptions = list(scientific = FALSE), ... ) } \arguments{ \item{x}{Object returned by \code{\link{svyCreateContTable}} function.} \item{digits}{Number of digits to print in the table.} \item{pDigits}{Number of digits to print for p-values (also used for standardized mean differences).} \item{quote}{Whether to show everything in quotes. The default is FALSE. If TRUE, everything including the row and column names are quoted so that you can copy it to Excel easily.} \item{missing}{Whether to show missing data information.} \item{explain}{Whether to add explanation to the variable names, i.e., (mean (SD) or median [IQR]) is added to the variable names.} \item{printToggle}{Whether to print the output. If FALSE, no output is created, and a matrix is invisibly returned.} \item{noSpaces}{Whether to remove spaces added for alignment. Use this option if you prefer to align numbers yourself in other software.} \item{nonnormal}{A character vector to specify the variables for which the p-values should be those of nonparametric tests. By default all p-values are from normal assumption-based tests (oneway.test).} \item{minMax}{Whether to use [min,max] instead of [p25,p75] for nonnormal variables. The default is FALSE.} \item{insertLevel}{Whether to add an empty level column to the left of strata.} \item{test}{Whether to show p-values. TRUE by default. If FALSE, only the numerical summaries are shown.} \item{smd}{Whether to show standardized mean differences. FALSE by default. If there are more than one contrasts, the average of all possible standardized mean differences is shown. For individual contrasts, use \code{summary}.} \item{formatOptions}{A list of options, which will be passed to \code{\link[base]{format}}. Can be used to modify the \code{big.mark}, \code{decimal.mark}, \code{big.interval} etc. The default is \code{list(scientific = FALSE)}. The options digits, nsmall, justify and trim are not available. (Experimental)} \item{...}{For compatibility with generic. Ignored.} } \value{ A matrix object containing what you see is also invisibly returned. This can be assinged a name and exported via \code{write.csv}. } \description{ \code{print} method for the \code{svyContTable} class objects created by \code{\link{CreateContTable}} function. } \examples{ ## See the examples for svyCreateTableOne() } \seealso{ \code{\link{svyCreateTableOne}}, \code{\link{svyCreateCatTable}}, \code{\link{summary.svyCatTable}} } \author{ Kazuki Yoshida, Alexander Bartel }
/man/print.svyContTable.Rd
no_license
kaz-yos/tableone
R
false
true
2,980
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.svyContTable.R \name{print.svyContTable} \alias{print.svyContTable} \title{Format and print \code{svyContTable} class objects} \usage{ \method{print}{svyContTable}( x, digits = 2, pDigits = 3, quote = FALSE, missing = FALSE, explain = TRUE, printToggle = TRUE, noSpaces = FALSE, nonnormal = NULL, minMax = FALSE, insertLevel = FALSE, test = TRUE, smd = FALSE, formatOptions = list(scientific = FALSE), ... ) } \arguments{ \item{x}{Object returned by \code{\link{svyCreateContTable}} function.} \item{digits}{Number of digits to print in the table.} \item{pDigits}{Number of digits to print for p-values (also used for standardized mean differences).} \item{quote}{Whether to show everything in quotes. The default is FALSE. If TRUE, everything including the row and column names are quoted so that you can copy it to Excel easily.} \item{missing}{Whether to show missing data information.} \item{explain}{Whether to add explanation to the variable names, i.e., (mean (SD) or median [IQR]) is added to the variable names.} \item{printToggle}{Whether to print the output. If FALSE, no output is created, and a matrix is invisibly returned.} \item{noSpaces}{Whether to remove spaces added for alignment. Use this option if you prefer to align numbers yourself in other software.} \item{nonnormal}{A character vector to specify the variables for which the p-values should be those of nonparametric tests. By default all p-values are from normal assumption-based tests (oneway.test).} \item{minMax}{Whether to use [min,max] instead of [p25,p75] for nonnormal variables. The default is FALSE.} \item{insertLevel}{Whether to add an empty level column to the left of strata.} \item{test}{Whether to show p-values. TRUE by default. If FALSE, only the numerical summaries are shown.} \item{smd}{Whether to show standardized mean differences. FALSE by default. If there are more than one contrasts, the average of all possible standardized mean differences is shown. For individual contrasts, use \code{summary}.} \item{formatOptions}{A list of options, which will be passed to \code{\link[base]{format}}. Can be used to modify the \code{big.mark}, \code{decimal.mark}, \code{big.interval} etc. The default is \code{list(scientific = FALSE)}. The options digits, nsmall, justify and trim are not available. (Experimental)} \item{...}{For compatibility with generic. Ignored.} } \value{ A matrix object containing what you see is also invisibly returned. This can be assinged a name and exported via \code{write.csv}. } \description{ \code{print} method for the \code{svyContTable} class objects created by \code{\link{CreateContTable}} function. } \examples{ ## See the examples for svyCreateTableOne() } \seealso{ \code{\link{svyCreateTableOne}}, \code{\link{svyCreateCatTable}}, \code{\link{summary.svyCatTable}} } \author{ Kazuki Yoshida, Alexander Bartel }
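An illustrative usage sketch for the print method documented above. The data and variable names (mydata, psu, wt, age, group) are hypothetical; only the function and option names come from the tableone and survey documentation.

library(survey)
library(tableone)
des <- svydesign(ids = ~psu, weights = ~wt, data = mydata)             # survey design (hypothetical data)
tab <- svyCreateContTable(vars = "age", strata = "group", data = des)
print(tab, digits = 2, nonnormal = "age", smd = TRUE)                  # options documented in this help page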
source('resources/cellcounts/wbcInference-V112.R') ## arguments <- commandArgs(T) methylationfile <- arguments[1] cellcountfile <- arguments[2] pcfile <- arguments[3] ccrnmethdatafile <- arguments[4] nthreads <- as.numeric(arguments[5]) chunks <- as.numeric(arguments[6]) jid <- as.numeric(arguments[7]) message("Reading methylation data...") load(methylationfile) cellcounts <- read.table(cellcountfile, he=T) stopifnot(all(cellcounts$IID == colnames(norm.beta))) cellcounts <- as.matrix(subset(cellcounts, select=-c(IID))) load(pcfile) if(!is.na(jid)) { chunksize <- ceiling(nrow(norm.beta) / chunks) i1 <- chunksize * (jid-1) + 1 i2 <- min(nrow(norm.beta), chunksize * jid) norm.beta <- norm.beta[i1:i2,] ccrnmethdatafile <- paste0(ccrnmethdatafile, ".", jid, ".RData") } else { ccrnmethdatafile <- paste0(ccrnmethdatafile, ".RData") } message("Data size: ", ncol(norm.beta), " individuals and ", nrow(norm.beta), " CpGs.") # adjust for cell counts norm.beta <- adjust.beta(norm.beta, pc, cellcounts, mc.cores=nthreads) # and rank transformed data of cell countadjusted betas norm.beta <- inverse.rank.transform(norm.beta, nthreads) # Save results save(norm.beta, file=ccrnmethdatafile)
/resources/cellcounts/adjust_cellcounts.R
no_license
AST87/godmc
R
false
false
1,209
r
source('resources/cellcounts/wbcInference-V112.R') ## arguments <- commandArgs(T) methylationfile <- arguments[1] cellcountfile <- arguments[2] pcfile <- arguments[3] ccrnmethdatafile <- arguments[4] nthreads <- as.numeric(arguments[5]) chunks <- as.numeric(arguments[6]) jid <- as.numeric(arguments[7]) message("Reading methylation data...") load(methylationfile) cellcounts <- read.table(cellcountfile, he=T) stopifnot(all(cellcounts$IID == colnames(norm.beta))) cellcounts <- as.matrix(subset(cellcounts, select=-c(IID))) load(pcfile) if(!is.na(jid)) { chunksize <- ceiling(nrow(norm.beta) / chunks) i1 <- chunksize * (jid-1) + 1 i2 <- min(nrow(norm.beta), chunksize * jid) norm.beta <- norm.beta[i1:i2,] ccrnmethdatafile <- paste0(ccrnmethdatafile, ".", jid, ".RData") } else { ccrnmethdatafile <- paste0(ccrnmethdatafile, ".RData") } message("Data size: ", ncol(norm.beta), " individuals and ", nrow(norm.beta), " CpGs.") # adjust for cell counts norm.beta <- adjust.beta(norm.beta, pc, cellcounts, mc.cores=nthreads) # and rank transformed data of cell countadjusted betas norm.beta <- inverse.rank.transform(norm.beta, nthreads) # Save results save(norm.beta, file=ccrnmethdatafile)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gcloud.R \name{gcloud-paths} \alias{gcloud-paths} \alias{gcloud} \alias{gsutil} \title{Discover paths of gcloud executables.} \usage{ gcloud() gsutil() } \description{ Discover the paths of the \code{gcloud} and \code{gsutil} executables. } \details{ The path to the \code{gcloud} executable can be explicitly specified, using the \code{GCLOUD_BINARY_PATH} environment variable, or the \code{gcloud.binary.path} \R option. The path to the \code{gsutil} executable can be explicitly specified, using the \code{GSUTIL_BINARY_PATH} environment variable, or the \code{gsutil.binary.path} \R option. When none of the above are set, locations will instead be discovered either on the system \code{PATH}, or by looking in the default folders used for the Google Cloud SDK installation. } \keyword{internal}
/man/gcloud-paths.Rd
no_license
Geoany/cloudml
R
false
true
881
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gcloud.R \name{gcloud-paths} \alias{gcloud-paths} \alias{gcloud} \alias{gsutil} \title{Discover paths of gcloud executables.} \usage{ gcloud() gsutil() } \description{ Discover the paths of the \code{gcloud} and \code{gsutil} executables. } \details{ The path to the \code{gcloud} executable can be explicitly specified, using the \code{GCLOUD_BINARY_PATH} environment variable, or the \code{gcloud.binary.path} \R option. The path to the \code{gsutil} executable can be explicitly specified, using the \code{GSUTIL_BINARY_PATH} environment variable, or the \code{gsutil.binary.path} \R option. When none of the above are set, locations will instead be discovered either on the system \code{PATH}, or by looking in the default folders used for the Google Cloud SDK installation. } \keyword{internal}
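A short illustration of the configuration mechanism described in the cloudml help page above; the SDK paths are hypothetical, and because the helpers are documented as internal they may need the ::: accessor.

options(gcloud.binary.path = "/opt/google-cloud-sdk/bin/gcloud")     # R option route
Sys.setenv(GSUTIL_BINARY_PATH = "/opt/google-cloud-sdk/bin/gsutil")  # environment variable route
cloudml:::gcloud()   # should now resolve to the configured gcloud path
cloudml:::gsutil()   # should now resolve to the configured gsutil path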
context("logger_index") test_that("logger_index works as expected", { remove_all_loggers() on.exit(remove_all_loggers()) get_logger("tree/leaf") get_logger("shrub/leaf") get_logger("plant/shrub/leaf") expect_identical(nrow(logger_index()), 8L) })
/tests/testthat/test_logger_index.R
permissive
s-fleck/lgr
R
false
false
264
r
context("logger_index") test_that("logger_index works as expected", { remove_all_loggers() on.exit(remove_all_loggers()) get_logger("tree/leaf") get_logger("shrub/leaf") get_logger("plant/shrub/leaf") expect_identical(nrow(logger_index()), 8L) })
# Automatically generated by openapi-generator (https://openapi-generator.tech) # Please update as you see appropriate context("Test MethodsApi") api.instance <- MethodsApi$new() test_that("MethodsGet", { # tests for MethodsGet # base path: https://test-server.brapi.org/brapi/v2 # Get the Methods # Returns a list of Methods available on a server. An Observation Variable has 3 critical parts; A Trait being observed, a Method for making the observation, and a Scale on which the observation can be measured and compared with other observations.&#39; # @param method.db.id character The unique identifier for a method (optional) # @param observation.variable.db.id character The unique identifier for an observation variable (optional) # @param external.reference.id character An external reference ID. Could be a simple string or a URI. (use with `externalReferenceSource` parameter) (optional) # @param external.reference.source character An identifier for the source system or database of an external reference (use with `externalReferenceID` parameter) (optional) # @param page integer Used to request a specific page of data to be returned. The page indexing starts at 0 (the first page is 'page'= 0). Default is `0`. (optional) # @param page.size integer The size of the pages to be returned. Default is `1000`. (optional) # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @return [MethodListResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") }) test_that("MethodsMethodDbIdGet", { # tests for MethodsMethodDbIdGet # base path: https://test-server.brapi.org/brapi/v2 # Get the details for a specific Method # Retrieve details about a specific method An Observation Variable has 3 critical parts; A Trait being observed, a Method for making the observation, and a Scale on which the observation can be measured and compared with other observations. # @param method.db.id character Id of the method to retrieve details of. # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @return [MethodSingleResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") }) test_that("MethodsMethodDbIdPut", { # tests for MethodsMethodDbIdPut # base path: https://test-server.brapi.org/brapi/v2 # Update an existing Method # Update the details of an existing method # @param method.db.id character Id of the method to retrieve details of. # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @param body MethodBaseClass (optional) # @return [MethodSingleResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") }) test_that("MethodsPost", { # tests for MethodsPost # base path: https://test-server.brapi.org/brapi/v2 # Add new Methods # Create new method objects in the database # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @param method.base.class array[MethodBaseClass] (optional) # @return [MethodListResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") })
/tests/testthat/test_methods_api.R
no_license
Breeding-Insight/brapi-r-v2
R
false
false
3,421
r
# Automatically generated by openapi-generator (https://openapi-generator.tech) # Please update as you see appropriate context("Test MethodsApi") api.instance <- MethodsApi$new() test_that("MethodsGet", { # tests for MethodsGet # base path: https://test-server.brapi.org/brapi/v2 # Get the Methods # Returns a list of Methods available on a server. An Observation Variable has 3 critical parts; A Trait being observed, a Method for making the observation, and a Scale on which the observation can be measured and compared with other observations.&#39; # @param method.db.id character The unique identifier for a method (optional) # @param observation.variable.db.id character The unique identifier for an observation variable (optional) # @param external.reference.id character An external reference ID. Could be a simple string or a URI. (use with `externalReferenceSource` parameter) (optional) # @param external.reference.source character An identifier for the source system or database of an external reference (use with `externalReferenceID` parameter) (optional) # @param page integer Used to request a specific page of data to be returned. The page indexing starts at 0 (the first page is 'page'= 0). Default is `0`. (optional) # @param page.size integer The size of the pages to be returned. Default is `1000`. (optional) # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @return [MethodListResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") }) test_that("MethodsMethodDbIdGet", { # tests for MethodsMethodDbIdGet # base path: https://test-server.brapi.org/brapi/v2 # Get the details for a specific Method # Retrieve details about a specific method An Observation Variable has 3 critical parts; A Trait being observed, a Method for making the observation, and a Scale on which the observation can be measured and compared with other observations. # @param method.db.id character Id of the method to retrieve details of. # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @return [MethodSingleResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") }) test_that("MethodsMethodDbIdPut", { # tests for MethodsMethodDbIdPut # base path: https://test-server.brapi.org/brapi/v2 # Update an existing Method # Update the details of an existing method # @param method.db.id character Id of the method to retrieve details of. # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @param body MethodBaseClass (optional) # @return [MethodSingleResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") }) test_that("MethodsPost", { # tests for MethodsPost # base path: https://test-server.brapi.org/brapi/v2 # Add new Methods # Create new method objects in the database # @param authorization character HTTP HEADER - Token used for Authorization <strong> Bearer {token_string} </strong> (optional) # @param method.base.class array[MethodBaseClass] (optional) # @return [MethodListResponse] # uncomment below to test the operation #expect_equal(result, "EXPECTED_RESULT") })
library(ggthemes) ### Name: theme_tufte ### Title: Tufte Maximal Data, Minimal Ink Theme ### Aliases: theme_tufte ### ** Examples library("ggplot2") p <- ggplot(mtcars, aes(x = wt, y = mpg)) + geom_point() + scale_x_continuous(breaks = extended_range_breaks()(mtcars$wt)) + scale_y_continuous(breaks = extended_range_breaks()(mtcars$mpg)) + ggtitle("Cars") p + geom_rangeframe() + theme_tufte() p + geom_rug() + theme_tufte(ticks = FALSE)
/data/genthat_extracted_code/ggthemes/examples/theme_tufte.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
460
r
library(ggthemes) ### Name: theme_tufte ### Title: Tufte Maximal Data, Minimal Ink Theme ### Aliases: theme_tufte ### ** Examples library("ggplot2") p <- ggplot(mtcars, aes(x = wt, y = mpg)) + geom_point() + scale_x_continuous(breaks = extended_range_breaks()(mtcars$wt)) + scale_y_continuous(breaks = extended_range_breaks()(mtcars$mpg)) + ggtitle("Cars") p + geom_rangeframe() + theme_tufte() p + geom_rug() + theme_tufte(ticks = FALSE)
load.netmind.rawdata = function(fn, f, set) { out=NULL netmind=NULL header = readLines(fn, n=20) outfmt = c(dates="year-m-d", times="h:m:s") if (length(header) < 20 ) return( out ) localtime = netmindDate( header=header, outvalue="localtime" ) tmpfile = "tmp.netmind" # remove "*" -- hard to do internally without fileswapping out and reloading tmp = readLines(fn) tmp = gsub("[*]", "", tmp ) write( tmp, file=tmpfile ) # skip 16 because first few records are sometimes incomplete netmind = read.table( file=tmpfile, sep="", as.is=T, colClasses="character", header=F, skip=16) file.remove(tmpfile) nr0 = nrow(netmind) if (nr0 < 30 ) return(out) nc0 = ncol(netmind) if ( nc0 < 12 ) return ( out ) # no net metrics stored ( only position and speed ) -- ignored if ( nc0 > 14 ) stop( fn) # should not be the case unless new data streams start if ( nc0 == 12) { # older files do not have depths, add a dummy column # more modern data have 13 columns 2000 + netmind$depth= NA } if ( nc0 == 14 & as.numeric(netmind[1,1])<130000) { # a few files have wing spread as well # more modern data have 13 columns 2000 + netmind[,13] = NULL } colnames(netmind) = c("ndate", "ntime", "lat.deg", "lat.min", "lat.orient", "lon.deg", "lon.min", "lon.orient", "speed", "primary", "secondary", "doorspread", "depth") numbers = c("lat.deg", "lat.min", "lon.deg", "lon.min", "speed", "primary", "secondary", "doorspread", "depth") netmind = factor2number(netmind, numbers) netmind$tinc = 1:nrow(netmind) # determine deepest point if possible, using a smoothed depth as variability due to incorrect pings are frequent deepest.point = NULL if (nc0 %in% c( 13, 14 ) ) { require(mgcv) z.gam = try ( gam( depth ~ s(tinc, k=5, bs="ts"), data=netmind, optimizer=c("outer", "nlm") ), silent=T ) if ( ! "try-error" %in% class( z.gam )) { netmind$depth.smoothed = predict( z.gam, newdata=netmind, newdata.guaranteed=T ) deepest.point = which.max ( netmind$depth.smoothed) } } if ( nc0==12 | length(deepest.point) == 0 ) deepest.point = round( nrow(netmind) / 2 ) netmind$lon = - (netmind$lon.deg + (netmind$lon.min / 60) ) netmind$lat = netmind$lat.deg + (netmind$lat.min / 60) netmind = netmind[, c("ndate", "ntime", "lat", "lon", "speed", "primary", "secondary", "doorspread", "depth")] netmind$ndate = paste(substring(netmind$ndate,1,2), substring(netmind$ndate,3,4), substring(netmind$ndate,5,6), sep="-") netmind$ntime = paste(substring(netmind$ntime,1,2), substring(netmind$ntime,3,4), substring(netmind$ntime,5,6), sep=":") netmind$chron = chron( dates.=netmind$ndate, times.=netmind$ntime, format=c(dates="y-m-d", times="h:m:s"), out.format=outfmt ) # netmind data stored in GMT/UTC from GPS; the offset varies depending upon season due to daylight savings time (3 or 4 hrs) # obtain time offset in hours time.offset = netmindDate( header=header, outvalue="timeoffset" ) # rounded to hours of fractional days netmind$chron = as.chron( as.numeric(netmind$chron) + time.offset, out.format = outfmt ) # now in local time (America/Halifax) netmind.timestamp = netmind$chron[deepest.point] yr = as.numeric( as.character( years( netmind.timestamp ) ) ) line.localtime = grep("Local Time:", header, ignore.case=T ) line.ship = grep("Ship:", header, ignore.case=T ) line.comments = grep("Comments:", header, ignore.case=T ) trip = gsub( "^.*Trip:", "", header[ line.ship ] ) trip = gsub( "Tow:.*$", "", trip ) trip = gsub( "[[:space:]]", "", trip ) if ( ! 
grepl( "^S[[:digit:]]{8}$", trip, ignore.case=T ) ) { # not a standard code dy = paste( "00", as.character( days(netmind.timestamp) ), sep="") dy = substring( dy, nchar(dy)-1, nchar(dy) ) mn = paste( "00", as.character( as.numeric(months(netmind.timestamp))), sep="") mn = substring( mn, nchar(mn)-1, nchar(mn) ) yr = paste( "00", as.character( years(netmind.timestamp) ), sep="") yr = substring( yr, nchar(yr)-3, nchar(yr) ) trip=paste("S", dy, mn, yr, sep="" ) } setno = gsub( "^.*Tow:", "", header[ line.ship ] ) setno = gsub( "[[:space:]]", "", setno ) setno = as.numeric( setno ) station = unlist(strsplit( header[[1]], "\\", fixed=TRUE )) station = station[ length(station) ] station = gsub( "[[:alpha:]]", "", station) station = gsub( "[[:punct:]]", "", station) station = as.numeric( station ) setxi = NULL if (is.null ( setxi ) ) { # check time first netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check time first netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] & set$set==setno ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check time and station netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] & set$trip==trip & set$station==station & set$set==setno ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check time and station netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] & set$station==station ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check distances ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$yr==yr ) # less than 5 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) { # check distances ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$trip==trip ) # less than 5 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) { # check staion, distance, and time ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$station==station & set$trip==trip & set$set==setno ) # less than 10 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) { # check staion, distance, and time ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$station==station & set$trip==trip ) # less than 10 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) return (NULL) # no matching data -- break setx = set[ setxi , ] # matching trip/set/station netmind_uid = paste( "netmind", setx$trip, setx$set, setx$station, 
hours(netmind.timestamp), minutes(netmind.timestamp), f, sep=".") filename = basename(fn) line.ship = grep("Ship:", header, ignore.case=T ) line.comments = grep("Comments:", header, ignore.case=T ) comments = gsub("^Comments: ", "", header[ line.comments] ) netmind$netmind_uid = netmind_uid metadata = data.frame( filename, netmind_uid, yr, netmind.timestamp, setx$trip, setx$set, setx$station, setx$Zx, setx$chron,comments, stringsAsFactors =FALSE ) names( metadata ) = c("filename", "netmind_uid", "yr", "netmind_timestamp", "trip", "set", "station", "setZx", "setChron", "comments" ) metadata$yr = as.numeric( as.character( metadata$yr )) basedata = netmind[ which( !is.na( netmind$netmind_uid) ) ,] out = list( metadata=metadata, basedata=basedata ) return(out) }
/snowcrab/src/_Rfunctions/survey/load.netmind.rawdata.r
no_license
fernandomayer/ecomod
R
false
false
8,990
r
load.netmind.rawdata = function(fn, f, set) { out=NULL netmind=NULL header = readLines(fn, n=20) outfmt = c(dates="year-m-d", times="h:m:s") if (length(header) < 20 ) return( out ) localtime = netmindDate( header=header, outvalue="localtime" ) tmpfile = "tmp.netmind" # remove "*" -- hard to do internally without fileswapping out and reloading tmp = readLines(fn) tmp = gsub("[*]", "", tmp ) write( tmp, file=tmpfile ) # skip 16 because first few records are sometimes incomplete netmind = read.table( file=tmpfile, sep="", as.is=T, colClasses="character", header=F, skip=16) file.remove(tmpfile) nr0 = nrow(netmind) if (nr0 < 30 ) return(out) nc0 = ncol(netmind) if ( nc0 < 12 ) return ( out ) # no net metrics stored ( only position and speed ) -- ignored if ( nc0 > 14 ) stop( fn) # should not be the case unless new data streams start if ( nc0 == 12) { # older files do not have depths, add a dummy column # more modern data have 13 columns 2000 + netmind$depth= NA } if ( nc0 == 14 & as.numeric(netmind[1,1])<130000) { # a few files have wing spread as well # more modern data have 13 columns 2000 + netmind[,13] = NULL } colnames(netmind) = c("ndate", "ntime", "lat.deg", "lat.min", "lat.orient", "lon.deg", "lon.min", "lon.orient", "speed", "primary", "secondary", "doorspread", "depth") numbers = c("lat.deg", "lat.min", "lon.deg", "lon.min", "speed", "primary", "secondary", "doorspread", "depth") netmind = factor2number(netmind, numbers) netmind$tinc = 1:nrow(netmind) # determine deepest point if possible, using a smoothed depth as variability due to incorrect pings are frequent deepest.point = NULL if (nc0 %in% c( 13, 14 ) ) { require(mgcv) z.gam = try ( gam( depth ~ s(tinc, k=5, bs="ts"), data=netmind, optimizer=c("outer", "nlm") ), silent=T ) if ( ! "try-error" %in% class( z.gam )) { netmind$depth.smoothed = predict( z.gam, newdata=netmind, newdata.guaranteed=T ) deepest.point = which.max ( netmind$depth.smoothed) } } if ( nc0==12 | length(deepest.point) == 0 ) deepest.point = round( nrow(netmind) / 2 ) netmind$lon = - (netmind$lon.deg + (netmind$lon.min / 60) ) netmind$lat = netmind$lat.deg + (netmind$lat.min / 60) netmind = netmind[, c("ndate", "ntime", "lat", "lon", "speed", "primary", "secondary", "doorspread", "depth")] netmind$ndate = paste(substring(netmind$ndate,1,2), substring(netmind$ndate,3,4), substring(netmind$ndate,5,6), sep="-") netmind$ntime = paste(substring(netmind$ntime,1,2), substring(netmind$ntime,3,4), substring(netmind$ntime,5,6), sep=":") netmind$chron = chron( dates.=netmind$ndate, times.=netmind$ntime, format=c(dates="y-m-d", times="h:m:s"), out.format=outfmt ) # netmind data stored in GMT/UTC from GPS; the offset varies depending upon season due to daylight savings time (3 or 4 hrs) # obtain time offset in hours time.offset = netmindDate( header=header, outvalue="timeoffset" ) # rounded to hours of fractional days netmind$chron = as.chron( as.numeric(netmind$chron) + time.offset, out.format = outfmt ) # now in local time (America/Halifax) netmind.timestamp = netmind$chron[deepest.point] yr = as.numeric( as.character( years( netmind.timestamp ) ) ) line.localtime = grep("Local Time:", header, ignore.case=T ) line.ship = grep("Ship:", header, ignore.case=T ) line.comments = grep("Comments:", header, ignore.case=T ) trip = gsub( "^.*Trip:", "", header[ line.ship ] ) trip = gsub( "Tow:.*$", "", trip ) trip = gsub( "[[:space:]]", "", trip ) if ( ! 
grepl( "^S[[:digit:]]{8}$", trip, ignore.case=T ) ) { # not a standard code dy = paste( "00", as.character( days(netmind.timestamp) ), sep="") dy = substring( dy, nchar(dy)-1, nchar(dy) ) mn = paste( "00", as.character( as.numeric(months(netmind.timestamp))), sep="") mn = substring( mn, nchar(mn)-1, nchar(mn) ) yr = paste( "00", as.character( years(netmind.timestamp) ), sep="") yr = substring( yr, nchar(yr)-3, nchar(yr) ) trip=paste("S", dy, mn, yr, sep="" ) } setno = gsub( "^.*Tow:", "", header[ line.ship ] ) setno = gsub( "[[:space:]]", "", setno ) setno = as.numeric( setno ) station = unlist(strsplit( header[[1]], "\\", fixed=TRUE )) station = station[ length(station) ] station = gsub( "[[:alpha:]]", "", station) station = gsub( "[[:punct:]]", "", station) station = as.numeric( station ) setxi = NULL if (is.null ( setxi ) ) { # check time first netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check time first netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] & set$set==setno ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check time and station netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] & set$trip==trip & set$station==station & set$set==setno ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check time and station netmind.date.range = range( netmind$chron ) sets.in.date.range = which( set$chron >= netmind.date.range[1] & set$chron <= netmind.date.range[2] & set$station==station ) if ( length( sets.in.date.range ) == 1 ) setxi= sets.in.date.range } if (is.null ( setxi ) ) { # check distances ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$yr==yr ) # less than 5 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) { # check distances ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$trip==trip ) # less than 5 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) { # check staion, distance, and time ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$station==station & set$trip==trip & set$set==setno ) # less than 10 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) { # check staion, distance, and time ni = trunc( nrow(netmind) / 2 ) dx = abs( set$lon - netmind$lon[ni] ) dy = abs( set$lat - netmind$lat[ni] ) sets.in.spatial.range = which( dx < 5/60 & dy < 5/60 & set$station==station & set$trip==trip ) # less than 10 minutes away if ( length( sets.in.spatial.range ) == 1 ) setxi= sets.in.spatial.range } if (is.null ( setxi ) ) return (NULL) # no matching data -- break setx = set[ setxi , ] # matching trip/set/station netmind_uid = paste( "netmind", setx$trip, setx$set, setx$station, 
hours(netmind.timestamp), minutes(netmind.timestamp), f, sep=".") filename = basename(fn) line.ship = grep("Ship:", header, ignore.case=T ) line.comments = grep("Comments:", header, ignore.case=T ) comments = gsub("^Comments: ", "", header[ line.comments] ) netmind$netmind_uid = netmind_uid metadata = data.frame( filename, netmind_uid, yr, netmind.timestamp, setx$trip, setx$set, setx$station, setx$Zx, setx$chron,comments, stringsAsFactors =FALSE ) names( metadata ) = c("filename", "netmind_uid", "yr", "netmind_timestamp", "trip", "set", "station", "setZx", "setChron", "comments" ) metadata$yr = as.numeric( as.character( metadata$yr )) basedata = netmind[ which( !is.na( netmind$netmind_uid) ) ,] out = list( metadata=metadata, basedata=basedata ) return(out) }
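A hypothetical driver loop for the loader above; the directory, the file counter f and the set data frame are assumptions, and only the function signature and the NULL-on-no-match convention come from the code itself.

fns <- list.files("netmind/rawdata", full.names = TRUE)
out <- lapply(seq_along(fns), function(i) load.netmind.rawdata(fns[i], f = i, set = set))
out <- out[!sapply(out, is.null)]   # drop files that could not be matched to a set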
#' Cabbage temp requirement for land evaluation
#'
#' A dataset containing the temp characteristics of the crop requirements for farming Cabbage.
#'
#' @details
#' The following are the factors for evaluation:
#'
#' \itemize{
#'  \item TgAv - Mean temperature of the growing cycle (°C)
#'  \item TmAv0 - Mean temp. at germination (°C) (1st month)
#'  \item TdDiff - Temperature difference day/night (°C)
#' }
#' @seealso
#' \itemize{
#'  \item Yen, B. T., Pheng, K. S., and Hoanh, C. T. (2006). \emph{LUSET: Land Use Suitability Evaluation Tool User's Guide}. International Rice Research Institute.
#' }
#'
#' @docType data
#' @keywords dataset
#' @format A data frame with 3 rows and 8 columns
#' @name CABBAGETemp
NULL
/R/CABBAGETemp.R
permissive
alstat/ALUES
R
false
false
776
r
#' Cabbage temp requirement for land evaluation
#'
#' A dataset containing the temp characteristics of the crop requirements for farming Cabbage.
#'
#' @details
#' The following are the factors for evaluation:
#'
#' \itemize{
#'  \item TgAv - Mean temperature of the growing cycle (°C)
#'  \item TmAv0 - Mean temp. at germination (°C) (1st month)
#'  \item TdDiff - Temperature difference day/night (°C)
#' }
#' @seealso
#' \itemize{
#'  \item Yen, B. T., Pheng, K. S., and Hoanh, C. T. (2006). \emph{LUSET: Land Use Suitability Evaluation Tool User's Guide}. International Rice Research Institute.
#' }
#'
#' @docType data
#' @keywords dataset
#' @format A data frame with 3 rows and 8 columns
#' @name CABBAGETemp
NULL
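A quick way to inspect the requirement table documented above, assuming the ALUES package is installed:

library(ALUES)
CABBAGETemp        # the 3-row by 8-column requirement table described in @format
str(CABBAGETemp)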
#===================== #AssignHhVehicleDvmt.R #===================== #This module splits household DVMT among household vehicles as a function of #assigned household vehicle DVMT splits and household DVMT. The module is #callable. #================================= #Packages used in code development #================================= #Uncomment following lines during code development. Recomment when done. # library(visioneval) #============================================= #SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS #============================================= #This module has no estimated model. #================================================ #SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS #================================================ #Define the data specifications #------------------------------ AssignHhVehicleDvmtSpecifications <- list( #Level of geography module is applied at RunBy = "Azone", #Specify new tables to be created by Inp if any #Specify new tables to be created by Set if any #Specify input data #Specify data to be loaded from data store Get = items( item( NAME = "HhId", TABLE = "Household", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "", ISELEMENTOF = "" ), item( NAME = "Dvmt", TABLE = "Household", GROUP = "Year", TYPE = "compound", UNITS = "MI/DAY", PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "" ), item( NAME = "HhId", TABLE = "Vehicle", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "NA", ISELEMENTOF = "" ), item( NAME = "DvmtProp", TABLE = "Vehicle", GROUP = "Year", TYPE = "double", UNITS = "proportion", PROHIBIT = c("NA", "< 0", "> 1"), ISELEMENTOF = "" ) ), #Specify data to saved in the data store Set = items( item( NAME = "Dvmt", TABLE = "Vehicle", GROUP = "Year", TYPE = "compound", UNITS = "MI/DAY", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Average vehicle DVMT" ) ), #Module is callable Call = TRUE ) #Save the data specifications list #--------------------------------- #' Specifications list for AssignHhVehicleDvmt module #' #' A list containing specifications for the AssignHhVehicleDvmt module. #' #' @format A list containing 3 components: #' \describe{ #' \item{RunBy}{the level of geography that the module is run at} #' \item{Get}{module inputs to be read from the datastore} #' \item{Set}{module outputs to be written to the datastore} #' } #' @source AssignHhVehicleDvmt.R script. "AssignHhVehicleDvmtSpecifications" devtools::use_data(AssignHhVehicleDvmtSpecifications, overwrite = TRUE) #======================================================= #SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL #======================================================= #This function that assigns household DVMT to vehicles. #Main module function that assigns household DVMT to vehicles #------------------------------------------------------------ #' Assign household DVMT to household vehicles. #' #' \code{AssignVehicleDvmt} assigns DVMT to each household vehicle. #' #' This function assigns household DVMT to each vehicle as a function of #' household DVMT and precalculated proportional allocation of DVMT to vehicles. #' #' @param L A list containing the components listed in the Get specifications #' for the module. #' @return A list containing the components specified in the Set #' specifications for the module. 
#' @import visioneval #' @export AssignHhVehicleDvmt <- function(L) { #Calculate DVMT by vehicle VehDvmt_ <- L$Year$Vehicle$DvmtProp * L$Year$Household$Dvmt[match(L$Year$Vehicle$HhId, L$Year$Household$HhId)] #Return the results Out_ls <- initDataList() Out_ls$Year$Vehicle <- list(Dvmt = VehDvmt_) #Return the outputs list Out_ls } #================================ #Code to aid development and test #================================ #Test code to check specifications, loading inputs, and whether datastore #contains data needed to run module. Return input list (L) to use for developing #module functions #------------------------------------------------------------------------------- # TestDat_ <- testModule( # ModuleName = "AssignVehicleDvmt", # LoadDatastore = TRUE, # SaveDatastore = TRUE, # DoRun = FALSE # ) # L <- TestDat_$L # R <- AssignVehicleDvmt(L) #Test code to check everything including running the module and checking whether #the outputs are consistent with the 'Set' specifications #------------------------------------------------------------------------------- # TestDat_ <- testModule( # ModuleName = "AssignVehicleAge", # LoadDatastore = TRUE, # SaveDatastore = TRUE, # DoRun = TRUE # )
/sources/modules/VEEnergyAndEmissions/R/AssignHhVehicleDvmt.R
permissive
n8mauer/VisionEval
R
false
false
4,930
r
#===================== #AssignHhVehicleDvmt.R #===================== #This module splits household DVMT among household vehicles as a function of #assigned household vehicle DVMT splits and household DVMT. The module is #callable. #================================= #Packages used in code development #================================= #Uncomment following lines during code development. Recomment when done. # library(visioneval) #============================================= #SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS #============================================= #This module has no estimated model. #================================================ #SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS #================================================ #Define the data specifications #------------------------------ AssignHhVehicleDvmtSpecifications <- list( #Level of geography module is applied at RunBy = "Azone", #Specify new tables to be created by Inp if any #Specify new tables to be created by Set if any #Specify input data #Specify data to be loaded from data store Get = items( item( NAME = "HhId", TABLE = "Household", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "", ISELEMENTOF = "" ), item( NAME = "Dvmt", TABLE = "Household", GROUP = "Year", TYPE = "compound", UNITS = "MI/DAY", PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "" ), item( NAME = "HhId", TABLE = "Vehicle", GROUP = "Year", TYPE = "character", UNITS = "ID", PROHIBIT = "NA", ISELEMENTOF = "" ), item( NAME = "DvmtProp", TABLE = "Vehicle", GROUP = "Year", TYPE = "double", UNITS = "proportion", PROHIBIT = c("NA", "< 0", "> 1"), ISELEMENTOF = "" ) ), #Specify data to saved in the data store Set = items( item( NAME = "Dvmt", TABLE = "Vehicle", GROUP = "Year", TYPE = "compound", UNITS = "MI/DAY", NAVALUE = -1, PROHIBIT = c("NA", "< 0"), ISELEMENTOF = "", SIZE = 0, DESCRIPTION = "Average vehicle DVMT" ) ), #Module is callable Call = TRUE ) #Save the data specifications list #--------------------------------- #' Specifications list for AssignHhVehicleDvmt module #' #' A list containing specifications for the AssignHhVehicleDvmt module. #' #' @format A list containing 3 components: #' \describe{ #' \item{RunBy}{the level of geography that the module is run at} #' \item{Get}{module inputs to be read from the datastore} #' \item{Set}{module outputs to be written to the datastore} #' } #' @source AssignHhVehicleDvmt.R script. "AssignHhVehicleDvmtSpecifications" devtools::use_data(AssignHhVehicleDvmtSpecifications, overwrite = TRUE) #======================================================= #SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL #======================================================= #This function that assigns household DVMT to vehicles. #Main module function that assigns household DVMT to vehicles #------------------------------------------------------------ #' Assign household DVMT to household vehicles. #' #' \code{AssignVehicleDvmt} assigns DVMT to each household vehicle. #' #' This function assigns household DVMT to each vehicle as a function of #' household DVMT and precalculated proportional allocation of DVMT to vehicles. #' #' @param L A list containing the components listed in the Get specifications #' for the module. #' @return A list containing the components specified in the Set #' specifications for the module. 
#' @import visioneval #' @export AssignHhVehicleDvmt <- function(L) { #Calculate DVMT by vehicle VehDvmt_ <- L$Year$Vehicle$DvmtProp * L$Year$Household$Dvmt[match(L$Year$Vehicle$HhId, L$Year$Household$HhId)] #Return the results Out_ls <- initDataList() Out_ls$Year$Vehicle <- list(Dvmt = VehDvmt_) #Return the outputs list Out_ls } #================================ #Code to aid development and test #================================ #Test code to check specifications, loading inputs, and whether datastore #contains data needed to run module. Return input list (L) to use for developing #module functions #------------------------------------------------------------------------------- # TestDat_ <- testModule( # ModuleName = "AssignVehicleDvmt", # LoadDatastore = TRUE, # SaveDatastore = TRUE, # DoRun = FALSE # ) # L <- TestDat_$L # R <- AssignVehicleDvmt(L) #Test code to check everything including running the module and checking whether #the outputs are consistent with the 'Set' specifications #------------------------------------------------------------------------------- # TestDat_ <- testModule( # ModuleName = "AssignVehicleAge", # LoadDatastore = TRUE, # SaveDatastore = TRUE, # DoRun = TRUE # )
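A toy illustration of the allocation logic in AssignHhVehicleDvmt(); the household and vehicle values are invented, and visioneval must be attached because the function calls initDataList().

L <- list(
  Year = list(
    Household = list(HhId = c("H1", "H2"), Dvmt = c(30, 50)),
    Vehicle   = list(HhId = c("H1", "H1", "H2"), DvmtProp = c(0.6, 0.4, 1.0))
  )
)
AssignHhVehicleDvmt(L)$Year$Vehicle$Dvmt   # 18, 12, 50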
library(lpSolve)

# Using lpSolve to solve the following:
# You need to buy some filing cabinets. You know that Cabinet X costs $10 per unit,
# requires six square feet of floor space, and holds eight cubic feet of files.
# Cabinet Y costs $20 per unit, requires eight square feet of floor space, and holds
# twelve cubic feet of files. You have been given $140 for this purchase, though you
# don't have to spend that much. The office has room for no more than 72 square feet
# of cabinets. How many of which model should you buy, in order to maximize storage volume?

# Set up problem:
# X1 - Number of Cabinet X purchased
# X2 - Number of Cabinet Y purchased
# maximize: 8 x1 + 12 x2 subject to:
# cost: 10 x1 + 20 x2 <= 140
# floor space: 6 x1 + 8 x2 <= 72
# at least 0 x1: 1 x1 + 0 x2 >= 0
# at least 0 x2: 0 x1 + 1 x2 >= 0

f.obj <- c(8, 12)
f.con <- matrix (c(10, 20, 6, 8, 1, 0, 0, 1), nrow = 4, byrow=TRUE)
f.dir <- c("<=", "<=", ">=", ">=")
f.rhs <- c(140, 72, 0, 0)

# What is the maximum storage volume? 100 cubic feet of storage volume
lp ("max", f.obj, f.con, f.dir, f.rhs)

# How many of each type of cabinet should you buy? 8 of cabinet X and 3 of cabinet Y
lp ("max", f.obj, f.con, f.dir, f.rhs)$solution
/Linear-Programming/ex-lpSolve-Cabinets.r
no_license
colson1111/Other-R-Code
R
false
false
1,261
r
library(lpSolve)

# Using lpSolve to solve the following:
# You need to buy some filing cabinets. You know that Cabinet X costs $10 per unit,
# requires six square feet of floor space, and holds eight cubic feet of files.
# Cabinet Y costs $20 per unit, requires eight square feet of floor space, and holds
# twelve cubic feet of files. You have been given $140 for this purchase, though you
# don't have to spend that much. The office has room for no more than 72 square feet
# of cabinets. How many of which model should you buy, in order to maximize storage volume?

# Set up problem:
# X1 - Number of Cabinet X purchased
# X2 - Number of Cabinet Y purchased
# maximize: 8 x1 + 12 x2 subject to:
# cost: 10 x1 + 20 x2 <= 140
# floor space: 6 x1 + 8 x2 <= 72
# at least 0 x1: 1 x1 + 0 x2 >= 0
# at least 0 x2: 0 x1 + 1 x2 >= 0

f.obj <- c(8, 12)
f.con <- matrix (c(10, 20, 6, 8, 1, 0, 0, 1), nrow = 4, byrow=TRUE)
f.dir <- c("<=", "<=", ">=", ">=")
f.rhs <- c(140, 72, 0, 0)

# What is the maximum storage volume? 100 cubic feet of storage volume
lp ("max", f.obj, f.con, f.dir, f.rhs)

# How many of each type of cabinet should you buy? 8 of cabinet X and 3 of cabinet Y
lp ("max", f.obj, f.con, f.dir, f.rhs)$solution
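Continuing the script above, the optimum can also be stored once and unpacked; objval and solution are components of the object returned by lp().

sol <- lp ("max", f.obj, f.con, f.dir, f.rhs)
sol$objval     # 100 cubic feet of storage
sol$solution   # 8 3: eight of Cabinet X, three of Cabinet Y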
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/TrendLine.R, R/TrendLine_setters.R \docType{methods} \name{initialize,TrendLine-method} \alias{initialize,TrendLine-method} \alias{setFinalValue} \alias{setFinalValue,TrendLine,numeric-method} \alias{setFinalXValue} \alias{setFinalXValue,TrendLine,numeric-method} \alias{setInitialValue} \alias{setInitialValue,TrendLine,numeric-method} \alias{setInitialXValue} \alias{setInitialXValue,TrendLine,numeric-method} \alias{setValueAxis,TrendLine,ValueAxisOrCharacterOrMissing-method} \alias{setValueAxisX} \alias{setValueAxisX,TrendLine,ValueAxisOrCharacterOrMissing-method} \alias{trendLine} \title{Initialize a TrendLine} \usage{ \S4method{initialize}{TrendLine}(.Object, initialValue, initialXValue, finalValue, finalXValue, valueAxis, valueAxisX, ...) trendLine(.Object, initialValue, initialXValue, finalValue, finalXValue, valueAxis, valueAxisX, ...) setInitialValue(.Object, initialValue) \S4method{setInitialValue}{TrendLine,numeric}(.Object, initialValue) setInitialXValue(.Object, initialXValue) \S4method{setInitialXValue}{TrendLine,numeric}(.Object, initialXValue) setFinalValue(.Object, finalValue) \S4method{setFinalValue}{TrendLine,numeric}(.Object, finalValue) setFinalXValue(.Object, finalXValue) \S4method{setFinalXValue}{TrendLine,numeric}(.Object, finalXValue) \S4method{setValueAxis}{TrendLine,ValueAxisOrCharacterOrMissing}(.Object, valueAxis = NULL, ...) setValueAxisX(.Object, valueAxisX = NULL, ...) \S4method{setValueAxisX}{TrendLine,ValueAxisOrCharacterOrMissing}(.Object, valueAxisX = NULL, ...) } \arguments{ \item{.Object}{\linkS4class{TrendLine}.} \item{initialValue}{\code{numeric}. Value from which trend line should start.} \item{initialXValue}{\code{numeric}. Used by XY chart only. X value from which trend line should start.} \item{finalValue}{\code{numeric}. Value at which trend line should end.} \item{finalXValue}{\code{numeric}. Used by XY chart only. X value at which trend line should end.} \item{valueAxis}{\linkS4class{ValueAxis}. Value axis of the trend line. Will use first value axis of the chart if not set any. You can use a reference to the value axis object or id of value axis.} \item{valueAxisX}{\linkS4class{ValueAxis}. Used by XY chart only. X axis of trend line. Will use first X axis of the chart if not set any. You can use a reference to the value axis object or id of value axis.} \item{...}{Other properties.} } \description{ Initialize a TrendLine } \section{Functions}{ \itemize{ \item \code{trendLine}: }} \examples{ new("TrendLine", initialValue = 1, finalValue = 11) # Other example valueAxis <- valueAxis(title = "Hello !", axisTitleOffset = 12) new("TrendLine", valueAxis = valueAxis) trendLine(initialValue = 1, finalValue = 11) setInitialValue(.Object = trendLine(), initialValue = 16) setInitialXValue(.Object = trendLine(), initialXValue = 16) setFinalValue(.Object = trendLine(), finalValue = 16) setFinalXValue(.Object = trendLine(), finalXValue = 16) setValueAxis(.Object = trendLine(), id = "valueAxis-1", title = "Hello !", axisTitleOffset = 12) # equival to: valueAxis_obj <- valueAxis(id = "valueAxis-1", title = "Hello !", axisTitleOffset = 12) trendLine(valueAxis = valueAxis_obj) # or... 
trendLine(valueAxis = "valueAxis-1") # valid if and only if 'valueAxis_obj' has already been added to the chart # --- setValueAxisX(.Object = trendLine(), id = "valueAxisX-1", title = "Hello !", axisTitleOffset = 12) # equivalent to: valueAxisX_obj <- valueAxis(id = "valueAxisX-1", title = "Hello !", axisTitleOffset = 12) trendLine(valueAxisX = valueAxisX_obj) # or... trendLine(valueAxisX = "valueAxisX-1") # valid if and only if 'valueAxisX_obj' has already been added to the chart # --- }
/man/initialize-TrendLine.Rd
no_license
RomainBenetiere/rAmCharts
R
false
false
3,802
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/313.Sum_Leng_ADJ_All_Graph.R \name{PlotlengthALT} \alias{PlotlengthALT} \title{Plots the summary length using adjusted Logit Wald method} \usage{ PlotlengthALT(n, alp, h, a, b) } \arguments{ \item{n}{- Number of trials} \item{alp}{- Alpha value (significance level required)} \item{h}{- Adding factor} \item{a}{- Beta parameters for hypo "p"} \item{b}{- Beta parameters for hypo "p"} } \description{ Plots the summary length using adjusted Logit Wald method } \details{ The plots of the summary length of adjusted Wald method } \examples{ n= 10; alp=0.05; h=2;a=1;b=1; PlotlengthALT(n,alp,h,a,b) } \seealso{ Other Expected length of adjusted methods: \code{\link{PlotexplAAS}()}, \code{\link{PlotexplAAll}()}, \code{\link{PlotexplALR}()}, \code{\link{PlotexplALT}()}, \code{\link{PlotexplASC}()}, \code{\link{PlotexplATW}()}, \code{\link{PlotexplAWD}()}, \code{\link{PlotlengthAAS}()}, \code{\link{PlotlengthAAll}()}, \code{\link{PlotlengthALR}()}, \code{\link{PlotlengthASC}()}, \code{\link{PlotlengthATW}()}, \code{\link{PlotlengthAWD}()}, \code{\link{lengthAAS}()}, \code{\link{lengthAAll}()}, \code{\link{lengthALR}()}, \code{\link{lengthALT}()}, \code{\link{lengthASC}()}, \code{\link{lengthATW}()}, \code{\link{lengthAWD}()} } \concept{Expected length of adjusted methods}
/man/PlotlengthALT.Rd
no_license
RajeswaranV/proportion
R
false
true
1,365
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_documentation.R \docType{data} \name{corp_data1} \alias{corp_data1} \title{corp_data1} \format{ An object of class \code{data.table} (inherits from \code{data.frame}) with 10 rows and 6 columns. } \usage{ corp_data1 } \description{ Some made up data on the top 10 US companies in the Fortune 500. Mock-matched to corp_data2 in examples/match_template.R } \keyword{datasets}
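\examples{
# Added illustration (assumes the package is attached, e.g. via library(fedmatch),
# so that the lazy-loaded dataset documented above is available):
dim(corp_data1)   # 10 rows, 6 columns
head(corp_data1)
}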
/man/corp_data1.Rd
permissive
seunglee98/fedmatch
R
false
true
457
rd
############################################################ # # # Use of transition_state # # # ############################################################ # Load packages library(tidyverse) library(gganimate) # Define palette pal <- c('#E69F00', '#56B4E9', '#009E73') # Import data data <- read_csv('data/IHME-GBD-cleaned.csv') # Process data global_female <- data %>% # Select females filter(sex == 'Female') %>% # Select global filter(location == 'Global') %>% # Trim years (for shadow_mark effect) filter(year %in% c(1990, 2000, 2010, 2017)) %>% # Nest by year group_by(year) %>% nest() %>% # Rank causes within each year and then select the top ten / year mutate(rank = map(.x = data, ~ .x %>% arrange(desc(val)) %>% mutate(rank = row_number()) %>% filter(rank <= 10))) %>% # Unnest select(-data) %>% unnest() %>% # Add padding to text labels mutate(cause = str_pad(cause, width = 28, side = 'both')) p1 <- ggplot(data = global_female) + aes(x = year, y = rev(rank), colour = colour, fill = colour) + geom_label(aes(label = cause), size = 6, alpha = 0.2, label.padding = unit(1.2, 'lines'), label.size = 0.5) + scale_fill_manual(values = pal) + scale_colour_manual(values = pal) + scale_x_continuous(breaks = c(1990, 2000, 2010, 2017), limits = c(1988, 2019)) + scale_y_continuous(breaks = 1:10, labels = 10:1) + labs(subtitle = 'Rank order of leading causes of female global DALY per 100k population', x = 'Year', y = 'Rank') + theme_minimal(base_size = 20) + theme(legend.position = 'none', panel.grid = element_blank()) p1_tstate <- p1 + transition_states(states = year, wrap = FALSE) + ease_aes('linear') + shadow_mark() + exit_fade() animate(p1_tstate, res = 72, width = 1300, height = 700, fps = 10, # default nframes = 100, # default end_pause = 30, start_pause = 10)
/transition_state.R
no_license
kamermanpr/satRday2019
R
false
false
2,465
r
# Find scenarios in a directory, extract their types, names and years and put them in an (arguably) clearer directory format library(fs) library(stringr) library(tibble) library(tidyr) library(dplyr) source = "data/sensitive/May-2020/" destination = "data/sensitive/GCVT_Scenario_Pack/" scenario_names = list.files(source) %>% str_match("^(Link|Matrix)_(?:Y20\\d{2}_)?(.*?)_(\\d{4}).csv$") %>% # The Y20xx bit is because of some badly named files... as.tibble() %>% drop_na() %>% select(type=2, name=3, year=4, filename=1) for (rown in 1:nrow(scenario_names)) { row = scenario_names[rown,] todir = path(destination, "scenarios", row$name, ifelse(row$type == "Link", "links", "od_matrices")) dir_create(todir, recursive = T) file_copy(path(source, row$filename), paste(todir, "/", row$year, ".csv", sep="")) }
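# Added illustration of what the filename pattern above captures; the two example
# filenames below are made up, not taken from the real scenario pack:
str_match(c("Link_Scenario1_2025.csv", "Matrix_Y2030_Scenario1_2030.csv"),
          "^(Link|Matrix)_(?:Y20\\d{2}_)?(.*?)_(\\d{4}).csv$")
# column 2 -> type (Link/Matrix), column 3 -> scenario name, column 4 -> year;
# the optional "Y20xx_" prefix is consumed, which handles the badly named files.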
/src/data-preparation/move_scenario_csvs.R
permissive
IntegratedTransportPlanning/gcvt
R
false
false
830
r
enrichedPairs <- function(data, flank=5, exclude=0, assay.in=1, assay.out=NULL) # For each bin pair in 'data', this function counts the number of read pairs in # the neighbouring bin pairs in 'data', with four defined neighbourhood types. # # written by Aaron Lun # created 23 April 2014 # last modified 2 March 2017 { flank <- as.integer(flank) exclude <- as.integer(exclude) if (flank <= 0L) { stop("flank width must be a positive integer") } if (exclude < 0L) { stop("exclude width must be a positive integer") } if (flank <= exclude) { stop("exclude width must be less than the flank width") } .check_StrictGI(data) rdata <- .splitByChr(regions(data)) last.id <- rdata$last first.id <- rdata$first # Running through each pair of chromosomes. np <- nrow(data) nl <- ncol(data) all.chrs <- as.character(seqnames(regions(data))) aid <- anchors(data, type="first", id=TRUE) by.chr <- split(seq_len(np), all.chrs[aid]) tid <- anchors(data, type="second", id=TRUE) # Setting up the output for each mode. modes <- .neighbor_locales(assay.out) count.output <- lapply(modes, FUN=function(x) matrix(0L, nrow=np, ncol=nl)) n.output <- lapply(modes, FUN=function(x) numeric(np)) for (anchor in names(by.chr)) { next.chr <- by.chr[[anchor]] next.chr <- split(next.chr, all.chrs[tid[next.chr]]) a.len <- last.id[[anchor]] - first.id[[anchor]] + 1L for (target in names(next.chr)) { current.pair <- next.chr[[target]] all.a <- aid[current.pair] - first.id[[anchor]] all.t <- tid[current.pair] - first.id[[target]] t.len <- last.id[[target]] - first.id[[target]] + 1L o <- order(all.a, all.t) all.a <- all.a[o] all.t <- all.t[o] all.c <- assay(data, assay.in)[current.pair,,drop=FALSE][o,,drop=FALSE] # Getting counts for each library and type of neighbouring region. for (lib in seq_len(nl)) { collected <- .Call(cxx_quadrant_bg, all.a, all.t, all.c[,lib], flank, exclude, a.len, t.len, anchor==target) if (is.character(collected)) { stop(collected) } for (m in seq_along(modes)) { cur.counts <- collected[[1]][[m]] cur.counts[o] <- cur.counts count.output[[m]][current.pair,lib] <- cur.counts } if (lib==1L) { for (m in seq_along(modes)) { cur.n <- collected[[2]][[m]] cur.n[o] <- cur.n n.output[[m]][current.pair] <- cur.n } } } } } # Adding the counts to the data and returning the object. n.names <- .neighbor_numbers(modes) for (m in seq_along(modes)) { assay(data, modes[m]) <- count.output[[m]] mcols(data)[[n.names[m]]] <- n.output[[m]] } return(data) } .neighbor_locales <- function(x=NULL) { if (is.null(x)) { modes <- c("quadrant", "vertical", "horizontal", "surrounding") } else { modes <- x if (!is.character(modes) || length(modes)!=4L || anyDuplicated(modes)) { stop("'assay.out' must be a character vector with 4 unique names") } } return(modes) } .neighbor_numbers <- function(x=NULL) { if (is.null(x)) x <- .neighbor_locales() paste0("N.", x) }
/R/enrichedPairs.R
no_license
hcoughlan/archive-diffHic
R
false
false
3,431
r
\name{get_iheatmap} \alias{get_iheatmap} \title{ Get the interactive heatmap } \description{ Show the interactive heatmap stored in an iheatmap object created by \code{create_iheatmap}. } \usage{ get_iheatmap(iheatmap_object) } \arguments{ \item{iheatmap_object}{an interactive heatmap object, as returned by \code{create_iheatmap}.} } \examples{ ##---- return an interactive heatmap plot ---- ## Create an IHeatMap object, build the interactive heatmap and display it: IHeatMap_object <- IHeatMap(expression = expression, profile = profile) iheatmap_object <- create_iheatmap(IHeatMap_object) get_iheatmap(iheatmap_object) }
/man/get_iheatmap.Rd
no_license
harryjerryzhu/iheatmap
R
false
false
1,363
rd
library(ebimetagenomics) ### Name: projectSamples ### Title: Find samples associated with an EMG project ### Aliases: projectSamples ### Keywords: EMG ### ** Examples ## No test: ps=getProjectSummary("SRP047083") projectSamples(ps) ## End(No test)
/data/genthat_extracted_code/ebimetagenomics/examples/projectSamples.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
255
r
# R program to calculate unsteady state human body exergy consumPtion # This is a program for the calculation of human-body core and skin temperatures # and also clothing surface temperature based on the two-node model # originally developed by Gagge et al. # The program has been developed so that it fits the calculation of human-body # exergy balance under unsteady-state conditions. # The program is based on the Excel version for calculating human body exergy consumPtion rate developed by masanori Shukuya # 1st ver. masanori Shukuya 13th February, 2013 # # This program has been further extended to be able to include the human-body exergy balance. # masanori Shukuya 11th may, 2014 # # This version is for un-steady state exergy calculation. # masanori Shukuya 30th June, 2014//18th February, 2015 # # transformation of VBA-code and Excel procedures into R syntax # Marcel Schweiker may, 2015 #################################### # main program ############################################# # # This is a program for the calculation of human-body core and skin temperatures # and also clothing surface temperature based on the two-node model # originally developed by Gagge et al. # The program has been developed so that it fits the calculation of human-body # exergy balance under unsteady-state conditions. # 1st ver. ms 13th February, 2013 # # This program has been further extended to be able to include the human-body exergy balance. # ms 11th may, 2014 # # This version is for un-steady state exergy calculation. # ms 30th June, 2014//18th February, 2015 # # transfer of VBA-code to R-code by m. schweiker march, 2015 # ############################################### calcHbExSteady <- function(ta, tr, rh, vel, clo, met, tao, rho, frad = .7, eps = .95, ic = 1.085, ht=171, wt=70, tcr=37, tsk=36, basMet=58.2, warmUp=60, cdil=100, sigmatr=.25){ # definition of output variables # Exergy input xInmets <- xInmetwcs <- xInAIRwcs <- xInAIRwcwcs <- xInAIRwds <- xInAIRwdwds <- xInLUNGwcs <- xInLUNGwcwcs <- xInLUNGwds <- xInLUNGwdwds <- xInsheLLwcs <- xInsheLLwcwcs <- xInsheLLwds <- xInsheLLwdwds <- xInraDs <- xInraDwcs <- xIntotaLs <- NA # Exergy output xoutstorecores <- xoutstoreshels <- xoutaIRwcs <- xoutaIRwcwcs <- xoutaIRwds <- xoutaIRwdwds <- xoutswEATwcs <- xoutswEATwcwcs <- xoutswEATwds <- xoutswEATwdwds <- xoutraDs <- xoutraDwcs <- xoutCONVs <- xoutCONVwcs <- xouttotaLs <- NA # balance and additional variables xconss <- tsks <- tcrs <- ws <- NA tko <- 273.15 tskSet <- 33.7 tcrSet <- 36.8 lr <- 16.5 * 10 ^ (-3) #cdil <- 200 # may vary between 75 and 225 #sigmatr <- 0.5; # may vary between .25 and .75 csw <- 170 mAir <- 28.97 * 0.001; mWater <- 18.015 * 0.001; rGas <- 8.31446 #[J/(molK)] row <- 1000; Roa <- 1.2#[kg/m3] cpBody <- 3490; cpa <- 1005; cpv <- 1846; cpw <- 4186 #[J/(kgK)] PO <- 101325#[N/m2<-J/m3] aBody <- 0.008883 * ht ^ 0.663 * wt ^ 0.444 # radiation area of human body - taking account for covered parts by other parts of body (e.g. 
inner parts of arm) - coefficients taken from Fanger rMA <- wt / aBody i <- 1 icl <- clo # clothing insulation [clo] va <- vel # Indoor air velocity [m/s] met <- met # metabolic rate [met] tmr <- tr # mean radiant temp [degree C] ta <- ta # air temp [degree C] phia <- rh # relative humidity indoors [%] toEnv <- tao # outdoor temp [degree C] phiaEnv <- rho # relative humidity outdoors [%] # warm-up period (here 30 minutes) - see also discussion in paper for (j in 1:warmUp){ # extract single value from time series data dtCal <- 60 qmet <- metaTherm(met, basMet) fcl <- 1 + 0.15 * icl rcl <- 0.155 * icl TKa <- ta + tko; tkmr <- tmr + tko tkoEnv <- toEnv + tko pvEnv <- pVapor(toEnv, phiaEnv) #--------module_1 function see Book Shukuya p. 268 hr <- 6.13 * frad * eps # radiative heat transfer coefficient hc <- hcvG(va, met, basMet) # convective heat transfer coefficient #--------module_1 top <- (hr * tmr + hc * ta) / (hr + hc) ffcl <- 1 / (1 + 0.155 * icl * fcl * (hr + hc)); fpcl <- 1 / (1 + 0.155 * icl * fcl * hc / ic) # related to evaporation cl <- 1 / (1 + 0.155 * icl * fcl * hc / ic) im <- hc * fpcl / ((hr + hc) * ffcl) iclStar <- 0.6; fclStar <- 1 + 0.15 * iclStar hcStar <- hcvG(0.1, 1, basMet) ffclStar <- 1 / (1 + 0.155 * iclStar * fclStar * (hr + hcStar)) fpclStar <- 1 / (1 + 0.155 * iclStar * fclStar * hcStar / ic) imStar <- hcStar * fpclStar / ((hr + hcStar) * ffclStar); cres <- 0.0014 * (basMet * met) * (34 - ta) # heat transfer to environment by convection in relation to respiration (empirical equation by Fanger) pva <- pVapor(ta, phia)#--------module_1 eres <- 0.0173 * (basMet * met) * (5.87 - pva / 1000) # heat removed by evaporation in relation to respiration (empirical equation by Fanger) #Else qShiv <- mshiv(tcrSet, tskSet, tcr, tsk)#--------module_3 vblS <- vblCdilStr(cdil, sigmatr, tcrSet, tskSet, tcr, tsk)# blood flow rate #--------module_2 # next line is core of Gagge model. If blood flow becomes lower, than skin layer gets more dominant alfaSk <- 0.0418 + 0.745 / (vblS + 0.585); # factor to adjust for difference in dominance kS <- 5.28 + 1.163 * vblS # conductance between core and skin layer Qcr <- (1 - alfaSk) * rMA * cpBody; Qsk <- alfaSk * rMA * cpBody # heat capacity of core and skin layer tcrN <- (1 - dtCal * kS / Qcr) * tcr + dtCal / Qcr * (qmet + qShiv - (cres + eres) + kS * tsk) # core temperature at time step before step calculated psks <- pVapor(tsk, 100)# saturated water vapour pressure at skin surface calculated with pure water , i.e. adaptive processes such as less salt in sweat might be put heresee also p. 
281 of book Shukuya #--------module_1 emax <- fpcl * (fcl * lr * hc) * (psks - pva) # max rate of water dispersion when the whole skin surface is wet tb <- alfaSk * tsk + (1 - alfaSk) * tcr; # average body temperature using calculated tsk and tcr tbSet <- alfaSk * tsk + (1 - alfaSk) * tcrSet # average body temperature using set-point values for tsk and tcr ersw <- csw * (tb - tbSet) * exp((tsk - tskSet) / 10.7) # amount of sweat secretion w <- 0.06 + 0.94 * ersw / emax if (w < 0.06){ w <- 0.06 } if (1 < w){ w <- 1 } DTQ <- dtCal / Qsk tskN <- (1 - DTQ * kS - DTQ * fcl * ffcl * (hr + hc)) * tsk - DTQ * w * fcl * lr * hc * fpcl * psks + DTQ * (kS * tcr + fcl * (hr + hc) * ffcl * top + w * fcl * lr * hc * fpcl * pva) # tsk in next step tclN <- ((1 / rcl) * tskN + fcl * hr * tmr + fcl * hc * ta) / (1 / rcl + fcl * hr + fcl * hc) # # Exergy balance # # Thermal exergy generation by metabolism # tkcrN <- tcrN + tko; tkskN <- tskN + tko; tkclN <- tclN + tko metaEnergy <- qmet + qShiv xMet <- metaEnergy * (1 - tkoEnv / tkcrN); xMetwc <- wcXCheck(tkcrN, tkoEnv) # Inhaled humid air Vin <- 1.2 * 10 ^ (-6) * metaEnergy # Volume of air intake [V/s] cpav <- cpa * (mAir / (rGas * TKa)) * (PO - pva) + cpv * (mWater / (rGas * TKa)) * pva xInhaleWc <- Vin * wcEx(cpav, TKa, tkoEnv); xInhaleWcwc <- wcXCheck(TKa, tkoEnv)#--------module_4 xInhaleWd <- Vin * wdEx(TKa, tkoEnv, pva, pvEnv); xInhaleWdwd <- wdXCheck(pva, pvEnv)#--------module_4 # Liquid water generated in the core by metabolism to be dispersed into the exhaled air VwCore <- Vin * (0.029 - 0.049 * 10 ^ (-4) * pva) xLwCoreWc <- VwCore * Roa * wcEx(cpw, tkcrN, tkoEnv); xLwCoreWcwc <- wcXCheck(tkcrN, tkoEnv) #--------module_4 pvs_env <- pVapor(toEnv, 100) xLwCoreWet <- VwCore * Roa * rGas / mWater * tkoEnv * log(pvs_env / pvEnv) xLwCoreWetwd <- "wet" # Liquid water generated in the shell by metabolism to be secreted as sweat vwShellrow <- w * emax / (2450 * 1000) xLwShellWc <- vwShellrow * wcEx(cpw, tkskN, tkoEnv); xLwShellWcwc <- wcXCheck(tkskN, tkoEnv)#--------module_4 xLwShellWd <- vwShellrow * wdExLw(tkoEnv, pvs_env, pva, pvEnv); xLwShellWdwd <- wdXCheck(pva, pvEnv) #--------module_4 # radiant exergy input xInRad <- fcl * hr * (tkmr - tkoEnv) ^ 2 / (tkmr + tkoEnv); xInRadwc <- wcXCheck(tkmr, tkoEnv) # total exergy input xIntotal <- xMet + xInhaleWc + xInhaleWd + xLwCoreWc + xLwCoreWet + xLwShellWc + xLwShellWd + xInRad # Exergy stored xStCore <- Qcr * (1 - tkoEnv / tkcrN) * (tcrN - tcr) / dtCal xStShell <- Qsk * (1 - tkoEnv / tkskN) * (tskN - tsk) / dtCal # Exhaled humid air pvssCr <- pVapor(tcrN, 100) cpav <- cpa * (mAir / (rGas * tkcrN)) * (PO - pvssCr) + cpv * (mWater / (rGas * tkcrN)) * pvssCr xExhaleWc <- Vin * wcEx(cpav, tkcrN, tkoEnv); xExhaleWcwc <- wcXCheck(tkcrN, tkoEnv) #--------module_4 xExhaleWd <- Vin * wdEx(tkcrN, tkoEnv, pvssCr, pvEnv); xExhaleWdwd <- wdXCheck(pvssCr, pvEnv)#--------module_4 # water vapor originating from the sweat and humid air containing the evaporated sweat xSweatWc <- vwShellrow * wcEx(cpv, tkclN, tkoEnv); xSweatWcwc <- wcXCheck(tkclN, tkoEnv)#--------module_4 xSweatWd <- vwShellrow * wdExLw(tkoEnv, pva, pva, pvEnv); xSweatWdwd <- wdXCheck(pva, pvEnv) #--------module_4 # radiant exergy output xOutRad <- fcl * hr * (tkclN - tkoEnv) ^ 2 / (tkclN + tkoEnv); xOutRadwc <- wcXCheck(tkclN, tkoEnv) # Exergy transfer by convection xOutConv <- fcl * hc * (tkclN - TKa) * (1. 
- tkoEnv / tkclN); xOutConvwc <- wcXCheck(tkclN, tkoEnv) xouttotal <- xStCore + xStShell + xExhaleWc + xExhaleWd + xSweatWc + xSweatWd + xOutRad + xOutConv xConsumption <- xIntotal - xouttotal #etStar <- calcet(top, ta, phia, w, im, 50, im); #--------module_4 tcr <- tcrN tsk <- tskN } # Output values # Exergy input xInmets[i] <- signif(xMet, 4) #metabolism xInmetwcs[i] <- xMetwc #metabolism warm/cold xInAIRwcs[i] <- signif(xInhaleWc, 4) # inhaled humid air xInAIRwcwcs[i] <- xInhaleWcwc xInAIRwds[i] <- signif(xInhaleWd, 4) # xInAIRwdwds[i] <- xInhaleWdwd # wet/dry xInLUNGwcs[i] <- signif(xLwCoreWc, 4) # water lung xInLUNGwcwcs[i] <- xLwCoreWcwc xInLUNGwds[i] <- signif(xLwCoreWet, 4) xInLUNGwdwds[i] <- xLwCoreWetwd xInsheLLwcs[i] <- signif(xLwShellWc, 4) # water sweat xInsheLLwcwcs[i] <- xLwShellWcwc xInsheLLwds[i] <- signif(xLwShellWd, 4) xInsheLLwdwds[i] <- xLwShellWdwd xInraDs[i] <- signif(xInRad, 4) # radiation in xInraDwcs[i] <- xInRadwc xIntotaLs[i] <- signif(xIntotal, 4) # totaL exergy input # Exergy output xoutstorecores[i] <- signif(xStCore, 4) # exergy stored xoutstoreshels[i] <- signif(xStShell, 4) xoutaIRwcs[i] <- signif(xExhaleWc, 4) # exhaled humid air xoutaIRwcwcs[i] <- xExhaleWcwc xoutaIRwds[i] <- signif(xExhaleWd, 4) xoutaIRwdwds[i] <- xExhaleWdwd xoutswEATwcs[i] <- signif(xSweatWc, 4) # water vapour xoutswEATwcwcs[i] <- xSweatWcwc xoutswEATwds[i] <- signif(xSweatWd, 4) xoutswEATwdwds[i] <- xSweatWdwd xoutraDs[i] <- signif(xOutRad, 4) # radiation out xoutraDwcs[i] <- xOutRadwc xoutCONVs[i] <- signif(xOutConv, 4) # convection xoutCONVwcs[i] <- xOutConvwc xouttotaLs[i] <- signif(xouttotal, 4) # totaL exergy out # balance xconss[i] <- signif(xConsumption, 4) # exergy consumPtion total tsks[i] <- signif(tsk, 4) tcrs[i] <- signif(tcr, 4) ws[i] <- signif(w, 4) #setStar <- calcet(top, ta, phia, w, im, 50, imStar); resultsst <- data.frame( # Output values #setStar, #etStar, # Exergy input xInmets, #metabolism xInmetwcs, #metabolism warm/cold xInAIRwcs, # inhaled humid air xInAIRwcwcs, xInAIRwds, # xInAIRwdwds, # wet/dry xInLUNGwcs, # water lung xInLUNGwcwcs, xInLUNGwds, xInLUNGwdwds, xInsheLLwcs, # water sweat xInsheLLwcwcs, xInsheLLwds, xInsheLLwdwds, xInraDs, # radiation in xInraDwcs, xIntotaLs, # totaL exergy input # Exergy output xoutstorecores, # exergy stored core xoutstoreshels, # exergy stored shell xoutaIRwcs, # exhaled humid air xoutaIRwcwcs, xoutaIRwds, xoutaIRwdwds, xoutswEATwcs, # water vapour xoutswEATwcwcs, xoutswEATwds, xoutswEATwdwds, xoutraDs, # radiation out xoutraDwcs, xoutCONVs, # convection xoutCONVwcs, xouttotaLs, # totaL exergy out # balance xconss, # exergy consumPtion total xConsumption, tsks, # skin temperature tcrs, # core temperature ws, # skin wettedness stringsAsFactors=FALSE ) resultsst } # end of main program ###########################
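# Added usage sketch (not part of the original file). It assumes the rest of the comf
# package is loaded, so that the helper functions called above (pVapor, hcvG, wcEx,
# wdEx, mshiv, vblCdilStr, ...) are available; the input values are arbitrary but
# plausible indoor/outdoor conditions, not values taken from the original author.
# library(comf)
# ex <- calcHbExSteady(ta = 22, tr = 22, rh = 50, vel = 0.1, clo = 0.8, met = 1.2,
#                      tao = 5, rho = 80)
# ex$xconss  # total human-body exergy consumption rate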
/comf/R/fctHBxStSt.r
no_license
ingted/R-Examples
R
false
false
12,883
r
library(ggplot2) library(lme4) library(nlme) data(revenge, package = "hecstatmod") revenge$t <- as.integer(revenge$time) mixed1 <- lmer(revenge ~ sex + age + vc + wom + t + (1 | id) + (0 +t | id), data = revenge, REML = TRUE) mixed2 <- lmer(revenge ~ sex + age + vc + wom + t + (1+t | id), data = revenge, REML = TRUE) # Same as # nlme::lme(revenge ~ sex + age + vc + wom + t, random = ~ 1+t | id, data = revenge) # Are the slope and intercept correlated? anova(mixed2, mixed1) # Strongly reject null that corr=0 # since not on boundary, chisq(1) is correct asymptotic dist # Variance parameters for random effects and errors VarCorr(mixed2) # print standard dev and correlation VarCorr(mixed2)$id #print covariance matrix # Fixed and random effects fixef(mixed2) # Scatterplot of random intercept and slope plot(ranef(mixed2)) # Because all covariates are fixed in time, # these added up give a different intercept to each individual # depending on its characteristics intercept_fixed <- c(cbind(1, as.matrix(revenge[revenge$time==1,1:4])) %*% fixef(mixed2)[1:5]) slope_fixed <- nlme::fixed.effects(mixed2)["t"] #random effects random_effects <- ranef(mixed2) # Show the fitted line in the revenge score - time scale ran_lines <- data.frame(intercept = intercept_fixed + random_effects$id[,1], slope = slope_fixed + random_effects$id[,2], id = 1:80) ggplot(data = revenge, aes(x=t, y=revenge, group=id)) + geom_line(alpha = 0.2) + geom_abline(data = ran_lines, aes(intercept = intercept, slope = slope, group = id), col = 4, alpha = 0.2) + scale_x_continuous(expand = c(0,0), limits = c(1,5)) # Same model, but we add an AR(1) covariance model for the errors # This must be fitted using nlme::lme rather than lme4::lmer # as the latter doesn't handle covariance models for errors mixed3 <- lme(revenge ~ sex + age + vc + wom + t, random = ~ 1 | id, data = revenge, correlation = corAR1()) mixed4 <- lme(revenge ~ sex + vc + wom + age + t, random = ~ 1 + t | id, data = revenge, correlation = corAR1()) # null distribution is 0.5 chisq_1 + 0.5chisq_2 # but poor finite-sample properties anova(mixed4, mixed3) # With the AR(1) model, there is no more # need for a random slope for time plot(ranef(mixed3)) # Check sample mean is indeed zero isTRUE(all.equal(0, mean(ranef(mixed3)[,1]))) nlme::plot.lme(mixed3) VarCorr(mixed3)
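# Added worked illustration of the 50:50 chi-square mixture mentioned above:
# given the likelihood-ratio statistic from anova(mixed4, mixed3), the p-value under
# the chi-bar-square null is the equal-weight mixture of chi2(1) and chi2(2).
lrt <- anova(mixed4, mixed3)$L.Ratio[2]
0.5 * pchisq(lrt, df = 1, lower.tail = FALSE) +
  0.5 * pchisq(lrt, df = 2, lower.tail = FALSE)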
/code/MATH60604A_6e_randomslope.R
permissive
lbelzile/statmod
R
false
false
2,604
r
# Plot 2 png(filename="Plot2.png", width=480, height=480) par(mar=c(3,4,2,2)) plot(data$datetime, data$Global_active_power, type="l", ylab="Global Active Power (kilowatts)") dev.off()
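# Added data-preparation sketch (the original repo presumably builds 'data' in another
# script). File name, separator and date filter are assumptions based on the household
# power consumption dataset commonly used with this plot, so this stays commented out.
# raw <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
#                   na.strings = "?", stringsAsFactors = FALSE)
# data <- subset(raw, Date %in% c("1/2/2007", "2/2/2007"))
# data$Global_active_power <- as.numeric(data$Global_active_power)
# data$datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")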
/DeborahDeng/Plot2.r
no_license
mouselovescats/ExData_Plotting1
R
false
false
187
r
# script 1 install.packages("dplyr") library(dplyr)
/package_install.R
no_license
b-w-a/pols_209
R
false
false
56
r
# TODO: Add comment # # Author: tiffn_000 ############################################################################### #load required packages (car provides durbinWatsonTest, nlme provides gls and corARMA) library(car) library(nlme) #OLS estimates return.data <- read.csv("C:\\Users\\tiffn_000\\Desktop\\return_period_t85.csv",header=T) #return.data <- read.csv("return_period_ab2.csv",header=T) head(return.data) mod.ols <- lm(T_100 ~ time, data=return.data) summary(mod.ols) plot(return.data$time, residuals(mod.ols), type='o') abline(h=0, lty=2) ##library(ts) - this is now part of the base installation par(mfrow=c(2,1)) acf(residuals(mod.ols)) acf(residuals(mod.ols), type='partial') #durbin.watson(mod.ols, max.lag=10) durbinWatsonTest(mod.ols, max.lag=10) ## GLS estimates #library(nlme) #- only load once at the beginning mod.gls <- gls(T_100 ~ time, correlation=corARMA(p=1), method='ML',data=return.data) summary(mod.gls) summary(mod.gls)$tTable #gives full p-value mod.ols <- lm(test.zoo.lp3.sac$prob0.1Q_maf ~ test.zoo.lp3.sac$Date) summary(mod.ols) plot(test.zoo.lp3.sac$Date, residuals(mod.ols), type='o') abline(h=0, lty=2) ##library(ts) - this is now part of the base installation par(mfrow=c(2,1)) acf(residuals(mod.ols)) acf(residuals(mod.ols), type='partial') #durbin.watson(mod.ols, max.lag=10) durbinWatsonTest(mod.ols, max.lag=10) ## GLS estimates #library(nlme) #- only load once at the beginning mod.gls <- gls(prob0.1Q_maf ~ Date, data=test.zoo.lp3.sac, correlation=corARMA(p=1), method='ML') summary(mod.gls) summary(mod.gls)$tTable #gives full p-value mod.ols <- lm(test.zoo.lp3$prob0.1Q_maf ~ test.zoo.lp3$Date) summary(mod.ols) plot(test.zoo.lp3$Date, residuals(mod.ols), type='o') abline(h=0, lty=2) ##library(ts) - this is now part of the base installation par(mfrow=c(2,1)) acf(residuals(mod.ols)) acf(residuals(mod.ols), type='partial') #durbin.watson(mod.ols, max.lag=10) durbinWatsonTest(mod.ols, max.lag=10) ## GLS estimates #library(nlme) #- only load once at the beginning mod.gls <- gls(prob_0.1_Q_maf ~ Date, data=test.zoo.lp3, correlation=corARMA(p=1), method='ML') summary(mod.gls) blah <- as.data.frame(summary(mod.gls)$tTable) #gives full p-value blah$coeff_names <- dimnames(summary(mod.gls)$tTable)[[1]]
/R/durbinwatson.R
no_license
tnkocis/stReamflowstats
R
false
false
2,254
r
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810624464246e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613102764-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
343
r
#' Find the least cost path in a least cost matrix. #' #' @description Uses the original distance matrix created by \code{\link{distanceMatrix}} and the least cost path matrix created by \code{\link{leastCostMatrix}} to find the least cost path between the first and the last cells of the matrix. If \code{diagonal} was \code{TRUE} in \code{\link{leastCostMatrix}}, then it must be \code{TRUE} when using this function. Otherwise, the default is \code{FALSE} in both. #' #' @usage leastCostPath(distance.matrix = NULL, #' least.cost.matrix = NULL, #' diagonal = FALSE, #' parallel.execution = TRUE) #' #' @param distance.matrix numeric matrix or list of numeric matrices, a distance matrix produced by \code{\link{distanceMatrix}}. #' @param least.cost.matrix numeric matrix or list of numeric matrices produced by \code{\link{leastCostMatrix}}. #' @param diagonal boolean, if \code{TRUE}, diagonals are included in the computation of the least cost path. Defaults to \code{FALSE}, as the original algorithm did not include diagonals in the computation of the least cost path. #' @param parallel.execution boolean, if \code{TRUE} (default), execution is parallelized, and serialized if \code{FALSE}. #' @return A list of dataframes if \code{least.cost.matrix} is a list, or a dataframe if \code{least.cost.matrix} is a matrix. The dataframe/s have the following columns: #' \itemize{ #' \item \emph{A} row/sample of one of the sequences. #' \item \emph{B} row/sample of the other sequence. #' \item \emph{distance} distance between both samples, extracted from \code{distance.matrix}. #' \item \emph{cumulative.distance} cumulative distance at the samples \code{A} and \code{B}. #' } #' @examples #' #' \dontrun{ #' #'#loading data #'data(sequenceA) #'data(sequenceB) #' #'#preparing datasets #'AB.sequences <- prepareSequences( #' sequence.A = sequenceA, #' sequence.A.name = "A", #' sequence.B = sequenceB, #' sequence.B.name = "B", #' merge.mode = "complete", #' if.empty.cases = "zero", #' transformation = "hellinger" #' ) #' #'#computing distance matrix #'AB.distance.matrix <- distanceMatrix( #' sequences = AB.sequences, #' grouping.column = "id", #' method = "manhattan" #' ) #' #'#computing least cost matrix #'AB.least.cost.matrix <- leastCostMatrix( #' distance.matrix = AB.distance.matrix, #' diagonal = FALSE #' ) #' #'AB.least.cost.path <- leastCostPath( #' distance.matrix = AB.distance.matrix, #' least.cost.matrix = AB.least.cost.matrix #' ) #' #'#plot #'plotMatrix(distance.matrix = AB.distance.matrix, #' least.cost.path = AB.least.cost.path #' ) #' #'} #' #' @export leastCostPath <- function(distance.matrix = NULL, least.cost.matrix = NULL, diagonal = FALSE, parallel.execution = TRUE){ #if input is matrix, get it into list if(inherits(least.cost.matrix, "matrix") == TRUE | is.matrix(least.cost.matrix) == TRUE){ temp <- list() temp[[1]] <- least.cost.matrix least.cost.matrix <- temp names(least.cost.matrix) <- "" } #if input is matrix, get it into list if(inherits(distance.matrix, "matrix") == TRUE | is.matrix(distance.matrix) == TRUE){ temp <- list() temp[[1]] <- distance.matrix distance.matrix <- temp names(distance.matrix) <- "" } if(inherits(least.cost.matrix, "list") == TRUE){ n.iterations <- length(least.cost.matrix) } if(inherits(distance.matrix, "list") == TRUE){ m.elements <- length(distance.matrix) } if(n.iterations != m.elements){ stop("Arguments 'distance.matrix' and 'least.cost.matrix' don't have the same number of slots.") } if(n.iterations > 1){ if(sum(names(distance.matrix) %in% 
names(least.cost.matrix)) != n.iterations){ stop("Elements in arguments 'distance.matrix' and 'least.cost.matrix' don't have the same names.") } } #setting diagonal if it's empty if(is.null(diagonal)){diagonal <- FALSE} #parallel execution = TRUE if(parallel.execution == TRUE){ `%dopar%` <- foreach::`%dopar%` n.cores <- parallel::detectCores() - 1 if(n.iterations < n.cores){n.cores <- n.iterations} my.cluster <- parallel::makeCluster(n.cores, type="FORK") doParallel::registerDoParallel(my.cluster) #exporting cluster variables parallel::clusterExport(cl = my.cluster, varlist = c('least.cost.matrix', 'distance.matrix', 'diagonal'), envir = environment() ) } else { #replaces dopar (parallel) by do (serial) `%dopar%` <- foreach::`%do%` } #parallelized loop least.cost.paths <- foreach::foreach(i=1:n.iterations) %dopar% { #getting distance matrix least.cost.matrix.i <- least.cost.matrix[[i]] distance.matrix.i <- distance.matrix[[i]] if(sum(dim(least.cost.matrix.i) == dim(distance.matrix.i)) != 2){ stop(paste("The elements ", i, " of 'distance.matrix' and 'least.cost.matrix' don't hav the same dimensions.")) } #dataframe to store the path pairings <- data.frame(A = nrow(least.cost.matrix.i), B = ncol(least.cost.matrix.i), distance = distance.matrix.i[nrow(distance.matrix.i), ncol(distance.matrix.i)], cumulative.distance = least.cost.matrix.i[nrow(least.cost.matrix.i), ncol(least.cost.matrix.i)]) #defining coordinates of the focal cell focal.row <- pairings$A focal.column <- pairings$B #computation for no diaggonal if(diagonal == FALSE){ #going through the matrix repeat{ #defining values o focal row focal.cumulative.cost <- least.cost.matrix.i[focal.row, focal.column] focal.cost <- distance.matrix.i[focal.row, focal.column] #SCANNING NEIGHBORS neighbors <- data.frame(A = c(focal.row-1, focal.row), B = c(focal.column, focal.column-1)) #removing neighbors with coordinates lower than 1 (out of bounds) neighbors[neighbors<1] <- NA neighbors <- stats::na.omit(neighbors) if(nrow(neighbors) == 0){break} #computing cost and cumulative cost values for the neighbors if(nrow(neighbors) > 1){ neighbors$distance <- diag(distance.matrix.i[neighbors$A, neighbors$B]) neighbors$cumulative.distance <- diag(x = least.cost.matrix.i[neighbors$A, neighbors$B]) } else { neighbors$distance <- distance.matrix.i[neighbors$A, neighbors$B] neighbors$cumulative.distance <- least.cost.matrix.i[neighbors$A, neighbors$B] } #getting the neighbor with a minimum least.cost.matrix.i neighbors <- neighbors[which.min(neighbors$cumulative.distance), c("A", "B")] #temporal dataframe to rbind with pairings pairings.temp <- data.frame( A = neighbors$A, B = neighbors$B, distance = distance.matrix.i[neighbors$A, neighbors$B], cumulative.distance = least.cost.matrix.i[neighbors$A, neighbors$B] ) #putting them together pairings <- rbind(pairings, pairings.temp) #new focal cell focal.row <- pairings[nrow(pairings), "A"] focal.column <- pairings[nrow(pairings), "B"] }#end of repeat } #end of diagonal == FALSE #computation for diaggonal if(diagonal == TRUE){ #going through the matrix repeat{ #defining values o focal row focal.cumulative.cost <- least.cost.matrix.i[focal.row, focal.column] focal.cost <- distance.matrix.i[focal.row, focal.column] #SCANNING NEIGHBORS neighbors <- data.frame(A = c(focal.row-1, focal.row-1, focal.row), B=c(focal.column, focal.column-1, focal.column-1)) #removing neighbors with coordinates lower than 1 (out of bounds) neighbors[neighbors<1] <- NA neighbors <- stats::na.omit(neighbors) if(nrow(neighbors) == 0){break} 
#computing cost and cumulative cost values for the neighbors if(nrow(neighbors) > 1){ neighbors$distance <- diag(distance.matrix.i[neighbors$A, neighbors$B]) neighbors$cumulative.distance <- diag(x = least.cost.matrix.i[neighbors$A, neighbors$B]) }else{ neighbors$distance <- distance.matrix.i[neighbors$A, neighbors$B] neighbors$cumulative.distance <- least.cost.matrix.i[neighbors$A, neighbors$B] } #getting the neighbor with a minimum least.cost.matrix.i neighbors <- neighbors[which.min(neighbors$cumulative.distance), c("A", "B")] #temporal dataframe to rbind with pairings pairings.temp <- data.frame( A = neighbors$A, B = neighbors$B, distance = distance.matrix.i[neighbors$A, neighbors$B], cumulative.distance = least.cost.matrix.i[neighbors$A, neighbors$B] ) #putting them together pairings<-rbind(pairings, pairings.temp) #new focal cell focal.row <- pairings[nrow(pairings), "A"] focal.column <- pairings[nrow(pairings), "B"] }#end of repeat } #end of diagonal == TRUE #getting names of the sequences sequence.names = unlist(strsplit(names(distance.matrix)[i], split='|', fixed=TRUE)) #renaming pairings colnames(pairings)[1] <- sequence.names[1] colnames(pairings)[2] <- sequence.names[2] return(pairings) } #end of %dopar% #stopping cluster if(parallel.execution == TRUE){ parallel::stopCluster(my.cluster) } else { #creating the correct alias again `%dopar%` <- foreach::`%dopar%` } #list names names(least.cost.paths) <- names(least.cost.matrix) #return output return(least.cost.paths) } #end of function
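The backtracking step above is easier to see on a toy example. The snippet below is not part of the distantia package; it is a self-contained illustration with made-up numbers of the same idea: fill a cumulative-cost matrix using orthogonal moves only, then walk back from the bottom-right cell by always stepping to the up/left neighbor with the smallest cumulative cost.

# toy distance matrix between two short sequences (illustration only)
toy_distance <- matrix(c(0.1, 0.4, 0.8,
                         0.5, 0.2, 0.6,
                         0.9, 0.3, 0.1), nrow = 3, byrow = TRUE)

# cumulative cost matrix, orthogonal moves only
toy_cumulative <- toy_distance
for(i in 1:3){
  for(j in 1:3){
    if(i == 1 && j == 1) next
    up   <- if(i > 1) toy_cumulative[i - 1, j] else Inf
    left <- if(j > 1) toy_cumulative[i, j - 1] else Inf
    toy_cumulative[i, j] <- toy_distance[i, j] + min(up, left)
  }
}

# backtrack from cell [3, 3] to cell [1, 1]
path <- data.frame(A = 3, B = 3)
repeat{
  i <- path$A[nrow(path)]; j <- path$B[nrow(path)]
  if(i == 1 && j == 1) break
  candidates <- rbind(c(i - 1, j), c(i, j - 1))
  candidates <- candidates[candidates[, 1] >= 1 & candidates[, 2] >= 1, , drop = FALSE]
  costs <- toy_cumulative[candidates]
  step <- candidates[which.min(costs), ]
  path <- rbind(path, data.frame(A = step[1], B = step[2]))
}
path  # least cost path, from the bottom-right to the top-left cell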
/R/leastCostPath.R
no_license
xbenitogranell/distantia
R
false
false
9,886
r
#' Format Months to One Letter Abbreviation #' #' Format long month name, integer, or date formats to a single capital letter. #' Useful for plot scales as a way to save space. #' #' @param x A vector of month names, integers 1-12, or dates. #' @param \ldots ignored. #' @return Returns a single letter month abbreviation atomic vector. #' @export #' @rdname f_month #' @examples #' f_month(month.name) #' #' f_month(1:12) #' #' dates <- seq(as.Date("2000/1/1"), by = "month", length.out = 12) #' f_month(dates) #' \dontrun{ #' if (!require("pacman")) install.packages("pacman") #' pacman::p_load(tidyverse) #' #' set.seed(10) #' dat <- data_frame( #' month = sample(month.name, 1000, TRUE), #' area = sample(LETTERS[1:5], 1000, TRUE) #' ) %>% #' count(month, area) %>% #' ungroup() %>% #' mutate(month = factor(month, levels = constant_months)) #' #' ## without date formatting #' ggplot(dat, aes(month, n)) + #' geom_bar(stat = 'identity') + #' facet_wrap(~ area) #' #' ## with date formatting #' ggplot(dat, aes(month, n)) + #' geom_bar(stat = 'identity') + #' facet_wrap(~ area) + #' scale_x_discrete(labels = f_month) #' } f_month <- function(x, ...) { UseMethod('f_month') } #' @export #' @rdname f_month #' @method f_month default f_month.default <- function(x, ...) { toupper(gsub("(^.)(.+)", "\\1", as.character(x))) } #' @export #' @rdname f_month #' @method f_month numeric f_month.numeric <- function(x, ...) { toupper(gsub("(^.)(.+)", "\\1", month.abb[x])) } #' @export #' @rdname f_month #' @method f_month Date f_month.Date <- function(x, ...) { toupper(gsub("(^.)(.+)", "\\1", as.character(format(x, "%b")))) } #' @export #' @rdname f_month #' @method f_month POSIXt f_month.POSIXt <- function(x, ...) { toupper(gsub("(^.)(.+)", "\\1", as.character(format(x, "%b")))) } #' @export #' @rdname f_month #' @method f_month hms f_month.hms <- function(x, ...) { f_month.POSIXt(as.POSIXct(x)) } #' @export #' @rdname f_month ff_month <- function(...) { function(x) {f_month(x)} }
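A brief usage sketch of the S3 dispatch above, assuming the package is installed and loaded as numform (or that the file above is sourced); each input class is routed to its method and returns the same one-letter abbreviations.

library(numform)
f_month(c("January", "June", "July"))            # character -> "J" "J" "J"
f_month(c(2, 10, 12))                            # integer   -> "F" "O" "D"
f_month(as.Date(c("2020-03-15", "2020-11-02")))  # Date      -> "M" "N"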
/R/f_month.R
no_license
guhjy/numform
R
false
false
2,073
r
library(shiny)

# Define UI for the 2018-19 CDC vs Twitter flu maps app ----
ui <- pageWithSidebar(

  # App title ----
  headerPanel("2018-19 CDC vs twitter Maps"),

  # Sidebar panel for inputs ----
  sidebarPanel(selectInput("variable", "Charts option:",
                           c("CDC" = "cdc",
                             "NY yearly" = "nyyearly",
                             "Positively Tested" = "PostivelyTested",
                             "Mortality" = "mortality",
                             "ILI" = "ili",
                             "CDC HeatMap" = "cdc_heatmap",
                             "Tweets Flu heatmap" = "tweet_flu_heatmap",
                             "Tweet Influenza heatmap" = "tweet_influenza_heatmap",
                             "Tweet Influenza Flu heatmap" = "tweet_influenza_flu_heatmap",
                             "CDC vs Flu Tweets" = "cdc_vs_flutweet",
                             "CDC vs Influenza Tweets" = "cdc_vs_influtweet",
                             "CDC vs Flu and Influenza Tweets" = "cdc_vs_influ_flutweet"))
  ),

  # Main panel for displaying outputs ----
  mainPanel(uiOutput("mapPlot"))
)

# Define server logic to render the image matching the selected chart ----
server <- function(input, output) {

  output$mapPlot <- renderUI({
    if(input$variable == "cdc") {
      img(src = 'InfluSummary01.png')
    } else if(input$variable == "nyyearly") {
      img(src = 'NY_52weeks.png')
    } else if(input$variable == "PostivelyTested") {
      img(src = 'Rplot_part2_graph2.png')
    } else if(input$variable == "mortality") {
      img(src = 'Mortality.png')
    } else if(input$variable == "ili") {
      img(src = 'ILI.png')
    } else if(input$variable == "cdc_heatmap") {
      img(src = 'Rplot_HeatMap.png')
    } else if(input$variable == "tweet_flu_heatmap") {
      img(src = 'FluHeatMap.png')
    } else if(input$variable == "tweet_influenza_heatmap") {
      img(src = 'InfluenzaHeatMap.png')
    } else if(input$variable == "tweet_influenza_flu_heatmap") {
      img(src = 'CombinedHeatMap.png')
    } else if(input$variable == "cdc_vs_flutweet") {
      img(src = 'cdc_vs_flu.png', width = 500, height = 850)
    } else if(input$variable == "cdc_vs_influtweet") {
      img(src = 'cdc_vs_influenza.png', width = 500, height = 850)
    } else if(input$variable == "cdc_vs_influ_flutweet") {
      img(src = 'cdc_vs_combined.png', width = 500, height = 850)
    }
  })
}

shinyApp(ui, server)
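As a design note, the long if/else chain in the server could be collapsed into a named lookup so each choice value maps directly to its image spec. The sketch below is an alternative, not part of the original app; file names and sizes are copied from the app above, and only a few choices are shown (the rest would follow the same pattern).

# named lookup: selectInput value -> img() arguments
image_map <- list(
  cdc             = list(src = "InfluSummary01.png"),
  nyyearly        = list(src = "NY_52weeks.png"),
  cdc_vs_flutweet = list(src = "cdc_vs_flu.png", width = 500, height = 850)
  # ...remaining choices follow the same pattern
)

server <- function(input, output) {
  output$mapPlot <- renderUI({
    spec <- image_map[[input$variable]]
    if (is.null(spec)) return(NULL)  # choice not mapped yet
    do.call(img, spec)               # build the <img> tag from the stored arguments
  })
}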
/Part2/codes/shinyapp/Rshinyapp.R
no_license
anupriya-goyal/Exploratory-Data-Analysis
R
false
false
2,942
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mplot3.xy.R \name{mplot3.xy} \alias{mplot3.xy} \title{\code{mplot3}: XY Scatter and line plots} \usage{ mplot3.xy( x, y, fit = NULL, formula = NULL, se.fit = FALSE, mod.params = NULL, error.x = NULL, error.y = NULL, cluster = NULL, cluster.params = list(), data = NULL, type = NULL, group = NULL, xlab = NULL, ylab = NULL, main = NULL, main.line = 0.5, main.adj = 0, main.col = NULL, main.font = 2, main.family = "", cex = 1.2, xlim = NULL, ylim = NULL, xpd = TRUE, xaxs = "r", yaxs = "r", rsq = NULL, rsq.pval = FALSE, rsq.side = 1, rsq.adj = 0.98, rsq.col = NULL, fit.error = FALSE, fit.error.side = 1, fit.error.padj = NA, fit.error.col = NULL, xaxp = NULL, yaxp = NULL, scatter = TRUE, axes = TRUE, axes.equal = FALSE, axes.col = NULL, pty = "m", box = NULL, bty = "o", box.col = NULL, box.alpha = 1, box.lty = 1, box.lwd = 1.5, grid = FALSE, grid.nx = NULL, grid.ny = NULL, grid.col = NULL, grid.alpha = 1, grid.lty = 1, grid.lwd = 1.5, bg = NULL, plot.bg = NULL, annotation = NULL, annotation.col = NULL, tick.col = NULL, tck = 0.015, x.axis.side = 1, y.axis.side = 2, x.axis.at = NULL, y.axis.at = NULL, x.axis.labs = TRUE, y.axis.labs = TRUE, x.axis.padj = -1.1, xlab.line = 1.3, y.axis.padj = 0.9, ylab.line = 1.6, xlab.adj = 0.5, ylab.adj = 0.5, mar = c(2.5, 2.5, 1.5, 1), col = NULL, pch = ifelse(is.null(point.bg.col), 16, 21), point.cex = 0.85, point.col = NULL, point.bg.col = NULL, point.alpha = 0.66, line.col = NULL, line.alpha = 0.66, lty = 1, lwd = 2, marker.col = NULL, marker.alpha = 0.66, error.x.col = NULL, error.y.col = NULL, error.x.lty = 1, error.y.lty = 1, error.x.lwd = 1, error.y.lwd = 1, error.arrow.code = 3, fit.col = NULL, fit.lwd = 2.5, fit.alpha = 0.66, fit.legend = ifelse(is.null(fit), FALSE, TRUE), se.lty = "poly", se.lwd = 1, se.col = NULL, se.alpha = 0.5, se.times = 2, se.border = FALSE, se.density = NULL, xy.fit = NULL, xy.fit.col = NULL, firstpc = FALSE, firstpc.lty = 3, firstpc.col = NULL, hline = NULL, hline.col = NULL, hline.lwd = 1.5, hline.lty = 3, vline = NULL, vline.lwd = 1.5, vline.col = "blue", vline.lty = 3, diagonal = FALSE, diagonal.inv = FALSE, diagonal.lwd = 1.5, diagonal.lty = 1, diagonal.col = NULL, diagonal.alpha = 0.5, group.legend = NULL, group.title = NULL, group.names = NULL, group.side = 3, group.adj = 0.02, group.padj = 2, group.at = NA, fit.legend.col = NULL, fit.legend.side = 3, fit.legend.adj = 0.02, fit.legend.padj = 2, fit.legend.at = NA, labs.col = NULL, na.rm = TRUE, theme = getOption("rt.theme", "lightgrid"), palette = getOption("rt.palette", "rtCol1"), zero.lines = NULL, zero.col = NULL, zero.alpha = 1, zero.lty = 1, zero.lwd = 1.5, order.on.x = NULL, alpha.off = FALSE, autolabel = letters, new = FALSE, set.par = TRUE, par.reset = TRUE, return.lims = FALSE, pdf.width = 6, pdf.height = 6, trace = 0, filename = NULL, ... ) } \arguments{ \item{x}{Numeric vector or list of vectors for x-axis. If \code{data} is provided, name of variable, unquoted.} \item{y}{Numeric vector of list of vectors for y-axis If \code{data} is provided, name of variable, unquoted.} \item{fit}{Character: \pkg{rtemis} model to calculate y ~ x fit. Options: see \code{modSelect} Can also be Logical, which will give a GAM fit if TRUE. If you specify "NLA", the activation function should be passed as a string.} \item{formula}{Formula: Provide a formula to be solved using \link{s.NLS}. If provided, \code{fit} is forced to \code{'nls'}. e.g. y ~ b * m ^ x for a power curve. 
Note: \code{nls} is prone to errors and warnings, but is powerful. Use single letters for parameter names, no numbers.}

\item{se.fit}{Logical: If TRUE, draw the standard error of the fit}

\item{mod.params}{List: Arguments for learner defined by \code{fit}. Default = NULL, i.e. use default learner parameters}

\item{error.x}{Vector, float: Error in \code{x} (e.g. standard deviation) will be plotted as bars around point}

\item{error.y}{Vector, float: Error in \code{y} (e.g. standard deviation) will be plotted as bars around point}

\item{cluster}{Character: Clusterer name. Will cluster \code{data.frame(x, y)} and pass result to \code{group}. Run \link{clustSelect} for options}

\item{cluster.params}{List: Named list of parameters to pass to the \code{cluster} function}

\item{data}{(Optional) data frame, where \code{x} and \code{y} are defined}

\item{type}{Character: "p" for points, "l" for lines, "s" for steps. Default = "p". If \code{x} and/or \code{y} contains multiple vectors, \code{type} can be a vector, e.g. \code{c("p", "l", "l")} will give a set of points and two sets of lines. Otherwise, \code{type} is recycled to length of x}

\item{group}{Vector: will be converted to factor. If \code{data} is provided, name of variable, unquoted.}

\item{xlab}{Character: x-axis label}

\item{ylab}{Character: y-axis label}

\item{main}{Character: Plot title}

\item{main.line}{Float: \code{mtext line} argument for the title. Higher numbers move title upwards. Default = .5}

\item{main.adj}{Float: Plot title alignment (0 = left, .5 = center, 1 = right)}

\item{main.col}{Color for title. Defaults to black for theme "light" and "box", otherwise white}

\item{main.font}{Integer: 1: regular, 2: bold, 3: italic. See \code{par("font")} for more}

\item{main.family}{Character: Font family to use. See \code{par("family")}}

\item{cex}{Float: Character expansion factor (See \code{?par})}

\item{xlim}{Float vector, length 2: x-axis limits}

\item{ylim}{Float vector, length 2: y-axis limits}

\item{xpd}{Logical or NA: FALSE: plotting clipped to plot region; TRUE: plotting clipped to figure region; NA: plotting clipped to device region. Default = TRUE}

\item{xaxs}{Character: "r": Extend plot x-axis limits by 4\% on either end; "i": Use exact x-axis limits. Default = "r"}

\item{yaxs}{Character: as \code{xaxs} for the y-axis. Default = "r"}

\item{rsq}{Logical: If TRUE, add legend with R-squared (if fit is not NULL)}

\item{rsq.pval}{Logical: If TRUE, add legend with R-squared and its p-value (if fit is not NULL)}

\item{rsq.side}{Integer: [1:4] Where to place the \code{rsq} annotation. Default = 1 (i.e. bottom)}

\item{rsq.adj}{Float: Adjust \code{rsq} annotation. See \code{mtext "adj"}}

\item{rsq.col}{Color: Color for \code{rsq} annotation. Default = NULL, which results in \code{fit.col}}

\item{fit.error}{Logical: If TRUE: draw fit error annotation. Default = NULL, which results in TRUE, if fit is set}

\item{fit.error.side}{Integer [1:4]: Which side to draw \code{fit.error} on. Default = 1}

\item{fit.error.padj}{Float: See \code{mtext} \code{padj}. Default = NA}

\item{fit.error.col}{Color: Color for \code{fit.error} annotation. Default = NULL, which results in a theme-appropriate gray}

\item{xaxp}{See \code{par("xaxp")}}

\item{yaxp}{See \code{par("yaxp")}}

\item{scatter}{Logical: If TRUE, plot (x, y) scatter points. Default = TRUE}

\item{axes}{Logical: Should the axes be drawn? Defaults to TRUE}

\item{axes.equal}{Logical: Should axes be equal? Defaults to FALSE}

\item{axes.col}{Character: Color for axes values (box color set with \code{box.col})}

\item{pty}{Character: "s" gives a square plot; "m" gives a plot that fills graphics device size. Default = "m" (See \code{par("pty")})}

\item{box}{Logical: If TRUE, draw a box around the plot. Default = TRUE for themes "box" and "darkbox"}

\item{bty}{Character: "o", "l", "7", "c", "u", or "]" result in a box resembling each character. (See \code{par("bty")})}

\item{box.col}{Color: Box color}

\item{box.alpha}{Float: Alpha for \code{box.col}}

\item{box.lty}{Integer: Box line type}

\item{box.lwd}{Float: Box line width. Default = 1.5}

\item{grid}{Logical: If TRUE, draw grid}

\item{grid.nx}{Integer: N of grid lines on the x axis. Use with caution, probably along with \code{xaxs = "i"}}

\item{grid.ny}{Integer: N of grid lines on the y axis. Use with caution, probably along with \code{yaxs = "i"}}

\item{grid.col}{Color: Grid color}

\item{grid.alpha}{Float: Alpha for \code{grid.col}}

\item{grid.lty}{Integer: Grid line type (See \code{par("lty")})}

\item{grid.lwd}{Float: Grid line width}

\item{bg}{Color: Background color. Defaults to white for themes "light" and "box", black otherwise.}

\item{plot.bg}{Color: Background color. Default = NULL, which results in theme-appropriate color}

\item{annotation}{Character: Add annotation at the bottom right of the plot}

\item{annotation.col}{Color for annotation}

\item{tick.col}{Color: Tick color. Default = NULL, which results in theme-appropriate color}

\item{tck}{Float: Tick length. Can be negative (See \code{par("tck")})}

\item{x.axis.side}{Integer {1, 3}: Side to place x-axis. Default = 1}

\item{y.axis.side}{Integer {2, 4}: Side to place y-axis. Default = 2}

\item{x.axis.at}{Float, vector: x coordinates to place tick marks. Default = NULL, determined by \code{graphics::axis} automatically}

\item{y.axis.at}{As \code{x.axis.at} for y-axis}

\item{x.axis.labs}{See \code{axis("labels")}}

\item{y.axis.labs}{See \code{axis("labels")}}

\item{x.axis.padj}{Float: Adjustment for the x axis tick labels position}

\item{xlab.line}{Float: Adjustment for the x axis label position (See \code{line} in \code{?mtext})}

\item{y.axis.padj}{Float: Similar to \code{x.axis.padj} for the y axis}

\item{ylab.line}{Float: Similar to \code{xlab.line} for the y axis}

\item{xlab.adj}{Float: \code{adj} for \code{xlab} (See \code{par("adj")})}

\item{ylab.adj}{Float: \code{adj} for \code{ylab} (See \code{par("adj")})}

\item{mar}{Float, vector, length 4: Margins; see \code{par("mar")}}

\item{fit.col}{Color: Color of the fit line.}

\item{fit.lwd}{Float: Fit line width}

\item{se.lty}{How to draw the \code{se.fit}: "poly" draws a polygon around the fit line, otherwise an integer defines the lty (line type) for lines to be drawn}

\item{se.col}{Color for \code{se.fit}}

\item{se.alpha}{Alpha for \code{se.fit}}

\item{se.times}{Draw polygon or lines at +/- \code{se.times} * standard error of fit. Defaults to 2 (1.96 * standard error corresponds to 95\% confidence interval)}

\item{se.border}{Define border of polygon for \code{se.fit}. See \code{border} in \code{graphics::polygon}}

\item{se.density}{Density of shading line of polygon for \code{se.fit}. See \code{density} in \code{graphics::polygon}}

\item{diagonal}{Logical: If TRUE, draw diagonal line. Default = FALSE}

\item{diagonal.lwd}{Float: Line width for \code{diagonal}. Default = 1.5}

\item{diagonal.lty}{Integer: Line type for \code{diagonal}. Default = 1}

\item{diagonal.col}{Color: Color for \code{diagonal}. Defaults to "white" for dark themes, and "black" for light themes}

\item{diagonal.alpha}{Float: Alpha for \code{diagonal}. Default = .5}

\item{group.legend}{Logical: If TRUE, place \code{group.names} in a legend}

\item{group.title}{Character: Group title, shown above group names. e.g. if group names are c("San Francisco", "Philadelphia"), \code{group.title} can be "Place of residence"}

\item{group.names}{(Optional) If multiple groups are plotted, use these names if \code{group.title = TRUE}}

\item{theme}{Character: "light", "dark", "lightgrid", "darkgrid", "lightbox", "darkbox". Default = "lightgrid" if no default \code{"rt.theme"} is set using \code{options}. You can set a system-wide default in your \code{.Rprofile} by including a line like options(rt.theme = 'lightgrid')}

\item{palette}{Vector of colors, or Character defining a builtin palette - get all options with \code{rtPalette()}}

\item{zero.lines}{Logical: If TRUE, draw lines at x = 0 and y = 0}

\item{zero.col}{Color for \code{zero.lines}}

\item{zero.alpha}{Color alpha for \code{zero.lines}}

\item{zero.lty}{Integer: Zero line line type}

\item{zero.lwd}{Float: Zero line width}

\item{order.on.x}{Logical: If TRUE, order (x, y) by increasing x. Default = NULL: will be set to TRUE if fit is set, otherwise FALSE}

\item{new}{Logical: If TRUE, add plot to existing plot. See \code{par("new")}}

\item{par.reset}{Logical: If TRUE, reset \code{par} setting before exiting. Default = TRUE}

\item{return.lims}{Logical: If TRUE, return xlim and ylim. Default = FALSE}

\item{pdf.width}{Float: Width in inches for pdf output (if \code{filename} is set). Default = 6}

\item{pdf.height}{Float: Height in inches for pdf output. Default = 6}

\item{trace}{Integer: If > 0, pass \code{verbose = TRUE} to the cluster and fit functions, if used. Default = 0}

\item{filename}{Character: Path to file to save plot. Default = NULL}

\item{...}{Additional arguments to be passed to learner function}
}
\description{
Plot points and lines with optional fits and standard error bands
}
\details{
This is relatively old code and may need a little cleaning up
}
\author{
Efstathios D. Gennatas
}
/man/mplot3.xy.Rd
no_license
anhnguyendepocen/rtemis
R
false
true
13,058
rd
#' get_query
#'
#' @description Query Craigslist and clean the scraped data
#'
#' @param query The URL specifying the query
#' @param type What type of thing you want to look up on craigslist. Currently
#' only apartment searches are available. Default is \code{apa} for "apartment".
#'
#' @return A data frame of cleaned listings with the columns Title, Date,
#' Price, Bedrooms, SqFt, Location and URL.
#'
#' @import magrittr
#' @keywords internal
#' @export
#'
get_query <- function(query, type = "apa") {

  ## The raw query
  ## Note: `proxy` and `port` are expected to be defined elsewhere in the
  ## package before this internal function is called.
  raw_query <- xml2::read_html(httr::GET(query, httr::use_proxy(proxy, port)))

  ## Select out the listing ads
  raw_ads <- rvest::html_nodes(raw_query, "p.result-info")

  ## Create data vectors
  create_vector(env = environment(),
                c("titles", "prices", "dates", "urls", "locales", "beds", "sqfts"))

  ## Loop through to make sure no data is missing
  for(i in 1:length(raw_ads)){

    ## Get the current post
    post <- raw_ads[i]

    ## Post title
    title <- post %>%
      rvest::html_node("a.result-title") %>%
      rvest::html_text()

    ## Post price (returns NA if an error is generated)
    price <- na_error({
      post %>%
        rvest::html_node("span.result-price") %>%
        rvest::html_text() %>%
        stringr::str_extract("[0-9]+") %>%
        as.numeric()
    })

    ## Post date
    date <- post %>%
      rvest::html_node("time") %>%
      rvest::html_attr("datetime")

    ## Post url
    url <- post %>%
      rvest::html_node(".result-title") %>%
      rvest::html_attr("href")

    ## Approx location (returns NA if an error is generated)
    locale <- na_error({
      post %>%
        rvest::html_node(".result-hood") %>%
        rvest::html_text()
    })

    ## Post bedrooms and sqft (returns NA if an error is generated)
    size <- na_error({
      post %>%
        rvest::html_node(".housing") %>%
        rvest::html_text()
    })

    # Obtain num bedrooms (returns NA if an error is generated)
    bed <- na_error({
      size %>%
        stringr::str_extract("[0-9]*br") %>%
        stringr::str_replace("br", "")
    })

    # Obtain square footage (returns NA if an error is generated)
    sqft <- na_error({
      size %>%
        stringr::str_extract("[0-9]*ft") %>%
        stringr::str_replace("ft", "")
    })

    ## Populate data vectors
    titles <- c(titles, title)
    prices <- c(prices, price)
    dates <- c(dates, date)
    urls <- c(urls, url)
    locales <- c(locales, locale)
    beds <- c(beds, bed)
    sqfts <- c(sqfts, sqft)
  }

  ## Remove parens from locations
  locales <- stringr::str_replace_all(locales, "[\\)\\()]", "")

  ## Bind the data
  clean_data <- data.frame(Title = titles, Date = dates, Price = prices,
                           Bedrooms = beds, SqFt = sqfts, Location = locales,
                           URL = urls)

  return(clean_data)
}
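get_query() relies on two helpers that are not defined in this file, na_error() and create_vector(). The definitions below are only plausible sketches inferred from how the helpers are called above; they are not the package's actual implementations.

na_error <- function(expr) {
  # evaluate an expression, returning NA instead of raising an error
  tryCatch(expr, error = function(e) NA)
}

create_vector <- function(env, names) {
  # initialise empty vectors with the given names in the supplied environment
  for (nm in names) assign(nm, c(), envir = env)
}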
/R/get_query-internal.R
no_license
abibler/craigr
R
false
false
2,906
r
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810607559368e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613104687-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
344
r
source(here::here("R/package-loading.R"))

# this will be used to test out git
# Here's an example of a conflict
/R/version_control_session.R
no_license
krauthn/LearningR
R
false
false
111
r
ColourPicker <- function(variable){ if(any(levels(variable) %in% c("Broadleaf deciduous forest", "Herbaceous"))){ colos <- c(rep(NA, length(levels(variable)))) tp <- data.frame(habitat = c("Production - Herbaceous", "Production - Plantation", "Cropland/Other vegetation mosaic", "Urban", "Bare area (consolidated", "Bare area (unconsolidated", "Water bodies", "Mixed forest", "Tree open", "Herbaceous with spare tree/shrub", "Shrub", "Herbaceous", "Sparse vegetation", "Broadleaf evergreen forest", "Broadleaf deciduous forest", "Needleleaf evergreen forest", "Needleleaf deciduous forest"), colour = c("ffff64", "ffff00", "c8c864", "c31400", "dcdcdc", 'fff5d7', "0046c8", "788200", "8ca000", "be9600", "966400", "ffb432", "ffebaf", "286400", "00a000", "003c00", "285000")) } if(any(levels(variable) %in% c("Pasture", "Primary vegetation", "Production - Arable"))){ colos <- c(rep(NA, length(levels(variable)))) tp <- data.frame(habitat = c("Pasture","Primary vegetation", "Production - Arable","Production - Crop plantations", "Production - Wood plantation", "Secondary vegetation", "Urban", "Unknown"), colour =c("FFEB3B", "1B5E20", "E65100", "FF9800", "795548", "4CAF50", "788084", "607D8B")) } if(any(levels(variable) %in% c("Annual crop", "Perennial crops","Integrated systems"))){ colos <- c(rep(NA, length(levels(variable)))) tp <- data.frame(habitat = c("Primary vegetation","Secondary vegetation","Annual crop", "Perennial crops","Integrated systems","Tree plantations", "Pastures (grazed lands)","Urban", "Unknown"), colour = c("006400", "008B00", "EEEE00", "8B8B00", "8B8970", "8B2323", "CD8500","8B8378", "292421")) } colos <- tp$colour[match(levels(variable), tp$habitat)] return(colos) }
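A short usage sketch after sourcing the function above: build a factor of habitat classes and fetch the matching colours, e.g. for a plot legend. The habitat labels below are taken from the lookup tables inside the function; note the tables store hex codes without a leading "#", so one is added before plotting.

habitats <- factor(c("Pasture", "Primary vegetation", "Urban"))
cols <- ColourPicker(habitats)  # hex codes, in the order of levels(habitats)
barplot(rep(1, nlevels(habitats)),
        col = paste0("#", cols),
        names.arg = levels(habitats))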
/Functions/ColourPicker.R
permissive
helenphillips/GlobalEWDiversity
R
false
false
2,230
r
#' @title Sampling of the full promotions data set. #' #' @description #' A sampling of the promotions data from the Complete Journey study signifying #' whether a given product was featured in the weekly mailer or was part of an #' in-store display (other than regular product placement). #' #' @source 84.51°, Customer Journey study, \url{http://www.8451.com/area51/} #' @format A data frame with 1,948,597 rows and 5 variables #' \itemize{ #' \item product_id: Uniquely identifies each product #' \item store_id: Uniquely identifies each store #' \item display_location: Display location (see details for range of values) #' \item mailer_location: Mailer location (see details for range of values) #' \item week: Week of the transaction; Ranges 1-53 #' } #' @section Display Location Codes: #' \itemize{ #' \item 0 - Not on Display #' \item 1 - Store Front #' \item 2 - Store Rear #' \item 3 - Front End Cap #' \item 4 - Mid-Aisle End Cap #' \item 5 - Rear End Cap #' \item 6 - Side-Aisle End Cap #' \item 7 - In-Aisle #' \item 9 - Secondary Location Display #' \item A - In-Shelf #' } #' @section Mailer Location Codes: #' \itemize{ #' \item 0 - Not on ad #' \item A - Interior page feature #' \item C - Interior page line item #' \item D - Front page feature #' \item F - Back page feature #' \item H - Wrap from feature #' \item J - Wrap interior coupon #' \item L - Wrap back feature #' \item P - Interior page coupon #' \item X - Free on interior page #' \item Z - Free on front page, back page or wrap #' } #' #' @seealso Use \code{\link{get_promotions}} to download the entire promotions #' data containing all 20,940,529 rows. #' #' @docType data #' @name promotions_sample #' #' @examples #' \donttest{ #' # sampled promotions data set #' promotions_sample #' #' # Join promotions to transactions to analyze #' # product promotion/location #' require("dplyr") #' transactions %>% #' left_join(promotions_sample, #' c("product_id", "store_id", "week")) #' } #' @importFrom tibble tibble NULL
/R/promotions.R
no_license
Adrianh31/completejourney
R
false
false
2,080
r
#' @title Sampling of the full promotions data set. #' #' @description #' A sampling of the promotions data from the Complete Journey study signifying #' whether a given product was featured in the weekly mailer or was part of an #' in-store display (other than regular product placement). #' #' @source 84.51°, Customer Journey study, \url{http://www.8451.com/area51/} #' @format A data frame with 1,948,597 rows and 5 variables #' \itemize{ #' \item product_id: Uniquely identifies each product #' \item store_id: Uniquely identifies each store #' \item display_location: Display location (see details for range of values) #' \item mailer_location: Mailer location (see details for range of values) #' \item week: Week of the transaction; Ranges 1-53 #' } #' @section Display Location Codes: #' \itemize{ #' \item 0 - Not on Display #' \item 1 - Store Front #' \item 2 - Store Rear #' \item 3 - Front End Cap #' \item 4 - Mid-Aisle End Cap #' \item 5 - Rear End Cap #' \item 6 - Side-Aisle End Cap #' \item 7 - In-Aisle #' \item 9 - Secondary Location Display #' \item A - In-Shelf #' } #' @section Mailer Location Codes: #' \itemize{ #' \item 0 - Not on ad #' \item A - Interior page feature #' \item C - Interior page line item #' \item D - Front page feature #' \item F - Back page feature #' \item H - Wrap from feature #' \item J - Wrap interior coupon #' \item L - Wrap back feature #' \item P - Interior page coupon #' \item X - Free on interior page #' \item Z - Free on front page, back page or wrap #' } #' #' @seealso Use \code{\link{get_promotions}} to download the entire promotions #' data containing all 20,940,529 rows. #' #' @docType data #' @name promotions_sample #' #' @examples #' \donttest{ #' # sampled promotions data set #' promotions_sample #' #' # Join promotions to transactions to analyze #' # product promotion/location #' require("dplyr") #' transactions %>% #' left_join(promotions_sample, #' c("product_id", "store_id", "week")) #' } #' @importFrom tibble tibble NULL
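A short, hedged summary sketch for the promotion codes documented above, assuming promotions_sample is loaded from the completejourney package and that the location codes are stored as character strings (several of the codes are letters).

library(dplyr)
library(completejourney)   # assumed source of promotions_sample

# Share of promotion records involving any in-store display or any mailer mention,
# using the code meanings listed above (0 = not on display / not on ad)
promotions_sample %>%
  summarise(pct_on_display = mean(display_location != "0"),
            pct_in_mailer  = mean(mailer_location  != "0"))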
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{alice_dat} \alias{alice_dat} \title{Alice's gene data} \format{A tibble with 32 rows and 4 variables: \describe{ \item{id}{Participant ID} \item{picture}{A factor describing whether a participant tried to resemble a picture of themselves (Self), or a matched stranger (Other)} \item{gene}{A factor describing whether a participant received the toggle switch (C-gene + Toggle) or not (C-gene)} \item{resemblance}{How closely their face resembled the picture (100\% = the participants face is exactly like the face in the photograph, 0\% the person in the photo bears no resemblance at all to the participant)} }} \source{ \url{https://www.discoveringstatistics.com/books/an-adventure-in-statistics/} } \usage{ alice_dat } \description{ A dataset relating to the following fictitious experiment: Participants who had facial burns were recruited. One-week pre-test, a gene C-10XFMG (the so-called chameleon gene) was introduced into all participants.Two days prior to test, a genetic toggle switch was introduced into half of the participants. Half of the participants were asked to look at a photograph of themselves from before their injury and to imagine the cells in their faces changing to become like the photo. The remainder acted as a control and were asked to look at a picture of a same-sex stranger and to try to change their face to become the person in the picture. All participants looked at the picture for 6 sessions of 20 minutes each. At the end of the sessions their faces were scanned into a computer and compared to the face in the photograph. Facial recognition software produced a precise resemblance measure as a percentage (100% = the participants face is exactly like the face in the photograph, 0% the person in the photo bears no resemblance at all to the participant). The data are taken from Chapter 17 of Field, A. P. (2016). An adventure in statistics: the reality enigma. London: Sage. } \keyword{datasets}
/man/alice_dat.Rd
no_license
karthy257/adventr
R
false
true
2,051
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{alice_dat} \alias{alice_dat} \title{Alice's gene data} \format{A tibble with 32 rows and 4 variables: \describe{ \item{id}{Participant ID} \item{picture}{A factor describing whether a participant tried to resemble a picture of themselves (Self), or a matched stranger (Other)} \item{gene}{A factor describing whether a participant received the toggle switch (C-gene + Toggle) or not (C-gene)} \item{resemblance}{How closely their face resembled the picture (100\% = the participants face is exactly like the face in the photograph, 0\% the person in the photo bears no resemblance at all to the participant)} }} \source{ \url{https://www.discoveringstatistics.com/books/an-adventure-in-statistics/} } \usage{ alice_dat } \description{ A dataset relating to the following fictitious experiment: Participants who had facial burns were recruited. One-week pre-test, a gene C-10XFMG (the so-called chameleon gene) was introduced into all participants.Two days prior to test, a genetic toggle switch was introduced into half of the participants. Half of the participants were asked to look at a photograph of themselves from before their injury and to imagine the cells in their faces changing to become like the photo. The remainder acted as a control and were asked to look at a picture of a same-sex stranger and to try to change their face to become the person in the picture. All participants looked at the picture for 6 sessions of 20 minutes each. At the end of the sessions their faces were scanned into a computer and compared to the face in the photograph. Facial recognition software produced a precise resemblance measure as a percentage (100% = the participants face is exactly like the face in the photograph, 0% the person in the photo bears no resemblance at all to the participant). The data are taken from Chapter 17 of Field, A. P. (2016). An adventure in statistics: the reality enigma. London: Sage. } \keyword{datasets}
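An illustrative analysis sketch for the 2 x 2 design described above, assuming the adventr package exposes alice_dat with the four documented columns.

library(adventr)   # assumed to provide alice_dat

# Cell means for the 2 x 2 (picture x gene) design
aggregate(resemblance ~ picture + gene, data = alice_dat, FUN = mean)

# Factorial ANOVA: main effects of picture and gene plus their interaction
summary(aov(resemblance ~ picture * gene, data = alice_dat))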
# Exercise 5: Gates Foundation Educational Grants setwd('/c/users/hsuwy/desktop/school/info201/m8-dataframes/exercise-5/') # Read data into a variable called `grants` using the `read.csv` function grants <- read.csv('data/gates_money.csv', stringsAsFactors = FALSE) # Use the View function to look at your data View(grants) # Create a variable `spending` as the `total_amount` column of the dataset spending <- grants$total_amount # Confirm that your `spending` variable is a vector using the `is.vector` function is.vector(spending) # Create a variable `org` as the `organization` column of the dataset. # Unfortunately, it will not be a vector by default, so you must transform it using the as.vector function org <- as.vector(grants$organization) ### Now you can ask some more interesting questions about the dataset. Store your answers in variables ### # What was the mean grant value? mean.grant <- mean(spending) # What was the dollar amount of the largest grant? largest.grant <- max(spending) # What was the dollar amount of the smallest grant? smallest.grant <- min(spending) # Which organization received the largest grant? largest.grant.org <- org[largest.grant==spending] # Which organization received the smallest grant? smalest.grant.org <- org[smallest.grant==spending] # How many grants were awarded in 2010? grants.2010 <- length(org[grants$start_year==2010])
/exercise-5/exercise.R
permissive
wynhsu/m8-dataframes
R
false
false
1,389
r
# Exercise 5: Gates Foundation Educational Grants setwd('/c/users/hsuwy/desktop/school/info201/m8-dataframes/exercise-5/') # Read data into a variable called `grants` using the `read.csv` function grants <- read.csv('data/gates_money.csv', stringsAsFactors = FALSE) # Use the View function to look at your data View(grants) # Create a variable `spending` as the `total_amount` column of the dataset spending <- grants$total_amount # Confirm that your `spending` variable is a vector using the `is.vector` function is.vector(spending) # Create a variable `org` as the `organization` column of the dataset. # Unfortunately, it will not be a vector by default, so you must transform it using the as.vector function org <- as.vector(grants$organization) ### Now you can ask some more interesting questions about the dataset. Store your answers in variables ### # What was the mean grant value? mean.grant <- mean(spending) # What was the dollar amount of the largest grant? largest.grant <- max(spending) # What was the dollar amount of the smallest grant? smallest.grant <- min(spending) # Which organization received the largest grant? largest.grant.org <- org[largest.grant==spending] # Which organization received the smallest grant? smalest.grant.org <- org[smallest.grant==spending] # How many grants were awarded in 2010? grants.2010 <- length(org[grants$start_year==2010])
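An alternative, hedged version of the same look-ups using which.max()/which.min(), which returns a single organization even when grant amounts are tied; it reuses the grants, org, and spending objects created above.

# Same look-ups with index functions rather than logical masks
largest.grant.org  <- org[which.max(spending)]    # a single organization, even with tied amounts
smallest.grant.org <- org[which.min(spending)]

# Summing a logical vector counts the TRUEs directly
grants.2010 <- sum(grants$start_year == 2010)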
% Generated by roxygen2 (4.0.1): do not edit by hand \name{simulation} \alias{simulation} \title{Simulation in Oedekoven et al} \description{ The following is the code used to generate the simulation results in the paper. It will likely take about 10 hours to run all of the simulations depending on your computer. } \examples{ \dontrun{ require(mrds) #define function for simulations tsims=function(N=4000,df=3,w=40,nreps=100,plot=TRUE,hz=FALSE, tdf=TRUE,hzscale=1,hzpow=2,sigma=1) { maxw=w func_t=function(x,df) return(dt(x,df)/dt(0,df)) TrueN=N/w fN=matrix(NA,ncol=2,nrow=nreps) gN=matrix(NA,ncol=2,nrow=nreps) hrN=matrix(NA,ncol=2,nrow=nreps) fAIC=matrix(NA,ncol=2,nrow=nreps) gAIC=matrix(NA,ncol=2,nrow=nreps) hrAIC=matrix(NA,ncol=2,nrow=nreps) nobs=matrix(NA,ncol=2,nrow=nreps) w=matrix(NA,ncol=2,nrow=nreps) for (i in 1:nreps) { u=runif(N,0,maxw) if(hz) seen=mrds:::keyfct.hz(u,hzscale,hzpow)>=runif(N,0,1) else if(tdf) seen=func_t(u,df=df)>=runif(N,0,1) else seen=mrds:::keyfct.hn(u,key.scale=exp(hzscale+rnorm(N,0,sigma)))>=runif(N,0,1) xx=u[seen] n=length(xx) data=data.frame(distance=xx,observer=rep(1,n),detected=rep(1,n),object=1:n) for(j in 1:2) { nobs[i,j]=n if(j==1) w[i,j]=max(data$distance) else w[i,j]=min(maxw,2*max(data$distance)) # code for hazard rate if(hz) mod.hr=ddf(dsmodel=~cds(key="hr"),method="ds",data=data,meta.data=list(width=w[i,j]), control=list(initial=list(scale=log(hzscale),shape=log(hzpow)))) else mod.hr=ddf(dsmodel=~cds(key="hr"),method="ds",data=data, meta.data=list(width=w[i,j])) # code for g likelihood mod.admb=fitadmb(data, w = w[i,j], formula = ~1,likelihood="g",keep=TRUE) # code for f2 likelihood mod.admbf=fitadmb(data, w = w[i,j], formula = ~1,likelihood="f2",keep=TRUE) hrN[i,j]=mod.hr$Nhat/w[i,j] hrAIC[i,j]=mod.hr$criterion gN[i,j]=compute_Nhat(mod.admb$coefficients[1:2],x=data$distance, w=w[i,j],adjust=F)/w[i,j] gAIC[i,j]=-2*mod.admb$loglik+4 fN[i,j]=compute_Nhat(mod.admbf$coefficients[1:2],x=data$distance, w=w[i,j],adjust=T)/w[i,j] fAIC[i,j]=-2*mod.admbf$loglik+4 nclass=ceiling(sqrt(n)*2) if(plot) { par(mfrow=c(3,1)) param=c(mod.admb$coefficients[1],mod.admb$coefficients[2]) plotfit(data$distance,w=w[i,j],param,nclass=nclass,adjust=F) if(hz) lines((0:(ceiling(w[i,j]*100)))/100, mrds:::keyfct.hz((0:(ceiling(w[i,j]*100)))/100,hzscale,hzpow),lty=2) else lines((0:(ceiling(w[i,j]*100)))/100, func_t((0:(ceiling(w[i,j]*100)))/100,df=df),lty=2) param=c(mod.admbf$coefficients[1],mod.admbf$coefficients[2]) plotfit(data$distance,w=w[i,j],c(param[1]-exp(2*param[2]),param[2]), nclass=nclass,adjust=F) if(hz) lines((0:(ceiling(w[i,j]*100)))/100, mrds:::keyfct.hz((0:(ceiling(w[i,j]*100)))/100,hzscale,hzpow),lty=2) else lines((0:(ceiling(w[i,j]*100)))/100, func_t((0:(ceiling(w[i,j]*100)))/100,df=df),lty=2) plot(mod.hr,nc=nclass,showpoints=FALSE) if(hz) lines((0:(ceiling(w[i,j]*100)))/100, mrds:::keyfct.hz((0:(ceiling(w[i,j]*100)))/100,hzscale,hzpow),lty=2) else lines((0:(ceiling(w[i,j]*100)))/100, func_t((0:(ceiling(w[i,j]*100)))/100,df=df),lty=2) } } } return(list(TrueN=TrueN,fN=fN,gN=gN,hrN=hrN,fAIC=fAIC, gAIC=gAIC,hrAIC=hrAIC,nobs=nobs,w=w)) } # define function to summarize results sim_summary=function(z) { res=NULL TrueN=z$TrueN for (i in 1:2) { x=lapply(z[2:9],function(x)return(x[,i])) AvgN=with(x,c(fN[fAIC<hrAIC],hrN[fAIC>=hrAIC])) prb_avg=100*(AvgN-TrueN)/TrueN res=rbind(res,c(w=mean(x$w),nobs=mean(x$nobs),prb_f=mean(100*(x$fN-TrueN)/TrueN), prb_hr=mean(100*(x$hrN-TrueN)/TrueN),prb_avg=mean(prb_avg), se_prb=sqrt(var(prb_avg)/length(x$w)), 
prop_f_better_than_hr=with(x,mean(fAIC<hrAIC)), prop_f_better_than_g=with(x,mean(fAIC<gAIC)), prop_g_better_than_f=with(x,mean(fAIC>gAIC)), rmse_hr =with(x,100*sqrt((TrueN-mean(x$hrN))^2+var(x$hrN)))/TrueN, rmse_f = with(x,100*sqrt((TrueN-mean(x$fN))^2+var(x$fN)))/TrueN, rmse_avg =with(x,100*sqrt((TrueN-mean(AvgN))^2+var(AvgN)))/TrueN)) } return(res) } # perform simulations nreps=500 set.seed(93851) t.df3.n180=tsims(plot=F,nreps=nreps) t.df5.n180=tsims(df=5,plot=F,nreps=nreps) t.df10.n180=tsims(df=10,plot=F,nreps=nreps) t.df3.n90=tsims(N=2000,plot=F,nreps=nreps) t.df5.n90=tsims(N=2000,df=5,plot=F,nreps=nreps) t.df10.n90=tsims(N=2000,df=10,plot=F,nreps=nreps) hn.n180=tsims(N=8000,td=FALSE,hzscale=-.5,sigma=0.5,plot=F,nreps=nreps) hn.n90=tsims(N=4000,td=FALSE,hzscale=-.5,sigma=0.5,plot=F,nreps=nreps) hr.n180=tsims(N=7000,hz=TRUE,hzscale=.7,hzpow=2.5,plot=F,nreps=nreps) hr.n90=tsims(N=3500,hz=TRUE,hzscale=.7,hzpow=2.5,plot=F,nreps=nreps) # pull together results into a table which can be used by xtable for TeX file sim_results=rbind(sim_summary(t.df3.n180),sim_summary(t.df5.n180), sim_summary(t.df10.n180),sim_summary(t.df3.n90), sim_summary(t.df5.n90),sim_summary(t.df10.n90), sim_summary(hn.n180),sim_summary(hn.n90),sim_summary(hr.n180), sim_summary(hr.n90)) sim_results=cbind(c("t(df=3)","","t(df=5)","","t(df=10)","","t(df=3)","","t(df=5)","", "t(df=10)","","hn","","hn","","hz","","hz",""), apply(sim_results,2,function(x) sprintf("\%0.2f",x))) colnames(sim_results)[1]="Function" colnames(sim_results)[2]="$\\\\bar{w}\\\\;\\\\;$" colnames(sim_results)[3]="$\\\\bar{n}\\\\;\\\\;\\\\;$" colnames(sim_results)[4]="$PRB_F$" colnames(sim_results)[5]="$PRB_{HR}$" colnames(sim_results)[6]="$PRB_{AVG}$" colnames(sim_results)[7]="$se(PRB_{AVG})$" colnames(sim_results)[8]="$AIC_F<AIC_{HR}$" colnames(sim_results)[9]="$AIC_F<AIC_G$" colnames(sim_results)[10]="$AIC_G<AIC_F$" colnames(sim_results)[11]="$RMSE_F$" colnames(sim_results)[12]="$RMSE_{HR}$" colnames(sim_results)[13]="$RMSE_{AVG}$" #commented out code that produces table in paper #library(xtable) #print(xtable(sim_results,caption=paste("Percent relative bias (PRB) and root mean", #" square error (RMSE) as proportion of true abundance for random scale half-normal", #" and hazard rate detection function models for distances generated from t-distribution, #" random scale half-normal and hazard rate detection functions. Each value is the summary", #" for 100 replicate simulations. The subscripts F, G and HR refer to the likelihoods eq (6),", #" eq (9) and the hazard rate. AVG subscript represents the values in which the estimate was", #" generated from the model that had the lowest AIC for each replicate.", #label="simresults",align=c("c","c",rep("r",12))), #caption.placement="top",latex.environments="center",size="scriptsize",include.rownames=FALSE, #sanitize.colnames.function = function(x){x}, sanitize.text.function=function(x){x}) } }
/RandomScale/man/simulation.Rd
no_license
jlaake/RandomScale
R
false
false
7,072
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{simulation} \alias{simulation} \title{Simulation in Oedekoven et al} \description{ The following is the code used to generate the simulation results in the paper. It will likely take about 10 hours to run all of the simulations depending on your computer. } \examples{ \dontrun{ require(mrds) #define function for simulations tsims=function(N=4000,df=3,w=40,nreps=100,plot=TRUE,hz=FALSE, tdf=TRUE,hzscale=1,hzpow=2,sigma=1) { maxw=w func_t=function(x,df) return(dt(x,df)/dt(0,df)) TrueN=N/w fN=matrix(NA,ncol=2,nrow=nreps) gN=matrix(NA,ncol=2,nrow=nreps) hrN=matrix(NA,ncol=2,nrow=nreps) fAIC=matrix(NA,ncol=2,nrow=nreps) gAIC=matrix(NA,ncol=2,nrow=nreps) hrAIC=matrix(NA,ncol=2,nrow=nreps) nobs=matrix(NA,ncol=2,nrow=nreps) w=matrix(NA,ncol=2,nrow=nreps) for (i in 1:nreps) { u=runif(N,0,maxw) if(hz) seen=mrds:::keyfct.hz(u,hzscale,hzpow)>=runif(N,0,1) else if(tdf) seen=func_t(u,df=df)>=runif(N,0,1) else seen=mrds:::keyfct.hn(u,key.scale=exp(hzscale+rnorm(N,0,sigma)))>=runif(N,0,1) xx=u[seen] n=length(xx) data=data.frame(distance=xx,observer=rep(1,n),detected=rep(1,n),object=1:n) for(j in 1:2) { nobs[i,j]=n if(j==1) w[i,j]=max(data$distance) else w[i,j]=min(maxw,2*max(data$distance)) # code for hazard rate if(hz) mod.hr=ddf(dsmodel=~cds(key="hr"),method="ds",data=data,meta.data=list(width=w[i,j]), control=list(initial=list(scale=log(hzscale),shape=log(hzpow)))) else mod.hr=ddf(dsmodel=~cds(key="hr"),method="ds",data=data, meta.data=list(width=w[i,j])) # code for g likelihood mod.admb=fitadmb(data, w = w[i,j], formula = ~1,likelihood="g",keep=TRUE) # code for f2 likelihood mod.admbf=fitadmb(data, w = w[i,j], formula = ~1,likelihood="f2",keep=TRUE) hrN[i,j]=mod.hr$Nhat/w[i,j] hrAIC[i,j]=mod.hr$criterion gN[i,j]=compute_Nhat(mod.admb$coefficients[1:2],x=data$distance, w=w[i,j],adjust=F)/w[i,j] gAIC[i,j]=-2*mod.admb$loglik+4 fN[i,j]=compute_Nhat(mod.admbf$coefficients[1:2],x=data$distance, w=w[i,j],adjust=T)/w[i,j] fAIC[i,j]=-2*mod.admbf$loglik+4 nclass=ceiling(sqrt(n)*2) if(plot) { par(mfrow=c(3,1)) param=c(mod.admb$coefficients[1],mod.admb$coefficients[2]) plotfit(data$distance,w=w[i,j],param,nclass=nclass,adjust=F) if(hz) lines((0:(ceiling(w[i,j]*100)))/100, mrds:::keyfct.hz((0:(ceiling(w[i,j]*100)))/100,hzscale,hzpow),lty=2) else lines((0:(ceiling(w[i,j]*100)))/100, func_t((0:(ceiling(w[i,j]*100)))/100,df=df),lty=2) param=c(mod.admbf$coefficients[1],mod.admbf$coefficients[2]) plotfit(data$distance,w=w[i,j],c(param[1]-exp(2*param[2]),param[2]), nclass=nclass,adjust=F) if(hz) lines((0:(ceiling(w[i,j]*100)))/100, mrds:::keyfct.hz((0:(ceiling(w[i,j]*100)))/100,hzscale,hzpow),lty=2) else lines((0:(ceiling(w[i,j]*100)))/100, func_t((0:(ceiling(w[i,j]*100)))/100,df=df),lty=2) plot(mod.hr,nc=nclass,showpoints=FALSE) if(hz) lines((0:(ceiling(w[i,j]*100)))/100, mrds:::keyfct.hz((0:(ceiling(w[i,j]*100)))/100,hzscale,hzpow),lty=2) else lines((0:(ceiling(w[i,j]*100)))/100, func_t((0:(ceiling(w[i,j]*100)))/100,df=df),lty=2) } } } return(list(TrueN=TrueN,fN=fN,gN=gN,hrN=hrN,fAIC=fAIC, gAIC=gAIC,hrAIC=hrAIC,nobs=nobs,w=w)) } # define function to summarize results sim_summary=function(z) { res=NULL TrueN=z$TrueN for (i in 1:2) { x=lapply(z[2:9],function(x)return(x[,i])) AvgN=with(x,c(fN[fAIC<hrAIC],hrN[fAIC>=hrAIC])) prb_avg=100*(AvgN-TrueN)/TrueN res=rbind(res,c(w=mean(x$w),nobs=mean(x$nobs),prb_f=mean(100*(x$fN-TrueN)/TrueN), prb_hr=mean(100*(x$hrN-TrueN)/TrueN),prb_avg=mean(prb_avg), se_prb=sqrt(var(prb_avg)/length(x$w)), 
prop_f_better_than_hr=with(x,mean(fAIC<hrAIC)), prop_f_better_than_g=with(x,mean(fAIC<gAIC)), prop_g_better_than_f=with(x,mean(fAIC>gAIC)), rmse_hr =with(x,100*sqrt((TrueN-mean(x$hrN))^2+var(x$hrN)))/TrueN, rmse_f = with(x,100*sqrt((TrueN-mean(x$fN))^2+var(x$fN)))/TrueN, rmse_avg =with(x,100*sqrt((TrueN-mean(AvgN))^2+var(AvgN)))/TrueN)) } return(res) } # perform simulations nreps=500 set.seed(93851) t.df3.n180=tsims(plot=F,nreps=nreps) t.df5.n180=tsims(df=5,plot=F,nreps=nreps) t.df10.n180=tsims(df=10,plot=F,nreps=nreps) t.df3.n90=tsims(N=2000,plot=F,nreps=nreps) t.df5.n90=tsims(N=2000,df=5,plot=F,nreps=nreps) t.df10.n90=tsims(N=2000,df=10,plot=F,nreps=nreps) hn.n180=tsims(N=8000,td=FALSE,hzscale=-.5,sigma=0.5,plot=F,nreps=nreps) hn.n90=tsims(N=4000,td=FALSE,hzscale=-.5,sigma=0.5,plot=F,nreps=nreps) hr.n180=tsims(N=7000,hz=TRUE,hzscale=.7,hzpow=2.5,plot=F,nreps=nreps) hr.n90=tsims(N=3500,hz=TRUE,hzscale=.7,hzpow=2.5,plot=F,nreps=nreps) # pull together results into a table which can be used by xtable for TeX file sim_results=rbind(sim_summary(t.df3.n180),sim_summary(t.df5.n180), sim_summary(t.df10.n180),sim_summary(t.df3.n90), sim_summary(t.df5.n90),sim_summary(t.df10.n90), sim_summary(hn.n180),sim_summary(hn.n90),sim_summary(hr.n180), sim_summary(hr.n90)) sim_results=cbind(c("t(df=3)","","t(df=5)","","t(df=10)","","t(df=3)","","t(df=5)","", "t(df=10)","","hn","","hn","","hz","","hz",""), apply(sim_results,2,function(x) sprintf("\%0.2f",x))) colnames(sim_results)[1]="Function" colnames(sim_results)[2]="$\\\\bar{w}\\\\;\\\\;$" colnames(sim_results)[3]="$\\\\bar{n}\\\\;\\\\;\\\\;$" colnames(sim_results)[4]="$PRB_F$" colnames(sim_results)[5]="$PRB_{HR}$" colnames(sim_results)[6]="$PRB_{AVG}$" colnames(sim_results)[7]="$se(PRB_{AVG})$" colnames(sim_results)[8]="$AIC_F<AIC_{HR}$" colnames(sim_results)[9]="$AIC_F<AIC_G$" colnames(sim_results)[10]="$AIC_G<AIC_F$" colnames(sim_results)[11]="$RMSE_F$" colnames(sim_results)[12]="$RMSE_{HR}$" colnames(sim_results)[13]="$RMSE_{AVG}$" #commented out code that produces table in paper #library(xtable) #print(xtable(sim_results,caption=paste("Percent relative bias (PRB) and root mean", #" square error (RMSE) as proportion of true abundance for random scale half-normal", #" and hazard rate detection function models for distances generated from t-distribution, #" random scale half-normal and hazard rate detection functions. Each value is the summary", #" for 100 replicate simulations. The subscripts F, G and HR refer to the likelihoods eq (6),", #" eq (9) and the hazard rate. AVG subscript represents the values in which the estimate was", #" generated from the model that had the lowest AIC for each replicate.", #label="simresults",align=c("c","c",rep("r",12))), #caption.placement="top",latex.environments="center",size="scriptsize",include.rownames=FALSE, #sanitize.colnames.function = function(x){x}, sanitize.text.function=function(x){x}) } }
library("openxlsx") library(ggplot2) library(dplyr) library(sqldf) #reading the customer data customer_churn_data <- read.xlsx( "Telco Churn.xlsx", sheet = "WA_Fn-UseC_-Telco-Customer-Chur", startRow = 1, colNames = TRUE ) dim(customer_churn_data) #7043 rows 21 variables names(customer_churn_data) names(customer_churn_data) # [1] "customerID" "gender" "SeniorCitizen" "Partner" "Dependents" # [6] "tenure" "PhoneService" "MultipleLines" "InternetService" "OnlineSecurity" # [11] "OnlineBackup" "DeviceProtection" "TechSupport" "StreamingTV" "StreamingMovies" # [16] "Contract" "PaperlessBilling" "PaymentMethod" "MonthlyCharges" "TotalCharges" # [21] "Churn" str(customer_churn_data) customer_churn_data$TotalCharges <- as.numeric(customer_churn_data$TotalCharges) summary(customer_churn_data) customer_churn_data <- subset(customer_churn_data, !is.na(customer_churn_data$TotalCharges)) sum(is.na(customer_churn_data)) # Churn prop.table(table(customer_churn_data$Churn)) customer_churn_data$tenureCategory <- cut(customer_churn_data$tenure, seq(0, 75, 5), right = FALSE) customer_churn_data$MonthlyChargesCategory <- cut(customer_churn_data$MonthlyCharges, seq(0, 120,20), right = FALSE) customer_churn_data$TotalChargesCategory <- cut(customer_churn_data$TotalCharges, seq(0, 9000,1000), right = FALSE) customer_churn_data <- customer_churn_data[, !names(customer_churn_data) %in% c("customerID", "tenure", "MonthlyCharges", "TotalCharges")] #Now, creating dummy variables using one hot encoding: #Converting every categorical variable to numerical using dummy variables dmy <- dummyVars(" ~ .", data = customer_churn_data, fullRank = T) customer_churn_transformed <- data.frame(predict(dmy, newdata = customer_churn_data)) #Checking the structure of transformed train file str(customer_churn_transformed) sum(is.na(customer_churn_transformed)) ## kmeans cluster #how many clusters?? library(cluster) library(fpc) #Elbow Method for finding the optimal number of clusters set.seed(123) # Compute and plot wss for k = 2 to k = 10. 
k.max <- 10 data <- customer_churn_transformed wss <- sapply(1:k.max, function(k) { kmeans(data, k, nstart = 50, iter.max = 15)$tot.withinss }) wss plot( 1:k.max, wss, type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters K", ylab = "Total within-clusters sum of squares" ) rm(data) #4 kmeans_fit <- kmeans(customer_churn_transformed, 4, nstart = 25) #kmeans_fit$centers kmeans_fit$size customer_churn_transformed$cluster <- kmeans_fit$cluster customer_churn_data$cluster <- kmeans_fit$cluster ggplot(data=customer_churn_data, aes(x=cluster,fill=Churn))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=gender))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=Partner))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=Dependents))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=PhoneService))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=MultipleLines))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=InternetService))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=OnlineSecurity))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=OnlineBackup))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=DeviceProtection))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=TechSupport))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=StreamingTV))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=StreamingMovies))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=Contract))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=PaperlessBilling))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=PaymentMethod))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=tenureCategory))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=MonthlyChargesCategory))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=TotalChargesCategory))+ geom_bar(position = "dodge")
/Customer Churn Analysis/R/Customer_Churn_Clusters.r
no_license
SinhaRuma/Data-Analysis-Models
R
false
false
4,708
r
library("openxlsx") library(ggplot2) library(dplyr) library(sqldf) #reading the customer data customer_churn_data <- read.xlsx( "Telco Churn.xlsx", sheet = "WA_Fn-UseC_-Telco-Customer-Chur", startRow = 1, colNames = TRUE ) dim(customer_churn_data) #7043 rows 21 variables names(customer_churn_data) names(customer_churn_data) # [1] "customerID" "gender" "SeniorCitizen" "Partner" "Dependents" # [6] "tenure" "PhoneService" "MultipleLines" "InternetService" "OnlineSecurity" # [11] "OnlineBackup" "DeviceProtection" "TechSupport" "StreamingTV" "StreamingMovies" # [16] "Contract" "PaperlessBilling" "PaymentMethod" "MonthlyCharges" "TotalCharges" # [21] "Churn" str(customer_churn_data) customer_churn_data$TotalCharges <- as.numeric(customer_churn_data$TotalCharges) summary(customer_churn_data) customer_churn_data <- subset(customer_churn_data, !is.na(customer_churn_data$TotalCharges)) sum(is.na(customer_churn_data)) # Churn prop.table(table(customer_churn_data$Churn)) customer_churn_data$tenureCategory <- cut(customer_churn_data$tenure, seq(0, 75, 5), right = FALSE) customer_churn_data$MonthlyChargesCategory <- cut(customer_churn_data$MonthlyCharges, seq(0, 120,20), right = FALSE) customer_churn_data$TotalChargesCategory <- cut(customer_churn_data$TotalCharges, seq(0, 9000,1000), right = FALSE) customer_churn_data <- customer_churn_data[, !names(customer_churn_data) %in% c("customerID", "tenure", "MonthlyCharges", "TotalCharges")] #Now, creating dummy variables using one hot encoding: #Converting every categorical variable to numerical using dummy variables dmy <- dummyVars(" ~ .", data = customer_churn_data, fullRank = T) customer_churn_transformed <- data.frame(predict(dmy, newdata = customer_churn_data)) #Checking the structure of transformed train file str(customer_churn_transformed) sum(is.na(customer_churn_transformed)) ## kmeans cluster #how many clusters?? library(cluster) library(fpc) #Elbow Method for finding the optimal number of clusters set.seed(123) # Compute and plot wss for k = 2 to k = 10. 
k.max <- 10 data <- customer_churn_transformed wss <- sapply(1:k.max, function(k) { kmeans(data, k, nstart = 50, iter.max = 15)$tot.withinss }) wss plot( 1:k.max, wss, type = "b", pch = 19, frame = FALSE, xlab = "Number of clusters K", ylab = "Total within-clusters sum of squares" ) rm(data) #4 kmeans_fit <- kmeans(customer_churn_transformed, 4, nstart = 25) #kmeans_fit$centers kmeans_fit$size customer_churn_transformed$cluster <- kmeans_fit$cluster customer_churn_data$cluster <- kmeans_fit$cluster ggplot(data=customer_churn_data, aes(x=cluster,fill=Churn))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=gender))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=Partner))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=Dependents))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=PhoneService))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=MultipleLines))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=InternetService))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=OnlineSecurity))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=OnlineBackup))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=DeviceProtection))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=TechSupport))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=StreamingTV))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=StreamingMovies))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=Contract))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=PaperlessBilling))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=PaymentMethod))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=tenureCategory))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=MonthlyChargesCategory))+ geom_bar(position = "dodge") ggplot(data=customer_churn_data, aes(x=cluster,fill=TotalChargesCategory))+ geom_bar(position = "dodge")
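A hedged addendum to the clustering script above: dummyVars() belongs to the caret package, which the script never loads, and average silhouette width is a common cross-check on the elbow plot when choosing k. The subsample size and the range of k below are arbitrary choices.

library(caret)     # dummyVars() used above comes from caret, which the script does not load
library(cluster)   # silhouette()

# Drop the cluster labels appended above before re-clustering
X <- customer_churn_transformed[, setdiff(names(customer_churn_transformed), "cluster")]

# Average silhouette width for k = 2..6 on a subsample (a full distance matrix over
# ~7,000 rows is slow); higher values suggest better-separated clusters.
set.seed(123)
idx <- sample(nrow(X), 2000)
d   <- dist(X[idx, ])
sil_width <- sapply(2:6, function(k) {
  km <- kmeans(X[idx, ], centers = k, nstart = 25)
  mean(silhouette(km$cluster, d)[, 3])
})
plot(2:6, sil_width, type = "b", pch = 19,
     xlab = "Number of clusters K", ylab = "Average silhouette width")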
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)

shinyUI(fluidPage(

    titlePanel("LLUC-app-----29/11/2020"),

    tabsetPanel(
        tabPanel("tab 1",
                 sidebarLayout(
                     sidebarPanel(
                         sliderInput("bins",
                                     "Number of bins:",
                                     min = 1,
                                     max = 50,
                                     value = 30)
                     ),
                     mainPanel(
                         plotOutput("distPlot")
                     )
                 )),
        tabPanel("tab 2", "contents"),
        tabPanel("tab 3", "contents"))
))
/lluc/ui.R
no_license
lmata22/Developing-Data-Products-JH---week4
R
false
false
1,122
r
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)

shinyUI(fluidPage(

    titlePanel("LLUC-app-----29/11/2020"),

    tabsetPanel(
        tabPanel("tab 1",
                 sidebarLayout(
                     sidebarPanel(
                         sliderInput("bins",
                                     "Number of bins:",
                                     min = 1,
                                     max = 50,
                                     value = 30)
                     ),
                     mainPanel(
                         plotOutput("distPlot")
                     )
                 )),
        tabPanel("tab 2", "contents"),
        tabPanel("tab 3", "contents"))
))
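The ui.R above references input$bins and output$distPlot; a minimal matching server.R could look like the sketch below. The Old Faithful histogram is an assumption taken from the standard Shiny template, since the server file is not part of this record.

# server.R -- minimal counterpart to the ui.R above (illustrative sketch)
library(shiny)

shinyServer(function(input, output) {

    output$distPlot <- renderPlot({
        x    <- faithful[, 2]                                        # Old Faithful waiting times
        bins <- seq(min(x), max(x), length.out = input$bins + 1)
        hist(x, breaks = bins, col = "darkgray", border = "white")
    })
})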
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pdb_functions2.R \name{fasta.combine} \alias{fasta.combine} \title{Combine FASTA files} \usage{ fasta.combine( fasta_file_names, experiment_directory = NULL, fasta_file_directory = NULL, fasta_file_in_different_directory = FALSE ) } \arguments{ \item{fasta_file_names}{List of file names} \item{experiment_directory}{Experiment directory. If NULL, will use current directory using getwd()} \item{fasta_file_directory}{If FASTA files are in a different directory. Defaults to NULL.} \item{fasta_file_in_different_directory}{If FASTA files are in a different directory. Defaults to FALSE.} } \description{ This function combines FASTA files from a list of file names }
/man/fasta.combine.Rd
no_license
egmg726/crisscrosslinker
R
false
true
756
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pdb_functions2.R \name{fasta.combine} \alias{fasta.combine} \title{Combine FASTA files} \usage{ fasta.combine( fasta_file_names, experiment_directory = NULL, fasta_file_directory = NULL, fasta_file_in_different_directory = FALSE ) } \arguments{ \item{fasta_file_names}{List of file names} \item{experiment_directory}{Experiment directory. If NULL, will use current directory using getwd()} \item{fasta_file_directory}{If FASTA files are in a different directory. Defaults to NULL.} \item{fasta_file_in_different_directory}{If FASTA files are in a different directory. Defaults to FALSE.} } \description{ This function combines FASTA files from a list of file names }
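A hedged usage sketch for fasta.combine() as documented above; the file names are hypothetical, and the documentation does not state what the function returns.

# Hypothetical file names in the current working directory; whether the function
# returns the combined sequences or writes a combined file is not documented above.
combined <- fasta.combine(fasta_file_names = c("bait_protein.fasta", "prey_protein.fasta"))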
# index_by_window.R
# v.0.0 Koshlan Mayer-Blackwell 2016-3-14
# Description: sliding window analysis can be useful for the detection of trends that may exist within
# discrete time intervals but not the whole period of analysis.
# index_by_window() is a utility function that generates a list of different indices specifying a sliding window


#' index_by_window
#'
#' @param v is an index vector such as c(1,2,3,4,5)
#' @param n is an integer describing the size of the window
#' @param by is an integer specifying the window step
#'
#' @return a list of indices
#' @export
#'
#' @examples
#' ind <- index_by_window(by =1, n = 4)
#' alphabet <- letters[1:26]
#' lapply(ind, function(x) alphabet[x])
#'
index_by_window <- function(v=seq(1,10), n=5, by =1 ){
  vector_length <- length(v)
  end <- vector_length - n
  starts <- seq(1,end, by = by)
  return(lapply(starts, function(x) seq(x, x + n -1 )))
}
/R/index_by_window.R
no_license
PGWG/analysis
R
false
false
922
r
# index_by_window.R
# v.0.0 Koshlan Mayer-Blackwell 2016-3-14
# Description: sliding window analysis can be useful for the detection of trends that may exist within
# discrete time intervals but not the whole period of analysis.
# index_by_window() is a utility function that generates a list of different indices specifying a sliding window


#' index_by_window
#'
#' @param v is an index vector such as c(1,2,3,4,5)
#' @param n is an integer describing the size of the window
#' @param by is an integer specifying the window step
#'
#' @return a list of indices
#' @export
#'
#' @examples
#' ind <- index_by_window(by =1, n = 4)
#' alphabet <- letters[1:26]
#' lapply(ind, function(x) alphabet[x])
#'
index_by_window <- function(v=seq(1,10), n=5, by =1 ){
  vector_length <- length(v)
  end <- vector_length - n
  starts <- seq(1,end, by = by)
  return(lapply(starts, function(x) seq(x, x + n -1 )))
}
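One further illustrative use of index_by_window(), computing a rolling mean over a toy numeric series; the window size and step are arbitrary choices.

set.seed(1)
series <- cumsum(rnorm(30))                                      # a toy numeric series
win    <- index_by_window(v = seq_along(series), n = 5, by = 1)
rolling_mean <- sapply(win, function(i) mean(series[i]))
length(rolling_mean)   # 25 windows; note the helper stops one window short of the final element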
library(dplyr) library(leaflet) library(Kormaps) library(htmlwidgets) DONG<-read.csv('c:/RStudy/one.csv') head(DONG) data(korpopmap3) Encoding(names(korpopmap3))<-'UTF-8' Encoding(korpopmap3@data$name_eng)<-'UTF-8' Encoding(korpopmap3@data$name)<-'UTF-8' Encoding(korpopmap3@data$행정구역별_읍면동)<-'UTF-8' korpopmap3@data<-korpopmap3@data[c(359:380),] korpopmap3@polygons<-korpopmap3@polygons[c(359:380)] View(korpopmap3@data) View(korpopmap3@polygons) korpopmap3@data$name<-gsub('·','',korpopmap3@data$name) colnames(DONG)<-c('구별','name','일인가구') dong<- DONG %>%filter(구별=='강남구') korpopmap3@data<-merge(korpopmap3@data,dong,by.x='name',sort=FALSE) mymap <- korpopmap3@data mypalette <- colorNumeric(palette ='RdYlBu' , domain = mymap$'일인가구') mypopup <- paste0(mymap$name,'<br> 1인가구: ',mymap$'일인가구') map7 <- NULL map7<-leaflet(korpopmap3) %>% addTiles() %>% setView(lat=37.52711, lng=126.987517, zoom=12) %>% addPolygons(stroke =FALSE, smoothFactor = 0.2, fillOpacity = .9, popup = mypopup, color = ~mypalette(mymap$일인가구)) %>% addLegend( values = ~mymap$일인가구, pal =mypalette, title = '인구수', opacity = 1) map7 saveWidget(map7, file="map2.html")
/map2.R
no_license
hsyun89/Rstudy
R
false
false
1,230
r
library(dplyr) library(leaflet) library(Kormaps) library(htmlwidgets) DONG<-read.csv('c:/RStudy/one.csv') head(DONG) data(korpopmap3) Encoding(names(korpopmap3))<-'UTF-8' Encoding(korpopmap3@data$name_eng)<-'UTF-8' Encoding(korpopmap3@data$name)<-'UTF-8' Encoding(korpopmap3@data$행정구역별_읍면동)<-'UTF-8' korpopmap3@data<-korpopmap3@data[c(359:380),] korpopmap3@polygons<-korpopmap3@polygons[c(359:380)] View(korpopmap3@data) View(korpopmap3@polygons) korpopmap3@data$name<-gsub('·','',korpopmap3@data$name) colnames(DONG)<-c('구별','name','일인가구') dong<- DONG %>%filter(구별=='강남구') korpopmap3@data<-merge(korpopmap3@data,dong,by.x='name',sort=FALSE) mymap <- korpopmap3@data mypalette <- colorNumeric(palette ='RdYlBu' , domain = mymap$'일인가구') mypopup <- paste0(mymap$name,'<br> 1인가구: ',mymap$'일인가구') map7 <- NULL map7<-leaflet(korpopmap3) %>% addTiles() %>% setView(lat=37.52711, lng=126.987517, zoom=12) %>% addPolygons(stroke =FALSE, smoothFactor = 0.2, fillOpacity = .9, popup = mypopup, color = ~mypalette(mymap$일인가구)) %>% addLegend( values = ~mymap$일인가구, pal =mypalette, title = '인구수', opacity = 1) map7 saveWidget(map7, file="map2.html")
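A small, hedged variation on the choropleth above, reusing the objects already built in the script but binning the one-person-household counts with colorBin() instead of a continuous palette; the number of bins is arbitrary.

# Discrete bins can read more clearly than a continuous ramp on a choropleth
mypalette_bin <- colorBin(palette = 'RdYlBu', domain = mymap$일인가구, bins = 5)

leaflet(korpopmap3) %>%
  addTiles() %>%
  setView(lat = 37.52711, lng = 126.987517, zoom = 12) %>%
  addPolygons(stroke = FALSE, smoothFactor = 0.2, fillOpacity = 0.9,
              popup = mypopup, color = ~mypalette_bin(mymap$일인가구)) %>%
  addLegend(values = ~mymap$일인가구, pal = mypalette_bin, title = '인구수', opacity = 1)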
#' List the species available in the msigdbr package #' #' @return A data frame of the available species. #' #' @importFrom dplyr arrange distinct select #' @importFrom tidyselect starts_with #' @export #' #' @examples #' msigdbr_species() msigdbr_species <- function() { msigdbr_orthologs %>% select(starts_with("species")) %>% distinct() %>% arrange(.data$species_name) } #' List the species available in the msigdbr package #' #' This function is being deprecated and replaced by `msigdbr_species()`. #' #' @return A vector of possible species. #' #' @export msigdbr_show_species <- function() { .Deprecated("msigdbr_species") msigdbr_orthologs$species_name %>% unique() %>% sort() } #' List the collections available in the msigdbr package #' #' @return A data frame of the available collections. #' #' @importFrom dplyr arrange count distinct #' @export #' #' @examples #' msigdbr_collections() msigdbr_collections <- function() { msigdbr_genesets %>% distinct(.data$gs_cat, .data$gs_subcat, .data$gs_id) %>% count(.data$gs_cat, .data$gs_subcat, name = "num_genesets") %>% arrange(.data$gs_cat, .data$gs_subcat) } #' Retrieve the gene sets data frame #' #' Retrieve a data frame of gene sets and their member genes. #' The available species and collections can be checked with `msigdbr_species()` and `msigdbr_collections()`. #' #' @param species Species name, such as Homo sapiens or Mus musculus. #' @param category MSigDB collection abbreviation, such as H or C1. #' @param subcategory MSigDB sub-collection abbreviation, such as CGP or BP. #' #' @return A data frame of gene sets with one gene per row. #' #' @references \url{https://www.gsea-msigdb.org/gsea/msigdb/collections.jsp} #' #' @import tibble #' @importFrom dplyr filter inner_join arrange select #' @importFrom tidyselect everything #' @export #' #' @examples #' # get all human gene sets #' \donttest{ #' msigdbr(species = "Homo sapiens") #' } #' #' # get mouse C2 (curated) CGP (chemical and genetic perturbations) gene sets #' \donttest{ #' msigdbr(species = "Mus musculus", category = "C2", subcategory = "CGP") #' } msigdbr <- function(species = "Homo sapiens", category = NULL, subcategory = NULL) { # confirm that only one species is specified if (length(species) > 1) { stop("please specify only one species at a time") } # filter orthologs by species orthologs_subset <- filter(msigdbr_orthologs, .data$species_name == species) # confirm that the species exists in the database if (nrow(orthologs_subset) == 0) { stop("species does not exist in the database: ", species) } genesets_subset <- msigdbr_genesets # filter by category if (is.character(category)) { if (length(category) > 1) { stop("please specify only one category at a time") } if (category %in% genesets_subset$gs_cat) { genesets_subset <- filter(genesets_subset, .data$gs_cat == category) } else { stop("unknown category") } } # filter by sub-category (with and without colon) if (is.character(subcategory)) { if (length(subcategory) > 1) { stop("please specify only one subcategory at a time") } if (subcategory %in% genesets_subset$gs_subcat) { genesets_subset <- filter(genesets_subset, .data$gs_subcat == subcategory) } else if (subcategory %in% gsub(".*:", "", genesets_subset$gs_subcat)) { genesets_subset <- filter(genesets_subset, gsub(".*:", "", .data$gs_subcat) == subcategory) } else { stop("unknown subcategory") } } # combine gene sets and genes genesets_subset <- inner_join(genesets_subset, msigdbr_genes, by = "gs_id") # combine gene sets and orthologs genesets_subset %>% 
inner_join(orthologs_subset, by = "human_entrez_gene") %>% arrange(.data$gs_name, .data$human_gene_symbol, .data$gene_symbol) %>% select( .data$gs_cat, .data$gs_subcat, .data$gs_name, .data$entrez_gene, .data$ensembl_gene, .data$gene_symbol, .data$human_entrez_gene, .data$human_gene_symbol, everything() ) }
/R/functions.R
permissive
lianos/msigdbr
R
false
false
4,097
r
#' List the species available in the msigdbr package #' #' @return A data frame of the available species. #' #' @importFrom dplyr arrange distinct select #' @importFrom tidyselect starts_with #' @export #' #' @examples #' msigdbr_species() msigdbr_species <- function() { msigdbr_orthologs %>% select(starts_with("species")) %>% distinct() %>% arrange(.data$species_name) } #' List the species available in the msigdbr package #' #' This function is being deprecated and replaced by `msigdbr_species()`. #' #' @return A vector of possible species. #' #' @export msigdbr_show_species <- function() { .Deprecated("msigdbr_species") msigdbr_orthologs$species_name %>% unique() %>% sort() } #' List the collections available in the msigdbr package #' #' @return A data frame of the available collections. #' #' @importFrom dplyr arrange count distinct #' @export #' #' @examples #' msigdbr_collections() msigdbr_collections <- function() { msigdbr_genesets %>% distinct(.data$gs_cat, .data$gs_subcat, .data$gs_id) %>% count(.data$gs_cat, .data$gs_subcat, name = "num_genesets") %>% arrange(.data$gs_cat, .data$gs_subcat) } #' Retrieve the gene sets data frame #' #' Retrieve a data frame of gene sets and their member genes. #' The available species and collections can be checked with `msigdbr_species()` and `msigdbr_collections()`. #' #' @param species Species name, such as Homo sapiens or Mus musculus. #' @param category MSigDB collection abbreviation, such as H or C1. #' @param subcategory MSigDB sub-collection abbreviation, such as CGP or BP. #' #' @return A data frame of gene sets with one gene per row. #' #' @references \url{https://www.gsea-msigdb.org/gsea/msigdb/collections.jsp} #' #' @import tibble #' @importFrom dplyr filter inner_join arrange select #' @importFrom tidyselect everything #' @export #' #' @examples #' # get all human gene sets #' \donttest{ #' msigdbr(species = "Homo sapiens") #' } #' #' # get mouse C2 (curated) CGP (chemical and genetic perturbations) gene sets #' \donttest{ #' msigdbr(species = "Mus musculus", category = "C2", subcategory = "CGP") #' } msigdbr <- function(species = "Homo sapiens", category = NULL, subcategory = NULL) { # confirm that only one species is specified if (length(species) > 1) { stop("please specify only one species at a time") } # filter orthologs by species orthologs_subset <- filter(msigdbr_orthologs, .data$species_name == species) # confirm that the species exists in the database if (nrow(orthologs_subset) == 0) { stop("species does not exist in the database: ", species) } genesets_subset <- msigdbr_genesets # filter by category if (is.character(category)) { if (length(category) > 1) { stop("please specify only one category at a time") } if (category %in% genesets_subset$gs_cat) { genesets_subset <- filter(genesets_subset, .data$gs_cat == category) } else { stop("unknown category") } } # filter by sub-category (with and without colon) if (is.character(subcategory)) { if (length(subcategory) > 1) { stop("please specify only one subcategory at a time") } if (subcategory %in% genesets_subset$gs_subcat) { genesets_subset <- filter(genesets_subset, .data$gs_subcat == subcategory) } else if (subcategory %in% gsub(".*:", "", genesets_subset$gs_subcat)) { genesets_subset <- filter(genesets_subset, gsub(".*:", "", .data$gs_subcat) == subcategory) } else { stop("unknown subcategory") } } # combine gene sets and genes genesets_subset <- inner_join(genesets_subset, msigdbr_genes, by = "gs_id") # combine gene sets and orthologs genesets_subset %>% 
inner_join(orthologs_subset, by = "human_entrez_gene") %>% arrange(.data$gs_name, .data$human_gene_symbol, .data$gene_symbol) %>% select( .data$gs_cat, .data$gs_subcat, .data$gs_name, .data$entrez_gene, .data$ensembl_gene, .data$gene_symbol, .data$human_entrez_gene, .data$human_gene_symbol, everything() ) }
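A hedged usage sketch for msigdbr() as defined above, reshaping its output into the named list of gene symbols that GSEA-style tools such as fgsea typically expect.

library(dplyr)

# Mouse hallmark ("H") gene sets as a named list of gene symbols
h_sets    <- msigdbr(species = "Mus musculus", category = "H")
msig_list <- split(x = h_sets$gene_symbol, f = h_sets$gs_name)

length(msig_list)                          # 50 hallmark gene sets
head(msig_list[["HALLMARK_APOPTOSIS"]])    # first few mouse gene symbols in one set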