| content<br>large_string, lengths 0-6.46M | path<br>large_string, lengths 3-331 | license_type<br>large_string, 2 classes | repo_name<br>large_string, lengths 5-125 | language<br>large_string, 1 class | is_vendor<br>bool, 2 classes | is_generated<br>bool, 2 classes | length_bytes<br>int64, 4-6.46M | extension<br>large_string, 75 classes | text<br>string, lengths 0-6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(ranger)
### Name: treeInfo
### Title: Tree information in human readable format
### Aliases: treeInfo
### ** Examples
require(ranger)
rf <- ranger(Species ~ ., data = iris)
treeInfo(rf, 1)
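# Illustrative addition (not part of the original example): treeInfo() returns
# a data frame with one row per node of the requested tree; for a
# classification forest its columns include nodeID, leftChild, rightChild,
# splitvarName, splitval, terminal and prediction.
head(treeInfo(rf, 1))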
|
/data/genthat_extracted_code/ranger/examples/treeInfo.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 204 |
r
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/truncatedt.R
\name{truncatedt}
\alias{truncatedt}
\title{Internal function for moments of truncated t}
\usage{
truncatedt(a, mu, sigma, nu)
}
\arguments{
\item{a}{truncation point}
\item{mu}{mean of t distribution}
\item{sigma}{scale of t distribution}
\item{nu}{degrees of freedom}
}
\value{
first and second moment of the truncated t distribution
}
\description{
Internal function for moments of truncated t
}
\keyword{distribution}
\keyword{t}
\keyword{truncated}
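% Illustrative numerical check (hypothetical, not part of the package):
% assuming truncation from below at `a` (the Rd leaves the direction
% unspecified), the first moment can be approximated with stats::integrate():
m1_trunc_t <- function(a, mu, sigma, nu) {
  dens <- function(x) dt((x - mu) / sigma, df = nu) / sigma  # scaled t density
  Z <- integrate(dens, lower = a, upper = Inf)$value         # mass above a
  integrate(function(x) x * dens(x), lower = a, upper = Inf)$value / Z
}
m1_trunc_t(a = 0, mu = 0, sigma = 1, nu = 5)  # approx. 0.949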
|
/pkg/fitmixst4/man/truncatedt.Rd
|
no_license
|
jhoefler/fitmixst4
|
R
| false | false | 554 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labels.default.R
\name{labels.default}
\alias{labels.default}
\title{fun_name}
\usage{
labels.default(params)
}
\arguments{
\item{params}{fun_name}
}
\description{
another substituted function
}
|
/man/labels.default.Rd
|
no_license
|
granatb/RapeR
|
R
| false | true | 271 |
rd
|
data(iris)
library(caret)   # provides createDataPartition() and train()
library(ggplot2) # provides qplot()
names(iris)
train_indx = createDataPartition(y=iris$Species,
p=0.7,
list=F)
train = iris[train_indx,]
test = iris[-train_indx,]
lda_model = train(Species~., method='lda',
data=train)
nb_model = train(Species~., method='nb',
data=train)
pred_lda = predict(lda_model, test)#linear discriminant analysis
pred_nb = predict(nb_model, test)#naive bayes
table(pred_lda, pred_nb)
equal_pred = (pred_lda==pred_nb)
qplot(Petal.Width, Sepal.Width,
color = equal_pred, data=test)
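# Hypothetical extension (not in the original script): quantify agreement and
# compare each model's held-out accuracy with caret::confusionMatrix()
mean(equal_pred)  # proportion of test cases where the two models agree
confusionMatrix(pred_lda, test$Species)$overall["Accuracy"]
confusionMatrix(pred_nb, test$Species)$overall["Accuracy"]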
|
/practical-machine-learning/12.lda_nb.R
|
no_license
|
r3dmaohong/coursera_practice
|
R
| false | false | 596 |
r
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_aglu_L2242.land_input_4_irr_mgmt
#'
#' Generate logit exponent of the fourth land node that specifies crop commodity and GLU by region,
#' and generate the ghost node share for the bioenergy node.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L2242.LN4_Logit}, \code{L2242.LN4_NodeGhostShare}, \code{L2242.LN4_NodeIsGhostShareRel}. The corresponding file in the
#' original data system was \code{L2242.land_input_4_irr_mgmt.R} (aglu level2).
#' @details This chunk generates the logit exponent of the fourth land nest that specifies crop commodity and GLU by region,
#' and the ghost node share for the bioenergy node in future years, and specifies whether the bioenergy ghost node share is relative.
#' @importFrom assertthat assert_that
#' @importFrom dplyr bind_rows distinct filter if_else left_join mutate select
#' @importFrom tidyr separate
#' @author RC August 2017
module_aglu_L2242.land_input_4_irr_mgmt <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "aglu/A_LandNode_logit_irr",
FILE = "aglu/A_bio_ghost_share",
FILE = "aglu/A_LT_Mapping",
FILE = "aglu/A_LandLeaf3",
"L2012.AgYield_bio_ref",
"L2012.AgProduction_ag_irr_mgmt"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L2242.LN4_Logit",
"L2242.LN4_NodeGhostShare",
"L2242.LN4_NodeIsGhostShareRel"))
} else if(command == driver.MAKE) {
# silence package check notes
GLU_name <- LandLeaf <- LandNode4 <- LandNode1 <- LandNode2 <- LandNode3 <- year <-
ghost.share <- GCAM_commodity <- Land_Type <- ghost.unnormalized.share <-
region <- AgSupplySector <- AgSupplySubsector <- NULL
all_data <- list(...)[[1]]
# Load required inputs
A_LandNode_logit_irr <- get_data(all_data, "aglu/A_LandNode_logit_irr")
A_bio_ghost_share <- get_data(all_data, "aglu/A_bio_ghost_share")
A_LT_Mapping <- get_data(all_data, "aglu/A_LT_Mapping")
A_LandLeaf3 <- get_data(all_data, "aglu/A_LandLeaf3")
L2012.AgYield_bio_ref <- get_data(all_data, "L2012.AgYield_bio_ref", strip_attributes = TRUE)
L2012.AgProduction_ag_irr_mgmt <- get_data(all_data, "L2012.AgProduction_ag_irr_mgmt")
# L2242.LN4_Logit: Logit exponent of the fourth land nest by region
# There are no technologies that are disaggregated to irrigated and rainfed but not to lo- and hi-input techs,
# so here we only write out the logit exponent for the irrigated/rainfed node competition.
L2012.AgProduction_ag_irr_mgmt %>%
distinct(region, AgSupplySubsector, AgSupplySector) %>%
bind_rows(distinct(L2012.AgYield_bio_ref, region, AgSupplySubsector, AgSupplySector)) %>%
mutate(AgSupplySector = if_else(grepl("biomassTree", AgSupplySubsector), "biomassTree", "biomassGrass")) %>%
left_join(A_LandLeaf3, by=c("AgSupplySector" = "LandLeaf")) %>%
separate(AgSupplySubsector, c("LandNode4", "GLU_name")) %>%
mutate(logit.year.fillout = min(MODEL_BASE_YEARS)) %>%
# Match in logit exponent values; use left_join because the logit.type values are NA (dropped later)
left_join(A_LandNode_logit_irr, by = c("LandNode4" = "LandNode")) %>%
mutate(LandAllocatorRoot = "root",
LandNode1 = paste(LandNode1, GLU_name, sep = "_"),
LandNode2 = paste(LandNode2, GLU_name, sep = "_"),
LandNode3 = paste(LandNode3, GLU_name, sep = "_"),
LandNode4 = paste(LandNode4, GLU_name, sep = "_")) %>%
select(LEVEL2_DATA_NAMES[["LN4_Logit"]], LOGIT_TYPE_COLNAME) ->
L2242.LN4_Logit
# L2242.LN4_NodeGhostShare:
# Specify ghost node share for bioenergy node in future years (starting with first bio year).
L2012.AgYield_bio_ref %>%
distinct(region, AgSupplySubsector) %>%
mutate(GCAM_commodity = if_else(grepl("^biomassGrass", AgSupplySubsector), "biomassGrass", "biomassTree"),
GLU_name = if_else(grepl("^biomassGrass", AgSupplySubsector), gsub("biomassGrass_", "", AgSupplySubsector),
gsub("biomassTree_", "", AgSupplySubsector))) %>%
left_join_error_no_match(A_LT_Mapping, by = "GCAM_commodity") %>%
mutate(LandAllocatorRoot = "root",
LandNode1 = paste(LandNode1, GLU_name, sep = aglu.LT_GLU_DELIMITER),
LandNode2 = paste(LandNode2, GLU_name, sep = aglu.LT_GLU_DELIMITER),
LandNode3 = paste(LandNode3, GLU_name, sep = aglu.LT_GLU_DELIMITER),
LandNode4 = paste(LandLeaf, GLU_name, sep = aglu.LT_GLU_DELIMITER)) %>%
repeat_add_columns(tibble::tibble(year = MODEL_FUTURE_YEARS)) %>%
filter(year >= aglu.BIO_START_YEAR) %>%
left_join(A_bio_ghost_share, by = "year") %>%
mutate(ghost.unnormalized.share = approx_fun(year, ghost.share)) %>%
select(LEVEL2_DATA_NAMES[["LN4_NodeGhostShare"]]) ->
L2242.LN4_NodeGhostShare
# L2242.LN4_NodeIsGhostShareRel:
# Specify whether bioenergy ghost node share is relative.
# These are the same values that would have been set in the leaves in land input 3.
# Specify whether bioenergy ghost share is relative to the dominant crop
# Note: this was just set to 1 in the old data system
L2242.LN4_NodeGhostShare %>%
select(-year, -ghost.unnormalized.share) %>%
distinct() %>%
mutate(is.ghost.share.relative = 1) ->
L2242.LN4_NodeIsGhostShareRel
# Produce outputs
L2242.LN4_Logit %>%
add_title("Logit exponent of the fourth land nest by region") %>%
add_units("NA") %>%
add_comments("Only write out the logit exponent for the irrigated/rainfed node competition") %>%
add_legacy_name("L2242.LN4_Logit") %>%
add_precursors("aglu/A_LandNode_logit_irr",
"aglu/A_LandLeaf3",
"L2012.AgYield_bio_ref",
"L2012.AgProduction_ag_irr_mgmt") ->
L2242.LN4_Logit
L2242.LN4_NodeGhostShare %>%
add_title("Ghost node share for bioenergy node in future years, the fourth land nest") %>%
add_units("NA") %>%
add_comments("Ghost share values are read in from an assumptions file") %>%
add_comments("and then mapped to all bioenergy nodes for future years after the bio start year") %>%
add_legacy_name("L2242.LN4_NodeGhostShare") %>%
add_precursors("L2012.AgYield_bio_ref",
"aglu/A_bio_ghost_share",
"aglu/A_LT_Mapping") ->
L2242.LN4_NodeGhostShare
L2242.LN4_NodeIsGhostShareRel %>%
add_title("Whether bioenergy ghost share is relative to the dominant crop, the forth land nest") %>%
add_units("NA") %>%
add_comments("Copy the nesting structure from L2242.LN4_NodeGhostShare") %>%
add_comments("Set is.ghost.share.relative to 1") %>%
add_legacy_name("L2242.LN4_NodeIsGhostShareRel") %>%
same_precursors_as("L2242.LN4_NodeGhostShare") ->
L2242.LN4_NodeIsGhostShareRel
return_data(L2242.LN4_Logit, L2242.LN4_NodeGhostShare, L2242.LN4_NodeIsGhostShareRel)
} else {
stop("Unknown command")
}
}
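# Illustrative sketch (outside gcamdata; the numbers below are made up):
# the left_join() + approx_fun() step above amounts to linearly interpolating
# the assumed ghost shares over the model years, roughly:
assumed <- data.frame(year = c(2025, 2050, 2100), ghost.share = c(0, 0.25, 1))
yrs <- seq(2025, 2100, by = 5)
data.frame(year = yrs,
           ghost.unnormalized.share = approx(assumed$year, assumed$ghost.share,
                                             xout = yrs)$y)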
|
/input/gcamdata/R/zaglu_L2242.land_input_4_irr_mgmt.R
|
permissive
|
JGCRI/gcam-core
|
R
| false | false | 7,452 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build-reference.R
\name{build_reference}
\alias{build_reference}
\alias{build_reference_index}
\title{Build reference section}
\usage{
build_reference(pkg = ".", lazy = TRUE, examples = TRUE,
run_dont_run = FALSE, mathjax = TRUE, seed = 1014, override = list(),
preview = NA)
build_reference_index(pkg = ".")
}
\arguments{
\item{pkg}{Path to package.}
\item{lazy}{If \code{TRUE}, only rebuild pages where the \code{.Rd}
is more recent than the \code{.html}. This makes it much easier to
rapidly prototype. It is set to \code{FALSE} by \code{\link[=build_site]{build_site()}}.}
\item{examples}{Run examples?}
\item{run_dont_run}{Run examples that are surrounded in \\dontrun?}
\item{mathjax}{Use mathjax to render math symbols?}
\item{seed}{Seed used to initialize so that random examples are
reproducible.}
\item{override}{An optional named list used to temporarily override
values in \code{_pkgdown.yml}}
\item{preview}{If \code{TRUE}, or \code{is.na(preview) && interactive()}, will preview
freshly generated section in browser.}
}
\description{
By default, pkgdown will generate an index that simply lists all
the functions in alphabetical order. To override this, provide a
\code{reference} section in your \code{_pkgdown.yml} as described
below.
}
\section{YAML config}{
To tweak the index page, you need a section called \code{reference}
which provides a list of sections, each containing a \code{title}, a list of
\code{contents}, and an optional \code{description}.
For example, the following code breaks up the functions in pkgdown
into two groups:
\preformatted{
reference:
- title: Render components
desc: Build each component of the site.
contents:
- starts_with("build_")
- init_site
- title: Templates
contents:
- render_page
}
Note that \code{contents} can contain either a list of function names,
or if the functions in a section share a common prefix or suffix, you
can use \code{starts_with("prefix")} and \code{ends_with("suffix")} to
select them all. For more complex naming schemes you can use an arbitrary
regular expression with \code{matches("regexp")}. You can also use a leading
\code{-} to exclude matches from a section. By default, functions that
match multiple topics will exclude topics with keyword "internal". To
include, use (e.g.) \code{starts_with("build_", internal = TRUE)}.
Alternatively, you can select topics that contain specified concepts with
\code{has_concept("blah")}. Concepts are not currently well-supported by
roxygen2, but may be useful if you write Rd files by hand.
pkgdown will check that all non-internal topics are included on
this page, and will generate a warning if you have missed any.
}
\section{Icons}{
You can optionally supply an icon for each help topic. To do so, you'll
need a top-level \code{icons} directory. This should contain \code{.png} files
that are either 40x40 (for regular display) or 80x80 (if you want
retina display). Icons are matched to topics by aliases.
}
\examples{
# This example illustrates some important output types
# The following output should be wrapped over multiple lines
a <- 1:100
a
cat("This some text!\\n")
message("This is a message!")
warning("This is a warning!")
# This is a multi-line block
{
1 + 2
2 + 2
}
\dontrun{
stop("This is an error!", call. = FALSE)
}
\donttest{
# This code won't generally be run by CRAN. But it
# will be run by pkgdown
b <- 10
a + b
}
}
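% Hypothetical usage sketch (not part of this Rd): after editing the
% `reference` section of _pkgdown.yml, rebuild just the affected pages:
pkgdown::build_reference_index(pkg = ".")         # regenerate reference/index.html
pkgdown::build_reference(pkg = ".", lazy = TRUE)  # rebuild only out-of-date topics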
|
/man/build_reference.Rd
|
permissive
|
dfalster/pkgdown
|
R
| false | true | 3,481 |
rd
|
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818197352948e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613105737-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 344 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kms_operations.R
\name{kms_cancel_key_deletion}
\alias{kms_cancel_key_deletion}
\title{Cancels the deletion of a customer master key (CMK)}
\usage{
kms_cancel_key_deletion(KeyId)
}
\arguments{
\item{KeyId}{[required] The unique identifier for the customer master key (CMK) for which to
cancel deletion.
Specify the key ID or the Amazon Resource Name (ARN) of the CMK.
For example:
\itemize{
\item Key ID: \code{1234abcd-12ab-34cd-56ef-1234567890ab}
\item Key ARN:
\code{arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab}
}
To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey.}
}
\description{
Cancels the deletion of a customer master key (CMK). When this operation
is successful, the CMK is set to the \code{Disabled} state. To enable a CMK,
use EnableKey. You cannot perform this operation on a CMK in a different
AWS account.
}
\details{
For more information about scheduling and canceling deletion of a CMK,
see \href{https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html}{Deleting Customer Master Keys}
in the \emph{AWS Key Management Service Developer Guide}.
The result of this operation varies with the key state of the CMK. For
details, see \href{https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html}{How Key State Affects Use of a Customer Master Key}
in the \emph{AWS Key Management Service Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$cancel_key_deletion(
KeyId = "string"
)
}
}
\examples{
# The following example cancels deletion of the specified CMK.
\donttest{svc$cancel_key_deletion(
KeyId = "1234abcd-12ab-34cd-56ef-1234567890ab"
)}
}
\keyword{internal}
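% Hypothetical usage sketch (assumes AWS credentials and a region are already
% configured through the usual mechanisms):
svc <- paws::kms()
resp <- svc$cancel_key_deletion(
  KeyId = "1234abcd-12ab-34cd-56ef-1234567890ab"
)
resp$KeyId  # ARN of the CMK whose deletion was cancelled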
|
/cran/paws.security.identity/man/kms_cancel_key_deletion.Rd
|
permissive
|
peoplecure/paws
|
R
| false | true | 1,756 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/expression_cluster_SC_function.R
\name{expression_cluster_SC}
\alias{expression_cluster_SC}
\title{Function to create barplots from different cluster groups}
\usage{
expression_cluster_SC(fluidSCproc, based_on_values = "log2ExNorm",
cluster_column_name = "clust", centrality_function = mean,
log_base_adjust = 10, cluster_genes = c("Hierarchical", "Correlation"),
nrClust = 6, distMethod = "euclidean", clustMethod = "average",
geneWeights = NULL, quantile_breaks = c(0.25, 0.5, 0.75, 0.95, 1),
quantile_labels = c("very low", "low", "medium", "high", "very high"),
alpha_range = c(0.5, 1), selected_assays = NULL, return_data = FALSE)
}
\arguments{
\item{fluidSCproc}{a fluidSCproc S3 object}
\item{based_on_values}{values to use, defaults to "log2ExNorm"}
\item{cluster_column_name}{name of the column with cluster information}
\item{centrality_function}{function to use for calculating a centrality score, e.g. mean, median, ...}
\item{log_base_adjust}{transform deviation scores}
\item{cluster_genes}{algorithm to cluster assays}
\item{nrClust}{number of assay clusters}
\item{distMethod}{method to calculate distance matrix}
\item{clustMethod}{method to cluster distance matrix}
\item{geneWeights}{named assay vector with weights, will be used to highlight more important genes}
\item{quantile_breaks}{divide geneWeights in quantile groups}
\item{quantile_labels}{give labels to groups created by quantile_breaks}
\item{alpha_range}{vector with range for alpha-value}
\item{selected_assays}{select subset of assays}
\item{return_data}{boolean to return data or graphic plot, default to FALSE}
}
\value{
returns a barplot with facets for different clusters and important assays highlighted based on assay weights
}
\description{
This function will calculate deviation scores for the different cluster groups from the population average and display them as bar plots.
In addition it will also cluster the assays.
}
\details{
NA
}
\examples{
\dontrun{
# `fluidSC_proc` is a placeholder for a processed fluidSCproc object
expression_cluster_SC(fluidSC_proc)
}
}
|
/man/expression_cluster_SC.Rd
|
no_license
|
RubD/fluidSCqpcr
|
R
| false | false | 2,081 |
rd
|
# MULTIVARIABLE ANALYSIS --------------------------------------------------
# 0 Set up ----------------------------------------------------------------
library(tidyverse)
library(lubridate)
library(broom)
library(lmtest)
ae_synthetic_Prov <- readRDS('ae_synthetic_Prov.rds')
# 1 Main effects model --------------------------------------------
# variables to include in the initial main effects model
# AE_Arrival_Mode
# Age_Band
# Sex
# IMD_Decile_From_LSOA
# AE_Arrive_HourOfDay
# der_Num_Diag_Group
# der_Dist_Group
initial_main_effects_model <- glm(formula = Admitted_Flag ~ AE_Arrival_Mode + Age_Band + Sex + IMD_Decile_From_LSOA +
AE_Arrive_HourOfDay + der_Num_Diag_Group + der_Dist_Group,
data = ae_synthetic_Prov,
family = binomial(link = 'logit'))
tidy(initial_main_effects_model) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, coefficient = estimate, oddsRatio, pValue) %>%
View()
# let's view the (exponentiated) model coefficients (i.e. the odds ratios)
tidy(initial_main_effects_model) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, oddsRatio) %>%
mutate(term = factor(term),
term = factor(term, levels = rev(levels(term)))) %>%
ggplot() +
geom_point(aes(x = term, y = oddsRatio), colour = '#5881c1') +
geom_hline(aes(yintercept = 1)) +
scale_y_log10() +
coord_flip()
# Should we exclude any variable from this initial model?
# Look for p > 0.05
# 1.1 Try removing IMD_Decile_From_LSOA -----------------------------------
initial_main_effects_model_upd <- update(initial_main_effects_model, . ~ . - IMD_Decile_From_LSOA )
# Examine p.value of added variable(s)
tidy(initial_main_effects_model_upd) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, coefficient = estimate, oddsRatio, pValue) %>%
View()
# Compare coefficients of the current and updated models
tidy(initial_main_effects_model) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, original = oddsRatio) %>%
full_join(tidy(initial_main_effects_model_upd) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, updated = oddsRatio),
by = 'term') %>%
gather(model, oddsRatio, 3:2) %>%
mutate(term = factor(term),
term = factor(term, levels = rev(levels(term))),
model = factor(model, levels = c('original', 'updated'))) %>%
ggplot() +
geom_point(aes(x = term, y = oddsRatio, colour = model)) +
geom_hline(aes(yintercept = 1)) +
scale_y_log10() +
scale_colour_manual(values = c('#f9bf07', '#5881c1')) +
coord_flip()
# compare model fits
lrtest(initial_main_effects_model, initial_main_effects_model_upd)
# close call, but probably worth keeping this variable
# 1.2 Try adding in previously excluded variable -------------------------
# Now let's test whether including any of the variables that we excluded
# in the univariable analysis adds value to this initial model
# Note that the significance of some variables only becomes clear when other
# variables are included
# der_Arrive_Quarter
# der_Arrive_Month
# der_Arrive_Winter
# der_Arrive_Weekday
# der_Arrive_FinYear
# Start with der_Arrive_Quarter
# Add into model
initial_main_effects_model_upd <- update(initial_main_effects_model, . ~ . + der_Arrive_Quarter )
# Examine p.value of added variable(s)
tidy(initial_main_effects_model_upd) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, coefficient = estimate, oddsRatio, pValue) %>%
View()
# Compare coefficients of the current and updated models
tidy(initial_main_effects_model) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, original = oddsRatio) %>%
full_join(tidy(initial_main_effects_model_upd) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, updated = oddsRatio),
by = 'term') %>%
gather(model, oddsRatio, 3:2) %>%
mutate(term = factor(term),
term = factor(term, levels = rev(levels(term))),
model = factor(model, levels = c('original', 'updated'))) %>%
ggplot() +
geom_point(aes(x = term, y = oddsRatio, colour = model)) +
geom_hline(aes(yintercept = 1)) +
scale_y_log10() +
scale_colour_manual(values = c('#f9bf07', '#5881c1')) +
coord_flip()
# compare model fits
lrtest(initial_main_effects_model, initial_main_effects_model_upd)
## repeat for the other variables
# 1.3 EXERCISE - repeat for other variables -------------------------------
# der_Arrive_Month
# der_Arrive_Winter
# der_Arrive_Weekday
# der_Arrive_FinYear
# This is our main effects model
main_effects_model <- initial_main_effects_model
# 2 Preliminary final model -----------------------------------------------
# Next we need to consider interaction between our predictor variables
# AE_Arrival_Mode
# Age_Band
# Sex
# IMD_Decile_From_LSOA
# AE_Arrive_HourOfDay
# der_Num_Diag_Group
# der_Dist_Group
# (7 choose 2 = ) 21 potential 2-way interactions between these variables
# Which have some plausibility?
# Consider the following
# AE_Arrival_Mode:Age_Band
# Age_Band:Sex
# AE_Arrival_Mode:AE_Arrive_HourOfDay
# IMD_Decile_From_LSOA:der_Dist_Group
# 2.1 Try AE_Arrival_Mode:Age_Band ----------------------------------------
main_effects_model_upd <- update(main_effects_model, . ~ . + AE_Arrival_Mode:Age_Band)
# Examine coefficient and p.value of added variable(s)
# Look for p.value of interaction terms < 0.05
tidy(main_effects_model_upd) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, coefficient = estimate, oddsRatio, pValue) %>%
View()
# Compare model fits
lrtest(main_effects_model, main_effects_model_upd)
# 2.2 EXERCISE Repeat for other plausible interaction terms ---------------
# Age_Band:Sex
# AE_Arrival_Mode:AE_Arrive_HourOfDay
# IMD_Decile_From_LSOA:der_Dist_Group
# Add all accepted interaction terms together and look again at p.values
preliminary_final_model <- update(main_effects_model, . ~ . +
AE_Arrival_Mode:Age_Band +
Age_Band:Sex +
AE_Arrival_Mode:AE_Arrive_HourOfDay)
tidy(preliminary_final_model) %>%
mutate(oddsRatio = exp(estimate),
pValue = round(p.value, digits = 4)) %>%
select(term, coefficient = estimate, oddsRatio, pValue) %>%
View()
# Compare model fits
lrtest(main_effects_model, preliminary_final_model)
# 3 Save our preliminary final model --------------------------------------
save(preliminary_final_model, file = 'preliminary_final_model.RDA')
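# Hypothetical follow-up (not in the original script): a quick goodness-of-fit
# summary via McFadden's pseudo R-squared for the preliminary final model
ll_full <- as.numeric(logLik(preliminary_final_model))
ll_null <- as.numeric(logLik(update(preliminary_final_model, . ~ 1)))
1 - ll_full / ll_null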
|
/2_2_multivariable_analysis.R
|
no_license
|
andrw-jns/mclass_logistic_regression
|
R
| false | false | 7,093 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/training_run.R
\name{training_run}
\alias{training_run}
\title{Run a training script}
\usage{
training_run(file = "train.R", context = "local",
config = Sys.getenv("R_CONFIG_ACTIVE", unset = "default"), flags = NULL,
properties = NULL, run_dir = NULL, echo = TRUE, view = "auto",
envir = parent.frame(), encoding = getOption("encoding"))
}
\arguments{
\item{file}{Path to training script (defaults to "train.R")}
\item{context}{Run context (defaults to "local")}
\item{config}{The configuration to use. Defaults to the active configuration
for the current environment (as specified by the \code{R_CONFIG_ACTIVE}
environment variable), or \code{default} when unset.}
\item{flags}{Named list with flag values (see \code{\link[=flags]{flags()}}) or path
to YAML file containing flag values.}
\item{properties}{Named character vector with run properties. Properties are
additional metadata about the run which will be subsequently available via
\code{\link[=ls_runs]{ls_runs()}}.}
\item{run_dir}{Directory to store run data within}
\item{echo}{Print expressions within training script}
\item{view}{View the results of the run after training. The default "auto"
will view the run when executing a top-level (printed) statement in an
interactive session. Pass \code{TRUE} or \code{FALSE} to control whether the view is
shown explicitly. You can also pass "save" to save a copy of the
run report at \code{tfruns.d/view.html}}
\item{envir}{The environment in which the script should be evaluated}
\item{encoding}{The encoding of the training script; see \code{\link[=file]{file()}}.}
}
\value{
Single row data frame with run flags, metrics, etc.
}
\description{
Run a training script
}
\details{
The training run will by default use a unique new run directory
within the "runs" sub-directory of the current working directory (or to the
value of the \code{tfruns.runs_dir} R option if specified).
The directory name will be a timestamp (in GMT time). If a duplicate name is
generated then the function will wait long enough to return a unique one.
If you want to use an alternate directory to store run data you can either
set the global \code{tfruns.runs_dir} R option, or you can pass a \code{run_dir}
explicitly to \code{training_run()}, optionally using the \code{\link[=unique_run_dir]{unique_run_dir()}}
function to generate a timestamp-based directory name.
}
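% Hypothetical usage sketch (assumes a train.R script that declares a
% `learning_rate` flag via tfruns::flags()):
run <- tfruns::training_run(
  file  = "train.R",
  flags = list(learning_rate = 0.001)  # override the script's default flag value
)
run  # single-row data frame with the run's flags and metrics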
|
/man/training_run.Rd
|
no_license
|
charlesyauiag/tfruns
|
R
| false | true | 2,452 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{lungB}
\alias{lungB}
\title{matrix 675x156}
\format{An object of class \code{matrix} with 675 rows and 156 columns.}
\usage{
lungB
}
\description{
matrix 675x156
}
\keyword{datasets}
|
/man/lungB.Rd
|
no_license
|
AlfonsoRReyes/martinezEDA
|
R
| false | true | 289 |
rd
|
#' Traverse inward node-by-node until stopping conditions are met
#'
#' @description
#'
#' From a graph object of class `dgr_graph`, move along inward edges from one or
#' more nodes present in a selection to other connected nodes, replacing the
#' current nodes in the selection with those nodes traversed to until reaching
#' nodes that satisfy one or more conditions.
#'
#' This traversal function makes use of an active selection of nodes. After the
#' traversal, depending on the traversal conditions, there will either be a
#' selection of nodes or no selection at all.
#'
#' Selections of nodes can be performed using the following node selection
#' (`select_*()`) functions: [select_nodes()], [select_last_nodes_created()],
#' [select_nodes_by_degree()], [select_nodes_by_id()], or
#' [select_nodes_in_neighborhood()].
#'
#' Selections of nodes can also be performed using the following traversal
#' (`trav_*()`) functions: [trav_out()], [trav_in()], [trav_both()],
#' [trav_out_node()], [trav_in_node()], [trav_out_until()], or
#' [trav_in_until()].
#'
#' @inheritParams render_graph
#' @param conditions An option to use a stopping condition for the traversal. If
#' the condition is met during the traversal (i.e., the node(s) traversed to
#' match the condition), then those traversals will terminate at those nodes.
#' Otherwise, traversals with continue and terminate when the number of steps
#' provided in `max_steps` is reached.
#' @param max_steps The maximum number of `trav_in()` steps (i.e., node-to-node
#' traversals in the inward direction) to allow before stopping.
#' @param exclude_unmatched If `TRUE` (the default value) then any nodes not
#' satisfying the conditions provided in `conditions` that are in the ending
#' selection are excluded.
#' @param add_to_selection If `TRUE` then every node traversed will be part of
#' the final selection of nodes. If `FALSE` (the default value) then only the
#' nodes finally traversed to will be part of the final node selection.
#'
#' @return A graph object of class `dgr_graph`.
#'
#' @examples
#' # Create a path graph and add
#' # values of 1 to 10 across the
#' # nodes from beginning to end;
#' # select the last path node
#' graph <-
#' create_graph() %>%
#' add_path(
#' n = 10,
#' node_data = node_data(
#' value = 1:10)) %>%
#' select_nodes_by_id(
#' nodes = 10)
#'
#' # Traverse inward, node-by-node
#' # until stopping at a node where
#' # the `value` attribute is 1
#' graph <-
#' graph %>%
#' trav_in_until(
#' conditions =
#' value == 1)
#'
#' # Get the graph's node selection
#' graph %>% get_selection()
#'
#' # Create two cycles in a graph and
#' # add values of 1 to 6 to the
#' # first cycle, and values 7 to
#' # 12 in the second; select nodes
#' # `6` and `12`
#' graph <-
#' create_graph() %>%
#' add_cycle(
#' n = 6,
#' node_data = node_data(
#' value = 1:6)) %>%
#' add_cycle(
#' n = 6,
#' node_data = node_data(
#' value = 7:12)) %>%
#' select_nodes_by_id(
#' nodes = c(6, 12))
#'
#' # Traverse inward, node-by-node
#' # from `6` and `12` until stopping
#' # at the first nodes where the
#' # `value` attribute is 1, 2, or 10;
#' # specify that we should only
#' # keep the finally traversed to
#' # nodes that satisfy the conditions
#' graph <-
#' graph %>%
#' trav_in_until(
#' conditions =
#' value %in% c(1, 2, 10),
#' exclude_unmatched = TRUE)
#'
#' # Get the graph's node selection
#' graph %>% get_selection()
#'
#' @import rlang
#' @export
trav_in_until <- function(
graph,
conditions,
max_steps = 30,
exclude_unmatched = TRUE,
add_to_selection = FALSE
) {
# Get the time of function start
time_function_start <- Sys.time()
# Get the name of the function
fcn_name <- get_calling_fcn()
# Validation: Graph object is valid
if (graph_object_valid(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "The graph object is not valid")
}
# Validation: Graph contains nodes
if (graph_contains_nodes(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "The graph contains no nodes")
}
# Validation: Graph contains edges
if (graph_contains_edges(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = "The graph contains no edges")
}
# Validation: Graph object has valid node selection
if (graph_contains_node_selection(graph) == FALSE) {
emit_error(
fcn_name = fcn_name,
reasons = c(
"There is no selection of nodes available.",
"any traversal requires an active selection",
"this type of traversal requires a selection of nodes"))
}
# Capture provided conditions
conditions <- rlang::enquo(conditions)
# Initialize the node stack and
# the step count
node_stack <- vector(mode = "integer")
step <- 0
starting_nodes <-
suppressMessages(
graph %>%
get_selection())
# Determine which nodes satisfy the
# conditions provided
all_nodes_conditions_met <-
graph %>%
get_node_ids(conditions = !!conditions)
if (exclude_unmatched & all(is.na(all_nodes_conditions_met))) {
# Clear the active selection
graph <-
suppressMessages(
graph %>%
clear_selection())
# Remove action from graph log
graph$graph_log <-
graph$graph_log[-nrow(graph$graph_log), ]
# Update the `graph_log` df with an action
graph$graph_log <-
add_action_to_log(
graph_log = graph$graph_log,
version_id = nrow(graph$graph_log) + 1,
function_used = fcn_name,
time_modified = time_function_start,
duration = graph_function_duration(time_function_start),
nodes = nrow(graph$nodes_df),
edges = nrow(graph$edges_df))
# Perform graph actions, if any are available
if (nrow(graph$graph_actions) > 0) {
graph <-
graph %>%
trigger_graph_actions()
}
# Write graph backup if the option is set
if (graph$graph_info$write_backups) {
save_graph_as_rds(graph = graph)
}
return(graph)
}
repeat {
# Perform traversal
graph <- graph %>% trav_in()
# Remove action from graph log
graph$graph_log <-
graph$graph_log[-nrow(graph$graph_log), ]
# If any nodes are `all_nodes_conditions_met` nodes
# deselect that node and save the node in a stack
if (any(suppressMessages(graph %>% get_selection()) %in%
all_nodes_conditions_met)) {
node_stack <-
c(node_stack,
intersect(
suppressMessages(graph %>% get_selection()),
all_nodes_conditions_met))
# Remove the node from the active selection
graph <-
graph %>% deselect_nodes(nodes = node_stack)
# Remove action from graph log
graph$graph_log <-
graph$graph_log[-nrow(graph$graph_log), ]
}
    # Stop if the traversal has exhausted the active selection
    if (all(is.na(suppressMessages(get_selection(graph))))) break
    step <- step + 1
    # Stop once the maximum number of steps has been reached
    if (step == max_steps) break
}
  if (length(node_stack) > 0) {
if (add_to_selection) {
if (exclude_unmatched) {
node_stack <-
intersect(node_stack, all_nodes_conditions_met)
}
path_nodes <-
node_stack %>%
purrr::map(
.f = function(x) {
graph %>%
to_igraph() %>%
igraph::all_simple_paths(
from = x,
to = starting_nodes,
mode = "out") %>%
unlist() %>%
as.integer()}) %>%
unlist() %>%
unique()
graph <-
graph %>%
select_nodes_by_id(unique(path_nodes))
} else {
graph <-
graph %>%
select_nodes_by_id(unique(node_stack))
}
# Remove action from graph log
graph$graph_log <-
graph$graph_log[-nrow(graph$graph_log), ]
} else if (length(node_stack) < 1) {
    if (exclude_unmatched &&
        !all(is.na(suppressMessages(get_selection(graph))))) {
new_selection <- suppressMessages(get_selection(graph))
graph <-
suppressMessages(
graph %>%
clear_selection() %>%
select_nodes_by_id(
intersect(new_selection, all_nodes_conditions_met)))
      # Remove both the `clear_selection()` and `select_nodes_by_id()`
      # actions from the graph log
      graph$graph_log <-
        graph$graph_log[-nrow(graph$graph_log), ]
      graph$graph_log <-
        graph$graph_log[-nrow(graph$graph_log), ]
}
}
# Update the `graph_log` df with an action
graph$graph_log <-
add_action_to_log(
graph_log = graph$graph_log,
version_id = nrow(graph$graph_log) + 1,
function_used = fcn_name,
time_modified = time_function_start,
duration = graph_function_duration(time_function_start),
nodes = nrow(graph$nodes_df),
edges = nrow(graph$edges_df))
# Perform graph actions, if any are available
if (nrow(graph$graph_actions) > 0) {
graph <-
graph %>%
trigger_graph_actions()
}
# Write graph backup if the option is set
if (graph$graph_info$write_backups) {
save_graph_as_rds(graph = graph)
}
graph
}
|
/R/trav_in_until.R
|
permissive
|
rich-iannone/DiagrammeR
|
R
| false | false | 9,211 |
r
|
#' GetPairs
#'
#' Form all pairs of rows in \code{X} and compute Mahalanobis distances based on \code{v}.
#'
#' To help with computational constraints, you have the option not to form pairs between all rows of \code{X} but instead to specify a certain number (\code{numForTransitionStart}) of rows to be randomly selected as rows from which transitions start, and another number (\code{numForTransitionEnd}) to be randomly selected as the rows where transitions end. We then form all pairs between transition-start rows and transition-end rows.
#'
#' In order to get a smaller data frame for later manipulations (and maybe just because it's a good idea), you can also specify \code{onlyIncludeNearestN}, in which case we return only the nearest \code{onlyIncludeNearestN} transition ends for each transition start (instead of all pairs).
#'
#' @param X data frame
#' @param u input of interest
#' @param v other inputs
#' @param mahalanobisConstantTerm Weights are (1 / (mahalanobisConstantTerm + Mahalanobis distance))
#' @param numForTransitionStart number of rows to use as the start points of transitions (defaulting to `NULL`, we use all rows)
#' @param numForTransitionEnd number of rows to use as potential end points of transitions (defaulting to `NULL`, we use all rows)
#' @param onlyIncludeNearestN for each transition start, we only include as transition end points the nearest `onlyIncludeNearestN` rows (defaulting to `NULL`, we use all rows)
#' @return a data frame with the inputs \code{v} from the first of each pair, \code{u} from each half (with ".B" appended to the second), and the Mahalanobis distances between the pairs.
#' @export
#' @examples
#' v <- rnorm(100)
#' u <- v + 0.3*rnorm(100)
#' qplot(v,u)
#' X = data.frame(v=v,u=u)
#' pairsDF <- GetPairs(X, "v", "u")
#' pairsDFRow1 <- subset(pairsDF, OriginalRowNumber==1)
#' # When we subset to one "original row number", all of the v's are the same:
#' print(pairsDFRow1$v)
#' # ... and u's corresponding to closer v.B (the v in the second element of the pair) have higher weight:
#' qplot(u.B, Weight, data=pairsDFRow1)
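#'
#' # A hedged sketch of the sampling options for larger data
#' # (illustrative settings, not from the original examples):
#' pairsSampled <- GetPairs(X, "v", "u",
#'                          numForTransitionStart = 50,
#'                          numForTransitionEnd = 80,
#'                          onlyIncludeNearestN = 20)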
GetPairs <- function(X, u, v,
numForTransitionStart = NULL,
numForTransitionEnd = NULL,
onlyIncludeNearestN = NULL,
mahalanobisConstantTerm=1) {
assert_that(length(u) == 1) # make sure we have exactly 1 input var of interest
for (columnName in c(u,v)) {
assert_that(columnName %in% names(X))
columnClass <- class(X[[columnName]])
if (!(columnClass) %in% c("integer", "numeric")) {
stop(sprintf("Sorry, column %s is of class %s. I can only deal with integer and numeric types for now.", columnName, columnClass))
}
}
if (!is.null(numForTransitionStart)) {
X1 <- X[sample.int(nrow(X), size=numForTransitionStart), c(v,u)]
} else {
X1 <- X[c(v,u)]
}
if (!is.null(numForTransitionEnd)) {
X2 <- X[sample.int(nrow(X), size=numForTransitionEnd), c(v,u)]
} else {
X2 <- X[c(v,u)]
}
X1$OriginalRowNumber <- 1:nrow(X1)
X2$OriginalRowNumber.B <- 1:nrow(X2)
vMatrix1 <- as.matrix(X1[,v])
vMatrix2 <- as.matrix(X2[,v])
  covV <- cov(vMatrix2)
  # Rows of distMatrix index rows of X2; columns index rows of X1
  distMatrix <- apply(vMatrix1, 1, function(row) mahalanobis(vMatrix2, row, covV))
colnames(distMatrix) <- 1:ncol(distMatrix)
rownames(distMatrix) <- 1:nrow(distMatrix)
distDF <- as.data.frame(as.table(distMatrix))
names(distDF) <- c("OriginalRowNumber.B", "OriginalRowNumber", "MahalanobisDistance")
if (!is.null(onlyIncludeNearestN)) {
distDF <- distDF %>%
group_by(OriginalRowNumber) %>%
      filter(rank(MahalanobisDistance, ties.method="random") <= onlyIncludeNearestN) # "<=" keeps the nearest N (rank starts at 1)
}
pairs <- merge(X1, distDF, by = "OriginalRowNumber")
pairs <- merge(X2, pairs, by = "OriginalRowNumber.B", suffixes = c(".B", ""))
pairs$Weight <- 1/(mahalanobisConstantTerm + pairs$MahalanobisDistance)
# If we haven't sampled, then OriginalRowNumber == OriginalRowNumber.B means that
# the transition start and end are the same, so we should remove those rows.
if (is.null(numForTransitionStart) && is.null(numForTransitionEnd)) {
pairs <- subset(pairs, OriginalRowNumber != OriginalRowNumber.B)
}
# Renormalize weights:
pairs <- pairs %>% group_by(OriginalRowNumber) %>% mutate(Weight = Weight/sum(Weight))
return(data.frame(pairs))
}
#' PlotPairCumulativeWeights
#'
#' For a sample of transition start rows, we plot rank of transition end (by increasing weight) vs. cumulative weight. This gives a sense of how much weight is going into the nearest points vs. further ones.
#'
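#' @param pairs data frame of pairs, as returned by \code{GetPairs}
#' @param numOriginalRowNumbersToPlot number of transition-start rows to sample for plotting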
#' @export
#' @examples
#' v <- rnorm(100)
#' u <- v + 0.3*rnorm(100)
#' X = data.frame(v=v,u=u)
#' pairsDF <- GetPairs(X, "v", "u")
#' pairsDFRow1 <- subset(pairsDF, OriginalRowNumber==1)
#' # For most original rows, we get 75% of the weight in 50% of the pairs:
#' PlotPairCumulativeWeights(pairsDF)
PlotPairCumulativeWeights <- function(pairs, numOriginalRowNumbersToPlot = 20) {
rowNumSample <- sample(unique(pairs$OriginalRowNumber))[1:numOriginalRowNumbersToPlot]
pairsWithCumWeightSums <- pairs %>%
group_by(OriginalRowNumber) %>%
arrange(OriginalRowNumber, -Weight) %>%
mutate(CumulativeWeight = cumsum(Weight), Rank = dense_rank(-Weight))
pairsSubset <- subset(pairsWithCumWeightSums, OriginalRowNumber %in% rowNumSample)
ggplot() +
geom_line(aes(x=Rank, y=CumulativeWeight, color=factor(OriginalRowNumber)), data = pairsSubset, alpha = .2) +
geom_line(aes(x=Rank, y=CumulativeWeight), stat = "summary", fun.y = "median", data=pairsWithCumWeightSums)
}
|
/R/pairs.R
|
permissive
|
dchudz/predcomps
|
R
| false | false | 5,633 |
r
|
# This code
# - calculates greeks over time for ATM, 1SD, 2SD calls
# - plots the greeks for each call
# Author: Michael Julian
# install.packages(c('fOptions','ggplot2'))
##### if you do not have fOptions/ggplot2, run the install.packages() line above first (it takes a character vector of package names).
library(fOptions); library(ggplot2)
# ATM/OTM CALL DELTA OVER TIME
sd_2_delta_call = GBSGreeks('delta', 'c', S=100, X=116 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_1_delta_call = GBSGreeks('delta', 'c', S=100, X=108 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_0_delta_call = GBSGreeks('delta', 'c', S=100, X=100 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
# ATM/OTM CALL THETA OVER TIME
sd_2_theta_call = GBSGreeks('theta', 'c', S=100, X=116 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_1_theta_call = GBSGreeks('theta', 'c', S=100, X=108 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_0_theta_call = GBSGreeks('theta', 'c', S=100, X=100 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
# ATM/OTM CALL GAMMA OVER TIME
sd_2_gamma_call = GBSGreeks('gamma', 'c', S=100, X=116 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_1_gamma_call = GBSGreeks('gamma', 'c', S=100, X=108 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_0_gamma_call = GBSGreeks('gamma', 'c', S=100, X=100 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
# ATM/OTM CALL VEGA OVER TIME
sd_2_vega_call = GBSGreeks('vega', 'c', S=100, X=116 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_1_vega_call = GBSGreeks('vega', 'c', S=100, X=108 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
sd_0_vega_call = GBSGreeks('vega', 'c', S=100, X=100 , Time=(1:60)/365,
r=.02, b=0, sigma=.35)
df = data.frame(Option = factor(c(rep('ATM Call',60), rep('1 SD Call', 60), rep('2 SD Call', 60))),
Dte= c(rep(1:60,3)),
Gamma=c(sd_0_gamma_call, sd_1_gamma_call, sd_2_gamma_call),
Delta=c(sd_0_delta_call, sd_1_delta_call, sd_2_delta_call),
Theta=c(sd_0_theta_call, sd_1_theta_call, sd_2_theta_call),
Vega =c(sd_0_vega_call, sd_1_vega_call, sd_2_vega_call))
# GAMMA PLOT
ggplot(df) + geom_line(aes(x=Dte,y=Gamma, colour=Option)) +
geom_point(aes(x=Dte,y=Gamma, colour=Option)) +
  ggtitle('GAMMA OVER TIME: (Spot = 100) (IV = 35) (Rf = 2%)')
# DELTA PLOT
ggplot(df) + geom_line(aes(x=Dte,y=Delta, colour=Option)) +
geom_point(aes(x=Dte,y=Delta, colour=Option)) +
  ggtitle('DELTA OVER TIME: (Spot = 100) (IV = 35) (Rf = 2%)')
# THETA PLOT
ggplot(df) + geom_line(aes(x=Dte,y=Theta, colour=Option)) +
geom_point(aes(x=Dte,y=Theta, colour=Option)) +
  ggtitle('THETA OVER TIME: (Spot = 100) (IV = 35) (Rf = 2%)')
# VEGA PLOT
ggplot(df) + geom_line(aes(x=Dte,y=Vega, colour=Option)) +
geom_point(aes(x=Dte,y=Vega, colour=Option)) +
  ggtitle('VEGA OVER TIME: (Spot = 100) (IV = 35) (Rf = 2%)')
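
# A hedged sketch (not in the original): one way to derive the 1SD/2SD
# strikes from spot, vol, and an assumed horizon instead of hard-coding
# 108 and 116. `horizonDays` is an illustrative assumption.
horizonDays <- 30
sdMove <- 100 * .35 * sqrt(horizonDays/365)  # one-standard-deviation move in spot
strike_1sd <- 100 + 1 * sdMove
strike_2sd <- 100 + 2 * sdMove
c(strike_1sd, strike_2sd)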
|
/R_Options/greeksovertime.R
|
no_license
|
MichaelJulian/finance_in_r
|
R
| false | false | 3,015 |
r
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools","haven") # haven provides read_sas()
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file (tokens such as .FYC., .year., and .yy. below appear to be
# template placeholders that get substituted per survey year when this code is generated)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind <- 1
# Sex
FYC <- FYC %>%
mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
"1" = "Male",
"2" = "Female"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~TOTEXP.yy., FUN = svymean, by = ~sex, design = subset(FYCdsgn, TOTEXP.yy. > 0))
print(results)
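# A hedged sketch (not in the original): svyby results carry design-based
# standard errors, so 95% confidence intervals for these subgroup means
# could be pulled with:
confint(results)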
|
/mepstrends/hc_use/json/code/r/meanEXP__sex__ind__.r
|
permissive
|
HHS-AHRQ/MEPS-summary-tables
|
R
| false | false | 1,170 |
r
|
require(R2jags)
require(ggplot2)
require(cowplot)
require(grid)
setwd ("F:/research stuff/FS_PostDoc/outside_consult/LoggingData_Julia") #If running on desktop.
load("Plotting_Pers_LogInfSnagQMD.RData")
##_____________ Logging & Infection parameter estimates for all species ________________________
Spp <- spp
dat <- data.frame(betas[,c("beta.Logging","beta.Logging.lo","beta.Logging.hi",
"beta.Infest","beta.Infest.lo","beta.Infest.hi")],stringsAsFactors=F)
dat$Spp <- Spp
row.names(dat) <- NULL
dat <- dat[order(dat$beta.Infest),]
dat$x <- seq(1:nrow(dat))
pLog <- ggplot(data = dat,aes(x=x,y=beta.Logging)) +
geom_errorbar(aes(ymin=beta.Logging.lo,ymax=beta.Logging.hi),size=1,width=0) +
geom_point(size=2.5) +
geom_hline(yintercept=0) +
coord_flip() +
scale_x_continuous(breaks=seq(1,43),labels=dat$Spp,expand=c(0,1)) +
scale_y_continuous(lim=c(-2.15,2.16)) +
ylab(expression(hat(beta)["Logging"])) + xlab("Species") +
theme(axis.title.y=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
geom_text(aes(x=44,y=-2.1),label="A",size=8)
pInf <- ggplot(data = dat,aes(x=x,y=beta.Infest)) +
geom_errorbar(aes(ymin=beta.Infest.lo,ymax=beta.Infest.hi),size=1,width=0) +
geom_point(size=2.5) +
geom_hline(yintercept=0) +
coord_flip() +
scale_x_continuous(breaks=seq(1,43),labels=dat$Spp,expand=c(0,1)) +
scale_y_continuous(lim=c(-2.15,2.16)) +
ylab(expression(hat(beta)["Infestation"])) + xlab(NULL) +
theme(axis.title.y=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
geom_text(aes(x=44,y=-2.1),label="B",size=8)
p <- ggdraw() +
draw_plot(pLog, x = 0, y = 0, width = .52, height = 1) +
draw_plot(pInf, x = 0.52, y = 0, width = .48, height = 1)
#p
save_plot("Betas_all_spp.jpeg", p, ncol = 3, nrow = 3, dpi=600)
##_____________ Logging, Infection, Snag, & QMD parameter estimates for supported species ____________________
Spp <- spp
dat <- data.frame(betas.supported[,c("beta.Logging","beta.Logging.lo","beta.Logging.hi",
"beta.Infest","beta.Infest.lo","beta.Infest.hi",
"beta.snag","beta.snag.lo","beta.snag.hi",
"beta.QMD","beta.QMD.lo","beta.QMD.hi")],stringsAsFactors=F)
dat$Spp <- row.names(dat)
row.names(dat) <- NULL
dat <- dat[order(dat$beta.Infest),]
dat$x <- seq(1:nrow(dat))
dat$support.Inf <- dat$support.sng <- dat$support.QMD <- "none"
dat$support.Inf[which(dat$beta.Infest.lo>0)] <- "pos"
dat$support.Inf[which(dat$beta.Infest.hi<0)] <- "neg"
dat$support.sng[which(dat$beta.snag.lo>0)] <- "pos"
dat$support.QMD[which(dat$beta.QMD.hi<0)] <- "neg"
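# Note (added): the support.* factor levels ("neg" < "none" < "pos", alphabetical)
# line up with the manual color scales below: blue #0072B2 = credibly negative,
# black = no support, orange #D55E00 = credibly positive.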
pLog <- ggplot(data = dat,aes(x=x,y=beta.Logging)) +
geom_errorbar(aes(ymin=beta.Logging.lo,ymax=beta.Logging.hi),size=1,width=0) +
geom_point(size=2.5) +
geom_hline(yintercept=0) +
coord_flip() +
scale_x_continuous(breaks=seq(1,11),labels=dat$Spp,expand=c(0,1)) +
scale_y_continuous(lim=c(-2.15,2.16)) +
ylab(expression(hat(beta)["Logging"])) + xlab("Species") +
theme(axis.title.y=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
geom_text(aes(x=11.5,y=-2.1),label="A",size=8)
pInf <- ggplot(data = dat,aes(x=x,y=beta.Infest)) +
geom_errorbar(aes(ymin=beta.Infest.lo,ymax=beta.Infest.hi,colour=support.Inf),size=1,width=0) +
geom_point(size=2.5) +
geom_hline(yintercept=0) +
coord_flip() +
scale_colour_manual(values = c('#0072B2','black','#D55E00')) +
scale_x_continuous(breaks=seq(1,11),labels=dat$Spp,expand=c(0,1)) +
scale_y_continuous(lim=c(-2.15,2.16)) +
ylab(expression(hat(beta)["Infestation"])) + xlab(NULL) +
theme(axis.title.y=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
geom_text(aes(x=11.5,y=-2.1),label="B",size=8) +
guides(colour=F)
pSnag <- ggplot(data = dat,aes(x=x,y=beta.snag)) +
geom_errorbar(aes(ymin=beta.snag.lo,ymax=beta.snag.hi,colour=support.sng),size=1,width=0) +
geom_point(size=2.5) +
geom_hline(yintercept=0) +
coord_flip() +
scale_colour_manual(values = c('black','#D55E00')) +
scale_x_continuous(breaks=seq(1,11),labels=dat$Spp,expand=c(0,1)) +
scale_y_continuous(lim=c(-2.15,2.16)) +
ylab(expression(hat(beta)["Snag"])) + xlab("Species") +
theme(axis.title.y=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
geom_text(aes(x=11.5,y=-2.1),label="C",size=8) +
guides(colour=F)
pQMD <- ggplot(data = dat,aes(x=x,y=beta.QMD)) +
geom_errorbar(aes(ymin=beta.QMD.lo,ymax=beta.QMD.hi,colour=support.QMD),size=1,width=0) +
geom_point(size=2.5) +
geom_hline(yintercept=0) +
coord_flip() +
scale_colour_manual(values = c('#0072B2','black')) +
scale_x_continuous(breaks=seq(1,11),labels=dat$Spp,expand=c(0,1)) +
scale_y_continuous(lim=c(-2.15,2.16)) +
ylab(expression(hat(beta)["QMD"])) + xlab(NULL) +
theme(axis.title.y=element_text(size=30)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
geom_text(aes(x=11.5,y=-2.1),label="D",size=8) +
guides(colour=F)
p <- ggdraw() +
draw_plot(pLog, x = 0, y = 0.5, width = .52, height = 0.5) +
draw_plot(pInf, x = 0.52, y = 0.5, width = .48, height = 0.5) +
draw_plot(pSnag, x = 0, y = 0, width = .52, height = 0.5) +
draw_plot(pQMD, x = 0.52, y = 0, width = .48, height = 0.5)
save_plot("Betas_supported_spp.jpeg", p, ncol = 3, nrow = 3, dpi=600)
##_____________ Species richness X Logging & Infection plots ____________________
SR.est$Logging <- rep(Plot.data$Logging,4)
SR.est$Infest <- c(Plot.data$EarlInf_2014,Plot.data$EarlInf_2014,Plot.data$EarlInf_2015,Plot.data$EarlInf_2016)
pLog <- ggplot(data = SR.est,aes(x=Logging,y=md)) +
geom_point(alpha=0.4) +
geom_errorbar(aes(ymin=lo,ymax=hi),width=0.01,alpha=0.4) +
scale_y_continuous(breaks=c(15,20,25)) +
labs(x="Proportion cut stumps",y=NULL) +
theme(axis.title.x=element_text(size=35)) +
theme(axis.title.y=element_blank()) +
theme(axis.text.x=element_text(size=35)) +
theme(axis.text.y=element_text(size=35)) +
geom_text(aes(x=0,y=30),label="A",size=15)
pInf <- ggplot(data = SR.est,aes(x=Infest,y=md)) +
geom_point(alpha=0.4) +
geom_errorbar(aes(ymin=lo,ymax=hi),width=0.3,alpha=0.4) +
scale_y_continuous(breaks=c(15,20,25)) +
labs(x="Number of early infested trees",y=NULL) +
theme(axis.title.x=element_text(size=35)) +
theme(axis.title.y=element_blank()) +
theme(axis.text.x=element_text(size=35)) +
theme(axis.text.y=element_text(size=35)) +
geom_text(aes(x=0,y=30),label="B",size=15)
p <- ggdraw() +
draw_plot(pLog, x = 0.05, y = 0.5, width = .95, height = 0.5) +
draw_plot(pInf, x = 0.05, y = 0, width = .95, height = 0.5) +
draw_plot_label(label="Species Richness",size=35,x=0,y=0.2,angle=90)
save_plot("SpecRich.jpeg", p, ncol = 2.5, nrow = 3, dpi=600)
##_____________ Species occupancy estimates and predictions ____________________
## 9 species related with infestation
# Compile column indices for predicted occupancy #
prd.md <- which(substr(dimnames(psi.Inf)[[2]],1,7)=="psiPred"&substr(dimnames(psi.Inf)[[2]],
(nchar(dimnames(psi.Inf)[[2]])-1),
nchar(dimnames(psi.Inf)[[2]]))=="md")
prd.lo <- which(substr(dimnames(psi.Inf)[[2]],1,7)=="psiPred"&substr(dimnames(psi.Inf)[[2]],
(nchar(dimnames(psi.Inf)[[2]])-1),
nchar(dimnames(psi.Inf)[[2]]))=="lo")
prd.hi <- which(substr(dimnames(psi.Inf)[[2]],1,7)=="psiPred"&substr(dimnames(psi.Inf)[[2]],
(nchar(dimnames(psi.Inf)[[2]])-1),
nchar(dimnames(psi.Inf)[[2]]))=="hi")
prd.cols <- as.numeric(rbind(prd.md,prd.lo,prd.hi)) #column indices
rm(prd.md,prd.lo,prd.hi)
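# (Added) A hedged sketch of a helper that could replace the repeated
# per-species blocks below; it assumes psi.Inf is a matrix with species
# rownames (as indexed above) and mirrors the ATTW block exactly.
make_inf_plot <- function(spp.code, est.X = c(0, 4.33, 12.7)) {
  est.cols <- c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
                "psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
                "psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")
  dat.est <- setNames(data.frame(matrix(as.numeric(psi.Inf[spp.code, est.cols]),
                                        ncol = 3, byrow = TRUE)),
                      c("psi","psi.lo","psi.hi"))
  dat.est$X <- est.X
  dat.prd <- setNames(data.frame(matrix(as.numeric(psi.Inf[spp.code, prd.cols]),
                                        ncol = 3, byrow = TRUE)),
                      c("psi","psi.lo","psi.hi"))
  dat.prd$X <- InfX.plot
  ggplot(dat.prd, aes(x = X, y = psi)) +
    geom_line(size = 1, linetype = "solid") +
    geom_line(aes(y = psi.lo), size = 1, linetype = "dashed") +
    geom_line(aes(y = psi.hi), size = 1, linetype = "dashed") +
    geom_point(data = dat.est, size = 5) +
    geom_errorbar(data = dat.est, aes(ymin = psi.lo, ymax = psi.hi),
                  size = 1, width = 0.5) +
    scale_y_continuous(lim = c(0, 1.05), breaks = c(0, 0.25, 0.5, 0.75, 1)) +
    ylab(NULL) + xlab(NULL) +
    theme(axis.text.x = element_text(size = 20),
          axis.text.y = element_text(size = 25)) +
    geom_text(aes(x = 3, y = 1.05), label = spp.code, size = 8)
}
# e.g. pATTW <- make_inf_plot("ATTW")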
# ATTW #
dat.est <- psi.Inf["ATTW",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["ATTW",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pATTW <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
# geom_errorbarh(data=dat.est,aes(y=psi,xmin=bin.lo,xmax=bin.hi),size=1,height=0,color="dark gray",linetype="longdash") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
# geom_vline(aes(xintercept=0.5),size=0.5) + # Add vertical lines bin boundaries for finte-sample estimate
# geom_vline(aes(xintercept=6.5),size=0.5) +
ylab(NULL) + xlab(NULL) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
# ylab("Occupancy") + xlab("Infestation") +
# theme(axis.title.x=element_text(size=30)) +
# theme(axis.title.y=element_text(size=30)) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "ATTW",size=8)
#save_plot("ATTWv1.jpeg", pATTW, ncol = 2.5, nrow = 3, dpi=600)
#save_plot("ATTWv2.jpeg", pATTW, ncol = 2.5, nrow = 3, dpi=600)
# BTLH #
dat.est <- psi.Inf["BTLH",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["BTLH",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pBTLH <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "BTLH",size=8)
# CLNU #
dat.est <- psi.Inf["CLNU",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["CLNU",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pCLNU <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "CLNU",size=8)
# EVGR #
dat.est <- psi.Inf["EVGR",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["EVGR",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pEVGR <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "EVGR",size=8)
# HOWR #
dat.est <- psi.Inf["HOWR",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["HOWR",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pHOWR <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "HOWR",size=8)
# NOFL #
dat.est <- psi.Inf["NOFL",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["NOFL",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pNOFL <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "NOFL",size=8)
# WAVI #
dat.est <- psi.Inf["WAVI",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["WAVI",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pWAVI <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "WAVI",size=8)
# WETA #
dat.est <- psi.Inf["WETA",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["WETA",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pWETA <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "WETA",size=8)
# WEWP #
dat.est <- psi.Inf["WEWP",c("psiEst.Inf0.md","psiEst.Inf0.lo","psiEst.Inf0.hi",
"psiEst.Inflo.md","psiEst.Inflo.lo","psiEst.Inflo.hi",
"psiEst.Infhi.md","psiEst.Infhi.lo","psiEst.Infhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(0,4.33,12.7)
dat.est$bin.lo <- c(0,1,7)
dat.est$bin.hi <- c(0,5,23)
dat <- as.numeric(psi.Inf["WEWP",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- InfX.plot
pWEWP <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
ylab(NULL) + xlab(NULL) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=3,y=1.05),label = "WEWP",size=8)
p <- ggdraw() +
draw_plot(pATTW, x = 0.05, y = 0.684, width = .317, height = .317) +
draw_plot(pBTLH, x = 0.367, y = 0.684, width = .317, height = .317) +
draw_plot(pCLNU, x = 0.684, y = 0.684, width = .317, height = .317) +
draw_plot(pNOFL, x = 0.05, y = 0.367, width = .317, height = .317) +
draw_plot(pEVGR, x = 0.367, y = 0.367, width = .317, height = .317) +
draw_plot(pWAVI, x = 0.684, y = 0.367, width = .317, height = .317) +
draw_plot(pHOWR, x = 0.05, y = 0.05, width = .317, height = .317) +
draw_plot(pWETA, x = 0.367, y = 0.05, width = .317, height = .317) +
draw_plot(pWEWP, x = 0.684, y = 0.05, width = .317, height = .317) +
draw_plot_label(label=c("Infestation (Number of infested spruce)","Point occupancy"),
size=c(30,30),x=c(0,0),y=c(0.05,0.3),angle=c(0,90))
save_plot("SppPlots_Inf.jpeg", p, ncol = 3, nrow = 3, dpi=600)
## 2 species related with snags, and 3 with QMD
# Compile column indices for predicted occupancy with snags #
prd.md <- which(substr(dimnames(psi.snag)[[2]],1,7)=="psiPred"&substr(dimnames(psi.snag)[[2]],
(nchar(dimnames(psi.snag)[[2]])-1),
nchar(dimnames(psi.snag)[[2]]))=="md")
prd.lo <- which(substr(dimnames(psi.snag)[[2]],1,7)=="psiPred"&substr(dimnames(psi.snag)[[2]],
(nchar(dimnames(psi.snag)[[2]])-1),
nchar(dimnames(psi.snag)[[2]]))=="lo")
prd.hi <- which(substr(dimnames(psi.snag)[[2]],1,7)=="psiPred"&substr(dimnames(psi.snag)[[2]],
(nchar(dimnames(psi.snag)[[2]])-1),
nchar(dimnames(psi.snag)[[2]]))=="hi")
prd.cols <- as.numeric(rbind(prd.md,prd.lo,prd.hi)) #column indices
rm(prd.md,prd.lo,prd.hi)
# HOWR #
dat.est <- psi.snag["HOWR",c("psiEst.snaglo.md","psiEst.snaglo.lo","psiEst.snaglo.hi",
"psiEst.snagmd.md","psiEst.snagmd.lo","psiEst.snagmd.hi",
"psiEst.snaghi.md","psiEst.snaghi.lo","psiEst.snaghi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(1.36,3.69,7.31)
dat.est$bin.lo <- c(0,3,7)
dat.est$bin.hi <- c(2,6,11)
dat <- as.numeric(psi.snag["HOWR",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- SnagX.plot
pHOWR <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
ylab(NULL) + xlab("Snags (count)") +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=2,y=1.05),label = "HOWR",size=8)
# STJA #
dat.est <- psi.snag["STJA",c("psiEst.snaglo.md","psiEst.snaglo.lo","psiEst.snaglo.hi",
"psiEst.snagmd.md","psiEst.snagmd.lo","psiEst.snagmd.hi",
"psiEst.snaghi.md","psiEst.snaghi.lo","psiEst.snaghi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(1.36,3.69,7.31)
dat.est$bin.lo <- c(0,3,7)
dat.est$bin.hi <- c(2,6,11)
dat <- as.numeric(psi.snag["STJA",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- SnagX.plot
pSTJA <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=0.5) +
ylab(NULL) + xlab("Snags (count)") +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=2,y=1.05),label = "STJA",size=8)
# Compile column indices for predicted occupancy with QMD #
prd.md <- which(substr(dimnames(psi.QMD)[[2]],1,7)=="psiPred"&substr(dimnames(psi.QMD)[[2]],
(nchar(dimnames(psi.QMD)[[2]])-1),
nchar(dimnames(psi.QMD)[[2]]))=="md")
prd.lo <- which(substr(dimnames(psi.QMD)[[2]],1,7)=="psiPred"&substr(dimnames(psi.QMD)[[2]],
(nchar(dimnames(psi.QMD)[[2]])-1),
nchar(dimnames(psi.QMD)[[2]]))=="lo")
prd.hi <- which(substr(dimnames(psi.QMD)[[2]],1,7)=="psiPred"&substr(dimnames(psi.QMD)[[2]],
(nchar(dimnames(psi.QMD)[[2]])-1),
nchar(dimnames(psi.QMD)[[2]]))=="hi")
prd.cols <- as.numeric(rbind(prd.md,prd.lo,prd.hi)) #column indices
rm(prd.md,prd.lo,prd.hi)
# AMRO #
dat.est <- psi.QMD["AMRO",c("psiEst.QMDlo.md","psiEst.QMDlo.lo","psiEst.QMDlo.hi",
"psiEst.QMDmd.md","psiEst.QMDmd.lo","psiEst.QMDmd.hi",
"psiEst.QMDhi.md","psiEst.QMDhi.lo","psiEst.QMDhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(514,818,1331)
dat.est$bin.lo <- c(331,635,960)
dat.est$bin.hi <- c(630,939,2441)
dat <- as.numeric(psi.QMD["AMRO",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- QMDX.plot
pAMRO <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=50) +
labs(y=NULL,x=expression("QMD ("~cm^2~")")) +
scale_y_continuous(lim=c(0,1.06),breaks=c(0,0.25,0.5,0.75,1)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=600,y=1.06),label = "AMRO",size=8)
# WETA #
dat.est <- psi.QMD["WETA",c("psiEst.QMDlo.md","psiEst.QMDlo.lo","psiEst.QMDlo.hi",
"psiEst.QMDmd.md","psiEst.QMDmd.lo","psiEst.QMDmd.hi",
"psiEst.QMDhi.md","psiEst.QMDhi.lo","psiEst.QMDhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(514,818,1331)
dat.est$bin.lo <- c(331,635,960)
dat.est$bin.hi <- c(630,939,2441)
dat <- as.numeric(psi.QMD["WETA",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- QMDX.plot
pWETA <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=50) +
labs(y=NULL,x=expression("QMD ("~cm^2~")")) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=600,y=1.05),label = "WETA",size=8)
# WEWP #
dat.est <- psi.QMD["WEWP",c("psiEst.QMDlo.md","psiEst.QMDlo.lo","psiEst.QMDlo.hi",
"psiEst.QMDmd.md","psiEst.QMDmd.lo","psiEst.QMDmd.hi",
"psiEst.QMDhi.md","psiEst.QMDhi.lo","psiEst.QMDhi.hi")]
dat.est <- data.frame(rbind(as.numeric(dat.est[1:3]),as.numeric(dat.est[4:6]),as.numeric(dat.est[7:9])))
names(dat.est) <- c("psi","psi.lo","psi.hi")
dat.est$X <- c(514,818,1331)
dat.est$bin.lo <- c(331,635,960)
dat.est$bin.hi <- c(630,939,2441)
dat <- as.numeric(psi.QMD["WEWP",prd.cols])
dat.prd <- dat[1:3]
for(i in seq(4,length(dat),by=3)) dat.prd <- rbind(dat.prd,dat[i:(i+2)])
dat.prd <- data.frame(dat.prd,row.names=NULL)
names(dat.prd) <- c("psi","psi.lo","psi.hi")
dat.prd$X <- QMDX.plot
pWEWP <- ggplot(data = dat.prd,aes(x=X,y=psi)) +
geom_line(size=1,linetype="solid") +
geom_line(aes(y=psi.lo),size=1,linetype="dashed") +
geom_line(aes(y=psi.hi),size=1,linetype="dashed") +
geom_point(data=dat.est,aes(x=X,y=psi),size=5) +
geom_errorbar(data=dat.est,aes(x=X,ymin=psi.lo,ymax=psi.hi),size=1,width=50) +
labs(y=NULL,x=expression("QMD ("~cm^2~")")) +
scale_y_continuous(lim=c(0,1.05),breaks=c(0,0.25,0.5,0.75,1)) +
theme(axis.title.x=element_text(size=30)) +
theme(axis.text.x=element_text(size=20)) +
theme(axis.text.y=element_text(size=25)) +
guides(shape=FALSE,linetype=FALSE) +
geom_text(aes(x=600,y=1.05),label = "WEWP",size=8)
p <- ggdraw() +
draw_plot(pHOWR, x = 0.05, y = 0.66, width = .475, height = .33) +
draw_plot(pSTJA, x = 0.525, y = 0.66, width = .475, height = .33) +
draw_plot(pAMRO, x = 0.05, y = 0.33, width = .475, height = .33) +
draw_plot(pWETA, x = 0.525, y = 0.33, width = .475, height = .33) +
draw_plot(pWEWP, x = 0.05, y = 0, width = .475, height = .33) +
draw_plot_label(label="Point occupancy",size=30,x=0,y=0.3,angle=90)
save_plot("SppPlots_SnagQMD.jpeg", p, ncol = 2.5, nrow = 3, dpi=600)
|
/archive/Plotting_Pers_LogInfSnagQMD.R
|
no_license
|
qureshlatif/Julia-Kelly-Ch3
|
R
| false | false | 31902 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bullet-scores.R
\name{max_u}
\alias{max_u}
\title{Wilcox test of bullet to bullet similarity}
\usage{
max_u(land1, land2, scores, addNA = FALSE)
}
\arguments{
\item{land1}{(numeric) vector with land ids of bullet 1}
\item{land2}{(numeric) vector with land ids of bullet 2}
\item{scores}{numeric vector of scores to be summarized into a single number}
\item{addNA}{how are missing values treated? addNA = TRUE leaves missing values, addNA=FALSE imputes with 0.}
}
\value{
numeric vector of binary prediction whether two lands are same-source. Vector has the same length as the input vectors.
}
\description{
The combination of \code{land1} and \code{land2} are a key to the scores,
i.e. if a bullet has six lands, each of the input vectors should have
length 36.
}
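\examples{
# Hypothetical sketch (not from the package docs): score all 6 x 6 land
# pairings of two six-land bullets, then summarize them with max_u().
pairs <- expand.grid(land1 = 1:6, land2 = 1:6)
scores <- runif(nrow(pairs))
max_u(pairs$land1, pairs$land2, scores)
}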
|
/man/max_u.Rd
|
no_license
|
heike/bulletxtrctr
|
R
| false | true | 845 |
rd
|
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = numeric(0), rs = numeric(0), temp = c(2.77059250438547e-305, -1.25285923573267e-52, 3.55262847582597e-157, 5.01953721589298e-150, 1.93571408430154e+257, 8.69418809820149e-304, 3.55262942202735e-157, 3.15081457687199e-92, 2.58900187138465e+256, -8.2690320226195e-103, -9.15531387510807e-246, -1.99202664855694e-137, -1.00521720423163e-92, -1.86834569065576e+236, 4.01110925644457e+166, NaN, 1.78094221491197e-269, 1.95236685739849e-214, 2.28917898403533e-310, 0, 0))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615841916-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 661 |
r
|
|
# load data
library(ggplot2)
library(caret)
library(RKEEL)
# library(rDML) # just in case
library(kknn)
library(GGally)
library(Hmisc)
library(dplyr)
library(corrplot)
library(tidyr)
library(VIM)
library(mice)
library(bmrm)
library(DMwR)
library(NoiseFiltersR)
library(beeswarm)
library(moments)
library(MASS)
library(FSelector)
library("tree")
library(rpart)
require(discretization)
library(party)
library(RWeka)
library(Amelia)
train <- read.csv("./train.csv", header=TRUE, na.strings="?")
test <- read.csv("./test.csv", header = TRUE, na.strings = "?")
attr.transform.add <- function(data){
trans <- wrong.remove(data)
trans$tX2 <- log(trans$X2)
trans$tX3 <- log(trans$X3)
trans$tX4 <- log(trans$X4)
trans$X6[trans$X6 < 0] <- 0
trans$tX7 <- log(trans$X7)
trans$tX8 <- trans$X8^2
trans$tX9 <- log(trans$X9)
trans$tX13 <- trans$X13^2
trans$tX15 <- log(trans$X15)
trans$tX16 <- log(trans$X16)
trans$tX17 <- log(trans$X17)
trans$tX18 <- log(trans$X18)
# trans$X21 <- cbrt(trans$X21)
  trans$X23[trans$X23 < 0] <- 0
trans$tX23 <- log(trans$X23 + 100)
trans$tX24 <- log(trans$X24)
trans$tX25 <- trans$X25^2
trans$tX26 <- cbrt(trans$X26)
trans$tX27 <- cbrt(trans$X27)
trans$tX28 <- log(trans$X28)
trans$tX29 <- log(trans$X29)
trans$tX31 <- log(trans$X31)
trans$tX33 <- log(trans$X33)
# trans$tX34 <- trans$X34^2
trans$X35[trans$X35 < 0] <- 0
trans$tX35 <- log(trans$X35 + 0.1)
# trans$tX39 <- log(trans$X39)
trans$tX40 <- cbrt(trans$X40)
# trans$tX43 <- cbrt(trans$X43)
trans$tX44 <- log(trans$X44)
trans$X45[trans$X45 < 0] <- 0
trans$tX45 <- sqrt(trans$X45)
trans$tX47 <- cbrt(trans$X47)
trans$tX48 <- log(trans$X48 + 25)
trans$tX49 <- cbrt(trans$X49)
##
trans <- trans[,-c(11,34,39,41,43)]
##
trans
}
wrong.remove <- function(data){
transf <- data.frame(data)
wrongs <- which(
transf$X7 <= 0
)
if(length(wrongs) > 0){
transf <- transf[-wrongs,]
}
transf
}
cbrt <- function(x) sign(x) * abs(x)^(1/3)
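# cbrt is a signed cube root; unlike ^(1/3), it is defined for negative input.
# Illustrative check (not part of the original pipeline):
stopifnot(isTRUE(all.equal(cbrt(-8), -2)))   # (-8)^(1/3) yields NaN in R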
# trials 1-2
# preprocessing
# fit the model
model = rpart(as.factor(C)~.,data = train, method = "class",
              control = rpart.control(minsplit = 10, xval = 10))
# predict labels
model.pred.train = as.numeric(predict(model, train, type = "class"))
model.pred.train[which(model.pred.train == 1)] = 0
model.pred.train[which(model.pred.train == 2)] = 1
round(mean(model.pred.train==train$C)*100,digits=1)
model.pred = as.numeric(predict(model, test, type = "class"))
model.pred
model.pred[which(model.pred == 1)] = 0
model.pred[which(model.pred == 2)] = 1
model.pred
# 1 -> 2
# 0 -> 1
write.csv(data.frame('Id'=c(1:length(model.pred)),'Prediction'=model.pred), file = paste("RAW.csv"), row.names=FALSE)
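# Equivalent one-step decoding, assuming the factor levels are "0"/"1":
# model.pred <- as.numeric(as.character(predict(model, test, type = "class")))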
# trial 3
# preprocessing
train = computeMissingValues(train, type = "knn", k = 2)
new_data = preProcessData(train,test)
scaled.train = new_data[[1]]
scaled.test = new_data[[2]]
# fit the model
model = rpart(as.factor(C)~.,data = scaled.train, method = "class",
              control = rpart.control(minsplit = 10, xval = 10))
# predict labels
model.pred.train = as.numeric(predict(model, scaled.train, type = "class"))
model.pred.train[which(model.pred.train == 1)] = 0
model.pred.train[which(model.pred.train == 2)] = 1
round(mean(model.pred.train==train$C)*100,digits=1)
model.pred = as.numeric(predict(model, scaled.test, type = "class"))
model.pred
model.pred[which(model.pred == 1)] = 0
model.pred[which(model.pred == 2)] = 1
model.pred
# 1 -> 2
# 0 -> 1
write.csv(data.frame('Id'=c(1:length(model.pred)),'Prediction'=model.pred), file = paste("2KNNCS.csv"), row.names=FALSE)
# trial 4
# preprocessing
train = computeMissingValues(train, type = "median")
new_data = preProcessData(train,test)
scaled.train = new_data[[1]]
scaled.test = new_data[[2]]
# fit the model
model = rpart(as.factor(C)~.,data = scaled.train, method = "class",
              control = rpart.control(minsplit = 10, xval = 10))
model = prune(model, cp = model$cptable[which.min(model$cptable[,"xerror"]),"CP"])
# predict labels
model.pred.train = as.numeric(predict(model, scaled.train, type = "class"))
model.pred.train[which(model.pred.train == 1)] = 0
model.pred.train[which(model.pred.train == 2)] = 1
round(mean(model.pred.train==train$C)*100,digits=1)
model.pred = as.numeric(predict(model, scaled.test, type = "class"))
model.pred[which(model.pred == 1)] = 0
model.pred[which(model.pred == 2)] = 1
# 1 -> 2
# 0 -> 1
write.csv(data.frame('Id'=c(1:length(model.pred)),'Prediction'=model.pred), file = paste("MedianCS.csv"), row.names=FALSE)
# trial 5
# preprocessing
ini = mice(train, maxit = 0)
quitar = as.character(ini$loggedEvents[,"out"])
valores = mice(train, meth="pmm", seed = 500, remove_collinear = FALSE)
compData = complete(valores,1)
train_1 = compData
new_data = preProcessData(train_1,test)
scaled.train = new_data[[1]]
scaled.test = new_data[[2]]
# fit the model
model = rpart(as.factor(C)~.,data = scaled.train, method = "class",
              control = rpart.control(minsplit = 10, xval = 10))
# predict labels
model.pred.train = as.numeric(predict(model, scaled.train, type = "class"))
model.pred.train[which(model.pred.train == 1)] = 0
model.pred.train[which(model.pred.train == 2)] = 1
round(mean(model.pred.train==train$C)*100,digits=1)
model.pred = as.numeric(predict(model, scaled.test, type = "class"))
model.pred
model.pred[which(model.pred == 1)] = 0
model.pred[which(model.pred == 2)] = 1
model.pred
# 1 -> 2
# 0 -> 1
write.csv(data.frame('Id'=c(1:length(model.pred)),'Prediction'=model.pred), file = paste("MedianCS.csv"), row.names=FALSE)
# trial 8
# preprocessing
data.sin.na = computeMissingValues(train, type = 'rf')
new_data = preProcessData(data.sin.na,test)
scaled.train = new_data[[1]]
scaled.test = new_data[[2]]
# fit the model
model = rpart(as.factor(C)~.,data = scaled.train, method = "class",
              control = rpart.control(minsplit = 10, xval = 10))
model = prune(model,cp = model$cptable[which.min(model$cptable[,"xerror"]),"CP"])
# predict labels
model.pred.train = as.numeric(predict(model, scaled.train, type = "class"))
model.pred.train[which(model.pred.train == 1)] = 0
model.pred.train[which(model.pred.train == 2)] = 1
round(mean(model.pred.train==scaled.train$C)*100,digits=1)
model.pred = as.numeric(predict(model, scaled.test, type = "class"))
model.pred
model.pred[which(model.pred == 1)] = 0
model.pred[which(model.pred == 2)] = 1
model.pred
# 1 -> 2
# 0 -> 1
write.csv(data.frame('Id'=c(1:length(model.pred)),'Prediction'=model.pred), file = paste("RFCS.csv"), row.names=FALSE)
# trials 9-11
library(FSelector)
Class = train[,51]
train_1 = train[,-51]
weights = FSelector::linear.correlation(Class~.,data = train_1)
subset = FSelector::cutoff.k(weights,30)
f1 = as.simple.formula(subset,"Class")
subset = FSelector::cutoff.k(weights,3)
f2 = as.simple.formula(subset,"Class")
weights = FSelector::rank.correlation(Class~.,data = train_1)
subset = FSelector::cutoff.k(weights,7)
f3 = as.simple.formula(subset,"Class")
new_data = preProcessData(cbind(train_1,Class),test)
scaled.train = new_data[[1]]
scaled.test = new_data[[2]]
# fit the model (keep the response column alongside the selected predictors)
scaled.train$Class = as.factor(scaled.train$Class)
model = rpart(f3,data = scaled.train[,c(subset,"Class")], method = "class",
              control = rpart.control(minsplit = 10, xval = 10))
model = prune(model,cp = model$cptable[which.min(model$cptable[,"xerror"]),"CP"])
# predict labels
model.pred.train = as.numeric(predict(model, scaled.train, type = "class"))
model.pred.train[which(model.pred.train == 1)] = 0
model.pred.train[which(model.pred.train == 2)] = 1
round(mean(model.pred.train==scaled.train$Class)*100,digits=1)
model.pred = as.numeric(predict(model, scaled.test, type = "class"))
model.pred
model.pred[which(model.pred == 1)] = 0
model.pred[which(model.pred == 2)] = 1
model.pred
# 1 -> 2
# 0 -> 1
write.csv(data.frame('Id'=c(1:length(model.pred)),'Prediction'=model.pred), file = paste("RC7MEDCS.csv"), row.names=FALSE)
# trials 12-14
library(NoiseFiltersR)
library(FSelector)
train_1 = computeMissingValues(train, type = "knn", k = 1)
train_1[,51] <- as.factor(train_1[,51])
set.seed(1)
out.data <- NoiseFiltersR::IPF(train_1, nfolds = 5, consensus = FALSE)
data.clean = out.data$cleanData
new_data = preProcessData(data.clean,test)
scaled.train = new_data[[1]]
scaled.test = new_data[[2]]
Class = scaled.train[,51]
weights <- FSelector::random.forest.importance(Class ~ .,scaled.train[,-51], importance.type = 1)
print(weights)
subset <- cutoff.k(weights,10)
subset1 <- cutoff.k(weights,20)
subset
f <- as.simple.formula(subset,"C")
f1 <- as.simple.formula(subset1,"C")
control <- caret::trainControl(method = "repeatedcv", number = 10, repeats = 5)
modelo <- caret::train(f, data = scaled.train, method="ctree", trControl = control)
confusionMatrix(modelo)
pred.train = predict(modelo,scaled.train, type = "prob")
fit.train = ifelse(pred.train[1]> 0.5,0,1)
round(mean(fit.train==scaled.train$C)*100,digits=1)
pred.test = predict(modelo,scaled.test, type = "prob")
pred.test
fit.test = ifelse(pred.test[1]> 0.5,0,1)
fit.test
write.csv(data.frame('Id'=c(1:length(fit.test)),'Prediction'=fit.test), file = paste("IPFRFICTREE.csv"), row.names=FALSE)
#write.csv(data.frame('Id'=c(1:length(fit.test)),'Prediction'=fit.test), file = paste("IPFRFICTREE20.csv"), row.names=FALSE)
# trial 15
# preprocessing
train.sin.na = computeMissingValues(train, type = 'rf')
train.sin.na[,51] = as.factor(train.sin.na[,51])
train.sin.ruido = filterNoiseData(train.sin.na)
train.sin.ruido[,51] = as.numeric(train.sin.ruido[,51])
train.sin = computeOutliers(train.sin.ruido, type = 'remove')
new_data = preProcessData(train.sin,test)
scaled_train = new_data[[1]]
scaled_test = new_data[[2]]
# comment this line out to skip the discretization step.
cm <- discretization::disc.Topdown(scaled_train,1)
scaled_train = cm$Disc.data
Class = scaled_train[,51]
predictores.1 = rankingLearningRandomForest(scaled_train[,-51],Class,numeroVars = c(0,1))
p1 = predictores.1[1:10]
predictores.1 = featureSelection('chi',10, scaled_train[,-51],Class)
p1=predictores.1
#Class = scaled_train[,51]
#predictores.2 = featureSelection('rfi',10,scaled_train[,-51],Class)
#predictores.2
f1 = as.simple.formula(p1,"C")
#f2 = as.simple.formula(predictores.2,"C")
# convert the class to a factor
tmp = as.numeric(scaled_train[,51])
tmp[which(tmp == 1)] = 0
tmp[which(tmp == 2)] = 1
scaled_train[,51] = as.factor(tmp)
control <- caret::trainControl(method = "repeatedcv", number = 10, repeats = 5)
modelo <- caret::train(f1, data = scaled_train, method="ctree", trControl = control)
confusionMatrix(modelo)
pred.train = predict(modelo,scaled_train, type = "prob")
fit.train = ifelse(pred.train[1]> 0.5,0,1)
round(mean(fit.train==scaled_train$C)*100,digits=1)
pred.test = predict(modelo,scaled_test, type = "prob")
fit.test = ifelse(pred.test[1]> 0.5,0,1)
# these produce the trialN.csv submissions
write.csv(data.frame('Id'=c(1:length(fit.test)),'Prediction'=fit.test), file = paste("trial3.csv"), row.names=FALSE)
######
## swap "knn" for "median" to change the imputation method
train.sin.na = computeMissingValues(train, type = "knn")
train.sin.outliers = computeOutliers(train.sin.na, type = 'mean')
train.sin.ruido = filterNoiseData(train.sin.outliers)
new_data = preProcessData(train.sin.ruido,test)
scaled_train = new_data[[1]]
scaled_test = new_data[[2]]
scaled_train[,51] = scaled_train[,51]-1
train_1 = solveUnbalance(scaled_train)
predictores.1 = featureSelection('rfi',25, train[,-51],train[,51])
p1=predictores.1
p1
f1 = as.simple.formula(p1,"Y")
model = ctree(f1,data = train_1,control = ctree_control(mincriterion = 0.9))
# predict labels
# (prune() and prune.misclass() apply to rpart/tree fits, not party::ctree;
#  complexity is controlled through ctree_control(mincriterion) above)
model.pred.train = predict(model, train_1, type = "response")
round(mean(model.pred.train == train_1$Y)*100, digits = 1)
model.pred = predict(model, scaled_test, type = "response")
model.pred
### PCA + unbalance
Y = as.factor(train_1$Y)   # class column produced by solveUnbalance() above
datos.pca = prcomp(train_1[, names(train_1) != "Y"], scale = TRUE)
library(factoextra)
std_dev <- datos.pca$sdev
pr_var <- std_dev^2
prop_varex <- pr_var/sum(pr_var)
plot(cumsum(prop_varex), xlab = "PCA", type = "b")
data.train = data.frame(datos.pca$x,Y)
test.data = predict(datos.pca, newdata = scaled_test)
test.data = as.data.frame(test.data)
control <- caret::trainControl(method = "repeatedcv", number = 10, repeats = 5)
# keep the first 30 components plus the response column
modelo <- caret::train(Y~., data = data.train[,c(1:30, ncol(data.train))], method="ctree", trControl = control)
confusionMatrix(modelo)
pred.train = predict(modelo,data.train, type = "prob")
fit.train = ifelse(pred.train[1]> 0.5,0,1)
pred.train
round(mean(fit.train==data.train$Y)*100,digits=1)
pred.test = predict(modelo,test.data, type = "prob")
pred.test
fit.test = ifelse(pred.test[1]> 0.5,0,1)
head(pred.test[1])
head(fit.test)
write.csv(data.frame('Id'=c(1:length(fit.test)),'Prediction'=fit.test), file = paste("fsrpart.csv"), row.names=FALSE)
## Amelia imputation and unbalance / correlation removal
train.sin.na = computeMissingValues(train,type = 'mean')
train.sin.na = removeHighCorrelationAttributes(train.sin.na,0.99)
###################################################
missmap(train)
completed_data <- amelia(train, m = 5, p2s = 2)
train1 = completed_data$imputations[[1]]
train2 = completed_data$imputations[[2]]
train3 = completed_data$imputations[[3]]
train4 = completed_data$imputations[[4]]
train5 = completed_data$imputations[[5]]
#######################################################
# swap train.sin.na for trainN, the Amelia-completed datasets.
train.sin.outliers = computeOutliers(train1, type = 'median')
train.sin.ruido = filterNoiseData(train.sin.outliers)
train.sin.ruido = solveUnbalance(train.sin.ruido)
new_data = preProcessData(train.sin.ruido,test)
scaled_train = new_data[[1]]
scaled_test = new_data[[2]]
p1 = featureSelection('rfi',10, scaled_train[,-ncol(scaled_train)], as.factor(scaled_train[,ncol(scaled_train)]))
f = as.simple.formula(p1,"C")
control <- caret::trainControl(method = "repeatedcv", number = 10, repeats = 5)
modelo <- caret::train(f, data = scaled_train, method="ctree", trControl = control)
confusionMatrix(modelo)
pred.train = predict(modelo,scaled_train, type = "prob")
fit.train = ifelse(pred.train[1]> 0.5,0,1)
round(mean(fit.train==scaled_train$C)*100,digits=1)
pred.test = predict(modelo,scaled_test, type = "prob")
pred.test
fit.test = ifelse(pred.test[1]> 0.5,0,1)
fit.test
write.csv(data.frame('Id'=c(1:length(fit.test)),'Prediction'=fit.test), file = paste("unbalancecorrelation.csv"), row.names=FALSE)
######### nuevas pruebas #########
library(bmrm)
# Computes the cross-validation score for the dataset
# funcion.train.predict: function(train, test) that fits the classifier on train and returns its predictions on test
cross_validation <- function(dataset, funcion.train.predict, folds = 10){
fold.indexes <- balanced.cv.fold(dataset$C)
return(mean(sapply(1:folds, cross_validation_fold, fold.indexes, dataset, funcion.train.predict)))
}
cross_validation_fold <- function(fold, indexes, dataset, funcion.train.predict){
  test.inds <- which(indexes==fold)                # hold out one fold for testing
  train <- dataset[-test.inds,]
  test <- na.omit(dataset[test.inds,])
  ypred <- funcion.train.predict(train, test[,-ncol(test)])
  mean(ypred==test$C)
}
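# Minimal usage sketch: any function(train, test) returning predictions can be
# scored this way. The rpart wrapper below is illustrative, not a numbered trial.
cv.rpart.demo <- function(train, test){
  m <- rpart(C ~ ., data = train, method = "class")
  predict(m, test, type = "class")
}
# cross_validation(train, cv.rpart.demo)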
set.seed(28)
funcion.train.predict <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
indices.nas.train <- which(has.na(train))
model <- caret::train(as.factor(C) ~ ., train[-c(outliers.train.por.la.cara, indices.nas.train),], method="ctree", preProcess = c("center", "scale"))
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
print(preds)
return(preds)
}
has.na <- function(x) apply(x,1,function(z)any(is.na(z)))
createSubmission <- function(pred, filename){
sub <- cbind(Id = 1:length(pred), Prediction = as.numeric(as.character(pred)))
write.csv(sub, paste0("subs-tree/",filename), row.names = F)
sub
}
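# note: write.csv() does not create folders; run dir.create("subs-tree",
# showWarnings = FALSE) once before the first submission is written.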
cross_validation(train, funcion.train.predict)
sub.prueba <- funcion.train.predict(train, test)
sub.prueba
sub <- createSubmission(sub.prueba, "pruebaseguronomejora") # 0.83 ??????????
### trial 32
set.seed(28)
train.predict.32 <- function(train, test){
# Train
  # Drop blatant outliers
  outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
  # knn imputation
  train.completed <- knnImputation(train)
  # Noise filter
train.cleaned <- CVCF(train.completed, consensus = F)$cleanData
# Train
model <- caret::train(C ~ ., train.cleaned, method="ctree", preProcess = c("YeoJohnson","center", "scale"),
tuneGrid = expand.grid(mincriterion = c(0.99,0.98,0.97,0.96,0.95,0.94,0.93,0.92,0.91,0.90)))
#print(model)
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.32)
sub.32 <- train.predict.32(train, test)
createSubmission(sub.32, "32")
## trial 33 # -> 0.882
set.seed(28)
train.predict.knn.imputation.ef <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- EF(train.completed)$cleanData
model <- caret::train(C ~ ., train.cleaned, method="ctree", preProcess = c("YeoJohnson","center", "scale"),
tuneGrid = expand.grid(mincriterion = c(0.99,0.98,0.97,0.96,0.95,0.94,0.93,0.92,0.91,0.90)))
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.knn.imputation.ef)
set.seed(28)
sub.33 <- train.predict.knn.imputation.ef(train, test)
createSubmission(sub.33, "33")
#### trial 34
train.predict.34 <- function(train, test){
  # Train
  # Drop blatant outliers
  outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
  # knn imputation
  train.completed <- knnImputation(train)
  # Noise filter
train.cleaned <- CVCF(train.completed, consensus = F)$cleanData
# Train
Class = train.cleaned[,ncol(train.cleaned)]
weights <- FSelector::random.forest.importance(Class ~ .,train.cleaned[,-ncol(train.cleaned)], importance.type = 1)
subset <- cutoff.k(weights,10)
f = as.simple.formula(subset, "C")
model <- caret::train(f, train.cleaned, method="ctree", preProcess = c("YeoJohnson","center", "scale"),
tuneGrid = expand.grid(mincriterion = c(0.99,0.98,0.97,0.96,0.95,0.94,0.93,0.92,0.91,0.90)))
#print(model)
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.34)
set.seed(28)
sub.34 <- train.predict.34(train, test)
createSubmission(sub.34, "34")
#### trial 35
set.seed(28)
train.predict.knn.imputation.ef.majority <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- EF(train.completed, consensus = F)$cleanData
model <- caret::train(C ~ ., train.cleaned, method="ctree", preProcess = c("YeoJohnson","center", "scale"),
tuneGrid = expand.grid(mincriterion = c(0.99,0.98,0.97,0.96,0.95,0.94,0.93,0.92,0.91,0.90)))
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.knn.imputation.ef.majority)
set.seed(28)
sub.35 <- train.predict.knn.imputation.ef.majority(train, test)
createSubmission(sub.35, "35")
### trial 36
set.seed(28)
train.predict.knn.imputation.cvcf <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- CVCF(train.completed, consensus = T)$cleanData
model <- caret::train(C ~ ., train.cleaned, method="ctree", preProcess = c("center", "scale"),
tuneGrid = expand.grid(mincriterion = c(0.99,0.98,0.97,0.96,0.95,0.94,0.93,0.92,0.91,0.90)))
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.knn.imputation.cvcf)
set.seed(28)
sub.36 <- train.predict.knn.imputation.cvcf(train, test)
createSubmission(sub.36, "36")
##### trial 37
set.seed(28)
train.predict.37 <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- EF(train.completed)$cleanData
train.transformed <- attr.transform.add(train.cleaned)
  scaler <- preProcess(train.transformed) # centering and scaling
train.scaled <- predict(scaler, train.transformed)
model = ctree(C ~ .,data = train.scaled,control = ctree_control(mincriterion = 0.9))
# Predict
test[outliers.test.por.la.cara,] <- 1
test.transformed <- attr.transform.add(test)
test.scaled <- predict(scaler, test.transformed)
preds <- predict(model, test.scaled)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
set.seed(28)
train$C <- as.factor(train$C)
sub.37 <- train.predict.37(train, test)
createSubmission(sub.37, "37")
##### trial 38
set.seed(28)
train.predict.knn.imputation.ipf <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- IPF(train.completed, consensus = T)$cleanData
model = rpart(C ~.,data = train.cleaned, method = "class",
control = rpart.control(minsplit = 10, xval = 10))
model = prune(model, cp = model$cptable[which.min(model$cptable[,"xerror"]),"CP"])
# Predict
preds <- predict(model, test, type="class")
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.knn.imputation.ipf)
set.seed(28)
sub.38 <- train.predict.knn.imputation.ipf(train, test)
createSubmission(sub.38, "38")
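# prune() above keeps the cp value that minimizes cross-validated error; the standard
# rpart accessors for inspecting that choice are shown on a freshly fitted tree
# ('model' itself only exists inside the function, so this is just an illustration):
fit.tmp <- rpart(C ~ ., data = na.omit(train), method = "class",
                 control = rpart.control(minsplit = 10, xval = 10))
printcp(fit.tmp) # cp / nsplit / rel error / xerror / xstd table that prune() consults
plotcp(fit.tmp)  # cross-validated error against cp, with the usual 1-SE reference line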
### trial 39
set.seed(28)
train.predict.knn.imputation.ef.transforms <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- EF(train.completed)$cleanData
train.transformed <- attr.transform.add(train.cleaned)
scaler <- preProcess(train.transformed) # Centrado y escalado
train.scaled <- predict(scaler, train.transformed)
model <- caret::train(C ~ ., train.scaled, method="ctree", preProcess = c("YeoJohnson"),
tuneGrid = expand.grid(mincriterion = c(0.99,0.98,0.97,0.96,0.95,0.94,0.93,0.92,0.91,0.90)))
# Predict
test[outliers.test.por.la.cara,] <- 1
test.transformed <- attr.transform.add(test)
test.scaled <- predict(scaler, test.transformed)
preds <- predict(model, test.scaled)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.knn.imputation.ef.transforms)
set.seed(28)
sub.39 <- train.predict.knn.imputation.ef.transforms(train, test)
createSubmission(sub.39, "39")
### trial 40
set.seed(28)
train.predict.40<- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
indices.outliers <- which(
train$X1 > 1000 |
train$X7 > 300 |
train$X15 > 600 |
train$X16 > 400000 |
train$X17 > 250 |
train$X20 > 300 |
train$X21 < -1300 |
train$X24 > 1700 |
train$X26 < -1500 |
train$X29 > 39 |
train$X33 > 480 |
train$X39 > 400 |
train$X43 > 2000 |
train$X45 > 25
)
if(length(indices.outliers) > 0) train <- train[-indices.outliers,]
print(paste0("Eliminados ",length(indices.outliers), " outliers."))
train.completed <- knnImputation(train)
train.cleaned <- EF(train.completed)$cleanData
model <- caret::train(C ~ ., train.cleaned, method="ctree", preProcess = c("YeoJohnson","center", "scale"),
tuneGrid = expand.grid(mincriterion = c(0.99,0.98,0.97,0.96,0.95,0.94,0.93,0.92,0.91,0.90)))
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.40)
set.seed(28)
sub.40 <- train.predict.40(train, test)
createSubmission(sub.40, "40")
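# The per-column cutoffs above were read off the marginal distributions by hand; a
# generic IQR-based alternative for flagging extreme rows could be sketched like this
# (k = 3 is an assumed, deliberately permissive multiplier, not a tuned value):
iqr.outlier.rows <- function(df, k = 3){
  flags <- sapply(df, function(col){
    q <- quantile(col, c(.25, .75), na.rm = TRUE)
    !is.na(col) & (col < q[1] - k*diff(q) | col > q[2] + k*diff(q))
  })
  which(apply(flags, 1, any))
}
# e.g. indices.outliers <- iqr.outlier.rows(train[,-ncol(train)])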
### trial 41
train.predict.41 <- function(train,test){
# Train
outliers.train <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  #indices.nas.train <- which(has.na(train))
  if(length(outliers.train) > 0) train <- train[-outliers.train,]
  # impute each class separately, so the kNN donors always share the class label
  train.completed.0 <- knnImputation(train[train$C == 0,])
  train.completed.1 <- knnImputation(train[train$C == 1,])
  train.completed <- rbind(train.completed.0, train.completed.1)
  model = ctree(C ~ ., data = train.completed, control = ctree_control(mincriterion = 0.9))
  # Predict
preds <- predict(model, test)
preds[outliers.test] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.41)
set.seed(28)
sub.41 <- train.predict.41(train, test)
createSubmission(sub.41, "41")
### trial 42
set.seed(28)
train.predict.42<- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
indices.outliers <- which(
train$X1 > 1000 |
train$X7 > 300 |
train$X15 > 600 |
train$X16 > 400000 |
train$X17 > 250 |
train$X20 > 300 |
train$X21 < -1300 |
train$X24 > 1700 |
train$X26 < -1500 |
train$X29 > 39 |
train$X33 > 480 |
train$X39 > 400 |
train$X43 > 2000 |
train$X45 > 25
)
if(length(indices.outliers) > 0) train <- train[-indices.outliers,]
print(paste0("Eliminados ",length(indices.outliers), " outliers."))
train.completed <- knnImputation(train)
train.cleaned <- EF(train.completed)$cleanData
  model = ctree(C ~ ., data = train.cleaned)
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.42)
set.seed(28)
sub.42 <- train.predict.42(train, test)
createSubmission(sub.42, "42")
### trial 43
require(imbalance)
train.predict.43 <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- amelia(train, m = 1,p2s = 2)$imputations[[1]]
train.completed$C <- as.factor(train.completed$C)
train.cleaned <- EF(as.data.frame(train.completed))$cleanData
ctrl <- trainControl(method="repeatedcv",number=5,repeats = 3,
sampling = "smote")
model <- caret::train(C ~ ., train.cleaned, method="ctree", preProcess = c("YeoJohnson","center", "scale"), trControl = ctrl,
tuneGrid = expand.grid(mincriterion = c(0.95,0.94,0.93,0.92,0.91,0.90)))
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
cross_validation(train, train.predict.43)
set.seed(28)
sub.43 <- train.predict.43(train, test)
createSubmission(sub.43, "43")
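# caret's sampling = "smote" above rebalances the classes inside each resample; for
# reference, a direct one-shot rebalancing with DMwR would look roughly like this
# (sketch; the perc.over/perc.under values are illustrative, not tuned):
# train.balanced <- DMwR::SMOTE(C ~ ., data = train.cleaned, perc.over = 200, perc.under = 150)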
### trial 44
set.seed(28)
train.predict.knn.44 <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- EF(train.completed)$cleanData
model <- caret::train(C ~ ., train.cleaned, method="J48", preProcess = c("center", "scale"),
tuneGrid = expand.grid(C = c(0.1,0.15,0.2,0.25), M = c(1,2,3,4,5)))
model$bestTune
  # retrain with the best C, M found by the grid search
  model = J48(C~., data=train.cleaned, control = Weka_control(M = 4, C = 0.1))
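  # (sketch) the hard-coded M and C mirror the caret fit's bestTune printed above;
  # taking them programmatically -- before 'model' is overwritten -- would read:
  # model = J48(C~., data=train.cleaned,
  #             control = Weka_control(M = model$bestTune$M, C = model$bestTune$C))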
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.knn.44)
set.seed(28)
sub.44 <- train.predict.knn.44(train, test)
createSubmission(sub.44, "44")
### trial 45
train.predict.knn.45 <- function(train, test){
# Train
outliers.train.por.la.cara <- which(apply(train[,-ncol(train)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
  outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train <- train[-outliers.train.por.la.cara,]
train.completed <- knnImputation(train)
train.cleaned <- CVCF(train.completed, consensus = F)$cleanData
Class = train.cleaned[,ncol(train.cleaned)]
atributos = FSelector::random.forest.importance(Class~., train.cleaned[,-ncol(train.cleaned)])
print(atributos)
subset = FSelector::cutoff.k(atributos,25)
f = as.simple.formula(subset,"C")
model <- caret::train(f, train.cleaned, method="J48", preProcess = c("center", "scale"),
tuneGrid = expand.grid(C = c(0.1,0.15,0.2,0.25), M = c(1,2,3,4,5)))
model$bestTune
  # retrain with the best C, M found by the grid search, on the selected attributes
  model = J48(f, data=train.cleaned, control = Weka_control(M = 4, C = 0.1))
# Predict
preds <- predict(model, test)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.knn.45)
set.seed(28)
sub.45 <- train.predict.knn.45(train, test)
createSubmission(sub.45, "45")
## trial 46
train.predict.46 <- function(train, test){
# Train
train.cleaned <- CVCF(train, consensus = F)$cleanData
outliers.train.por.la.cara <- which(apply(train.cleaned[,-ncol(train.cleaned)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train.cleaned <- train.cleaned[-outliers.train.por.la.cara,]
train.transformed <- attr.transform.add(train.cleaned) %>% dplyr::select(-C,C)
train.completed <- knnImputation(train.transformed)
scaler <- preProcess(train.completed)
train.scaled <- predict(scaler, train.completed)
model <- caret::train(C ~ ., train.scaled, method="J48",
tuneGrid = expand.grid(C = c(0.1,0.15,0.2,0.25), M = c(1,2,3,4,5)))
model$bestTune
  # refit with whichever tuning comes out best
model = J48(C~., data=train.scaled, control = Weka_control(M = 5, C = 0.1))
# Predict
test[outliers.test.por.la.cara,] <- 1
test.transformed <- attr.transform.add(test)
test.scaled <- predict(scaler, test.transformed)
preds <- predict(model, test.scaled)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.46)
set.seed(28)
sub.46 <- train.predict.46(train, test)
createSubmission(sub.46, "46")
### trial 47
train.predict.47 <- function(train, test){
# Train
train.cleaned <- CVCF(train, consensus = F)$cleanData
outliers.train.por.la.cara <- which(apply(train.cleaned[,-ncol(train.cleaned)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train.cleaned <- train.cleaned[-outliers.train.por.la.cara,]
train.transformed <- attr.transform.add(train.cleaned) %>% dplyr::select(-C,C)
train.completed <- knnImputation(train.transformed)
scaler <- preProcess(train.completed)
train.scaled <- predict(scaler, train.completed)
model <- caret::train(C ~ ., train.scaled, method="ctree",
tuneGrid = expand.grid(mincriterion = c(0.95,0.94,0.93,0.92,0.91,0.90)))
model$bestTune
  # refit with whichever tuning comes out best
model = ctree(C ~ .,data = train.scaled,control = ctree_control(mincriterion = 0.95))
# Predict
test[outliers.test.por.la.cara,] <- 1
test.transformed <- attr.transform.add(test)
test.scaled <- predict(scaler, test.transformed)
preds <- predict(model, test.scaled)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
cross_validation(train, train.predict.47)
set.seed(28)
sub.47 <- train.predict.47(train, test)
createSubmission(sub.47, "47")
### trial 48
train.predict.48 <- function(train, test){
# Train
train.cleaned <- CVCF(train, consensus = F)$cleanData
outliers.train.por.la.cara <- which(apply(train.cleaned[,-ncol(train.cleaned)], MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
outliers.test.por.la.cara <- which(apply(test, MARGIN=1, function(x) any(!is.na(x) & x < -68000)))
if(length(outliers.train.por.la.cara) > 0) train.cleaned <- train.cleaned[-outliers.train.por.la.cara,]
train.transformed <- attr.transform.add(train.cleaned) %>% dplyr::select(-C,C)
train.completed <- knnImputation(train.transformed)
#tmp <- cor(train.completed[,-ncol(train.completed)])
#tmp[upper.tri(tmp)] <- 0
#diag(tmp) <- 0
train.discretizado <- arulesCBA::discretizeDF.supervised(C~ .,train.completed, method= "mdlp")
Class = train.discretizado[,ncol(train.discretizado)]
pesos <- FSelector::chi.squared(Class ~., train.discretizado[,-ncol(train.discretizado)])
variables <- FSelector::cutoff.k(pesos,25)
f <- as.simple.formula(variables,"C")
model <- caret::train(C ~ ., train.scaled, method="J48",
tuneGrid = expand.grid(C = c(0.1,0.15,0.2,0.25), M = c(1,2,3,4,5)))
model <- caret::train(C ~ ., train.scaled, method="ctree",
tuneGrid = expand.grid(mincriterion = c(0.95,0.94,0.93,0.92,0.91,0.90)))
# completar con el que salga el mejor
model = J48(f, data=train.discretizado, control = Weka_control(M = 5, C = 0.1))
model = ctree(f,data = train.discretizado, control = ctree_control(mincriterion = 0.95))
# Predict
test[outliers.test.por.la.cara,] <- 1
test.transformed <- attr.transform.add(test)
test.discretized <- arules::discretizeDF(
test.transformed,train.discretizado[,-ncol(train.discretizado)])
preds <- predict(model, test.discretized)
preds[outliers.test.por.la.cara] <- 0
return(preds)
}
train$C <- as.factor(train$C)
set.seed(15)
# the last submissions come from here, by modifying the function and running it step by step
sub.48 <- train.predict.48(train, test)
createSubmission(sub.48, "52")
|
/eljumaja/R/arboles.R
|
no_license
|
jlsuarezdiaz/MD_Clasificacion
|
R
| false | false | 38,078 |
r
|
library(shiny)
shinyUI(pageWithSidebar(
headerPanel("BDA Bank Credit Scoring Calculator"),
sidebarPanel(
h2('Applicant data:'),
numericInput('age', 'Age :',18,min=18, max=65, step=1),
radioButtons('sex', "Sex: ", c("Male"="1","Female"="0")),
radioButtons('married', "Married: ", c("Yes"="1","No"="0")),
numericInput('income', 'Year Income:',15000,min=15000, max=120000, step=1000),
numericInput('creditcard', 'How many credit cards have:',0,min=0, max=6, step=1),
numericInput('expenses', 'Monthly current expenses:',0,min=0, max=60000, step=500),
numericInput('loan', 'Loan amount requested:',15000,min=1000, max=1200000, step=1000),
numericInput('term', 'Loan Term in Months:',12,min=12, max=120, step=12),
numericInput('rate', 'Year Interest rate:',4.5,min=4.5, max=12, step=0.05),
submitButton("Score")
),
mainPanel(
h2('Credit Score is'),
verbatimTextOutput('score'),
h3('Loan Amount granted'),
verbatimTextOutput('oloan'),
h3('Interest rate:'),
verbatimTextOutput('orate'),
h3('Monthly payment:'),
verbatimTextOutput('opayment'),
h2('INSTRUCTIONS'),
    h4('Enter in the left panel the requested information about the loan applicant.
       When the information is complete, press the Score button to get the credit
       score and the general conditions of the loan.
       The higher the score (range 150-990, minimum accepted 600), the more likely the capital will be repaid.
       The monthly payment may not exceed 40% of the net income after
       subtracting the monthly expenses, so the loan amount will be adjusted accordingly.')
)
))
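# The score/payment outputs above are computed server-side (not shown here); the
# monthly payment presumably follows the standard annuity formula, sketched below
# (hypothetical helper, not the app's actual server.R code):
# monthlyPayment <- function(loan, yearRatePct, months){
#   r <- yearRatePct / 100 / 12                  # monthly interest rate
#   loan * r / (1 - (1 + r)^(-months))           # fixed annuity payment
# }
# monthlyPayment(15000, 4.5, 12)                 # ~1280.7 for the default inputs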
|
/ui.R
|
no_license
|
fllaugel/DDP
|
R
| false | false | 1,717 |
r
|
# anGrp.R
# Dombrovski WTW data
library("survival")
library("beeswarm")
library("matrixStats")
# analysis sub-functions
# still need to adapt paths
if (Sys.info()[6] != 'Alex') {
  source('helperFxs.R')
  source('loadData.R')
}
if (Sys.info()[6] == 'Alex') {
  setwd('~/code/wtwSuicide/JTM')
  source('~/code/wtwSuicide/JTM/helperFxs.R')
  source('~/code/wtwSuicide/JTM/loadData.R')
}
# load all data
allData = loadData()
trialData = allData$trialData # unpack trial-level data
subjectData = allData$subjectData # unpack subject-level data
allIDs = names(trialData) # subject IDs
n = length(allIDs) # n
cat('Analyzing data for n','=',n,'subjects.\n')
# control which individual-level plots to generate
plotScheduledDelays = FALSE
plotTrialwiseData = FALSE
plotKMSC = FALSE
plotKMSC_nonImmed = TRUE
# initialize matrices to hold subject-level time series data
kmGrid = seq(0, 20, by=0.1) # grid on which to average survival curves.
subjectKMSC = matrix(NA, nrow=n, ncol=length(kmGrid)) # structure to hold all subjects' survival curves
subjectKMSC_nonImmed = matrix(NA, nrow=n, ncol=length(kmGrid)) # version for the non-immediate-quit analysis
tsGrid = seq(0, 5*60, by=1) # grid on which to average whole-block WTW time series
subjectWTWTS = matrix(NA, nrow=n, ncol=length(tsGrid)) # structure to hold all subjects' WTW time series
# initialize new subject-level columns to hold group data
# subjectData$grpAUC = NA
# subjectData$grpAUC_nonImmed = NA
# subjectData$grpEarnings = NA
# subjectData$nImmedQuits = NA
# descriptive statistics for individual subjects
for (sIdx in 1:n) {
# pull this subject's data
thisID = allIDs[sIdx]
thisTrialData = trialData[[thisID]]
subjectRowIdx = (subjectData$ID == thisID)
subjectData[subjectRowIdx,'grpEarnings'] = max(thisTrialData$totalEarned)
# plot and summarize the distribution of scheduled delays
if (plotScheduledDelays) {
scheduledDelays(thisTrialData,thisID)
}
# plot trial-by-trial data
if (plotTrialwiseData) {
trialPlots(thisTrialData,thisID)
}
# survival analysis
tMax = 20 # time window for the survival analysis (the longest wait time was 20 s)
kmscResults = kmsc(thisTrialData, tMax, thisID, plotKMSC, kmGrid)
subjectData[subjectRowIdx,'grpAUC'] = kmscResults[['auc']]
subjectKMSC[subjectRowIdx,] = kmscResults[['kmOnGrid']]
# survival analysis excluding immediate quits.
immedQuitIdx = (thisTrialData$initialPos == 'optSmall')
subjectData[subjectRowIdx,'nImmedQuits'] = sum(immedQuitIdx) # record the number of immediate quits
kmscResults_nonImmed = kmsc(thisTrialData[!immedQuitIdx,], tMax, thisID, plotKMSC_nonImmed, kmGrid)
subjectData[subjectRowIdx,'grpAUC_nonImmed'] = kmscResults_nonImmed[['auc']]
subjectKMSC_nonImmed[subjectRowIdx,] = kmscResults_nonImmed[['kmOnGrid']]
# note: it's not feasible to compute the KMSC separately for post-long-delay and post-short-delay trials
# because for many subjects, the set of post-long-delay trials includes no 20 s scheduled delays
# (long delays have only 25% frequency and scheduled delays are anticorrelated)
# rate of immediate quits, post-long and post-short delay
isLongDelay = (thisTrialData$designatedWait == 20)
postLongDelay = c(FALSE, head(isLongDelay, n=-1))
postShortDelay = c(FALSE, head(!isLongDelay, n=-1))
subjectData[subjectRowIdx,'propImmedQuits_postLongDelay'] = mean(immedQuitIdx[postLongDelay])
subjectData[subjectRowIdx,'propImmedQuits_postShortDelay'] = mean(immedQuitIdx[postShortDelay])
# calculate an index of flexibility
# i.e. how many times the cursor moved between the two boxes, either within or between trials
# +1 if this trial is not a fast quit but was quit eventually. (wait -> quit during the trial)
# +1 if this trial is not a fast quit and the previous trial was quit. (quit -> wait b/w trials)
# +1 if this trial *is* a fast quit and the previous trial was rewarded. (wait -> quit b/w trials)
subjectData[subjectRowIdx,'nCursorMoves'] = nCursorMoves(thisTrialData)
# for testing: can uncomment the line below and turn on plotTrialwiseData
# cat('Number of cursor moves: ',subjectData[subjectRowIdx,'nCursorMoves'],"\n")
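  # (sketch) per the +1 rules above, the helper presumably tallies something like:
  # fastQuit <- thisTrialData$initialPos == 'optSmall'
  # quit     <- thisTrialData$trialResult == 'quit'       # hypothetical column name
  # sum(!fastQuit & quit) +                               # wait -> quit within a trial
  #   sum(!fastQuit & c(FALSE, head(quit, -1))) +         # quit -> wait between trials
  #   sum(fastQuit & c(FALSE, head(!quit, -1)))           # wait -> quit between trials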
# WTW time series
subjectWTWTS[subjectRowIdx,] = wtwTS(thisTrialData, tsGrid)
  # ***include a measure of consistency for the WTW time series
# wait for input before continuing.
if (any(plotScheduledDelays, plotTrialwiseData, plotKMSC)) {
readline(prompt = paste('subject',thisID,'(hit ENTER to continue)'))
}
# temporary: inspect only a few subjects
# if (sIdx>2) {break}
}
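# kmsc() is defined in helperFxs.R; for orientation, the core of a Kaplan-Meier curve
# evaluated on a fixed grid can be sketched with the survival package as follows
# (column names are illustrative assumptions: waitDuration = observed wait time,
# quitIdx = TRUE when the trial ended in a quit, i.e. the event; rewards are censored):
# km.fit <- survfit(Surv(waitDuration, quitIdx) ~ 1, data = thisTrialData)
# kmOnGrid <- summary(km.fit, times = kmGrid, extend = TRUE)$surv
# auc <- sum(diff(kmGrid) * head(kmOnGrid, -1))  # left Riemann sum over the grid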
##################################################
##### summarize and save group-level results #####
cat('Group Ns by the group1245 field:\n')
print(table(subjectData$group1245))
# save group summary statistics to a csv file
outfname = sprintf('output/grpSummary_n=%d.csv', n)
write.csv(x=subjectData, file=outfname, row.names=FALSE)
cat('Saved group summary output to:',outfname,'\n')
# plot and summarize AUC results
# cat('Distribution of AUC values:\n')
# print(summary(subjectData$grpAUC)) # print a summary
fn <- ecdf(subjectData$grpAUC)
plot(fn, main = sprintf('AUC values, n = %d',n), xlab='AUC (s)',
ylab='Cumulative proportion') # plot the empirical CDF
hist(subjectData$grpAUC, breaks=16, freq=TRUE, main = sprintf('AUC values (n = %d)',n),
     xlab='AUC (s)', xaxp=c(0,20,10)) # plot in histogram form (AUC is bounded by tMax = 20 s)
### beeswarm plots and Kruskal-Wallis tests by clinical group
# set up the grouping variable
# subjectData$PATTYPE = as.factor(subjectData$PATTYPE)
# total earnings by group
beeswarm(grpEarnings ~ group1245, subjectData, cex=1.2, pch=16,
xlab="Group", ylab="Earnings", main="Task earnings",
bty="n", cex.lab=1.5, cex.axis=1.2)
print(kruskal.test(grpEarnings ~ group1245, subjectData))
# AUC by group
beeswarm(grpAUC ~ group1245, subjectData, cex=1.2, pch=16,
xlab="Group", ylab="AUC (s)", main="Willingness to wait (AUC)",
ylim=c(0,20), bty="n", cex.lab=1.5, cex.axis=1.2)
print(kruskal.test(grpAUC ~ group1245, subjectData))
# nImmedQuits by group
beeswarm(nImmedQuits ~ group1245, subjectData, cex=1.2, pch=16,
xlab="Group", ylab="Number of immediate quits", main="Immediate quits",
bty="n", cex.lab=1.5, cex.axis=1.2)
print(kruskal.test(nImmedQuits ~ group1245, subjectData))
# AUC_nonImmed by group
beeswarm(grpAUC_nonImmed ~ group1245, subjectData, cex=1.2, pch=16,
xlab="Group", ylab="AUC (s)", main="Willingness to wait (AUC) w/o immediate quits",
ylim=c(0,20), bty="n", cex.lab=1.5, cex.axis=1.2)
print(kruskal.test(grpAUC_nonImmed ~ group1245, subjectData))
# difference in propImmedQuits after long versus short delays on the previous trial
subjectData$prevDelayDiff = subjectData$propImmedQuits_postLongDelay - subjectData$propImmedQuits_postShortDelay
beeswarm(prevDelayDiff ~ group1245, subjectData, cex=1.2, pch=16,
xlab="Group", ylab="Difference of rates (post-long minus post-short)",
main="Rate of fast quits after long vs. short prior delays",
bty="n", cex.lab=1.5, cex.axis=1.2)
print(kruskal.test(prevDelayDiff ~ group1245, subjectData))
print(wilcox.test(subjectData$propImmedQuits_postLongDelay - subjectData$propImmedQuits_postShortDelay))
# number of cursor movements
beeswarm(nCursorMoves ~ group1245, subjectData, cex=1.2, pch=16,
xlab="Group", ylab="Cursor movements", main="Number of cursor movements",
bty="n", cex.lab=1.5, cex.axis=1.2)
print(kruskal.test(nCursorMoves ~ group1245, subjectData))
# plot group-average KMSC
plot(1, type="n", main="Subgroup mean KMSC",
     xlab="Time (s)", ylab="Survival rate", bty="n", xlim=c(0,20), ylim=c(0,1))
lines(kmGrid, colMeans(subjectKMSC_nonImmed[subjectData$group1245==1,]), col='black', type='l', lwd=3, pch=16)
lines(kmGrid, colMeans(subjectKMSC_nonImmed[subjectData$group1245==2,]), col='red', type='l', lwd=3, pch=16)
lines(kmGrid, colMeans(subjectKMSC_nonImmed[subjectData$group1245==4,]), col='green', type='l', lwd=3, pch=16)
lines(kmGrid, colMeans(subjectKMSC_nonImmed[subjectData$group1245==5,]), col='blue', type='l', lwd=3, pch=16)
legend("bottomleft", c("Grp 1", "Grp 2", "Grp 4", "Grp 5"), bty="n", lty=1, lwd=3,
col=c("black", "red", "green", "blue"))
# plot group-average WTW time series
# can test the slope of this (and whether it differs by group) -- see the sketch at the end of this script
# can tabulate the absolute difference from optimal over time.
# use matplot instead? put each variable in 1 col.
plot(1, type="n", main="Subgroup WTW time series",
     xlab="Time in block (s)", ylab="WTW (s)", bty="n", xlim=c(0,300), ylim=c(0,20))
lines(tsGrid, colMeans(subjectWTWTS[subjectData$group1245==1,]), col='black', type='l', lwd=3, pch=16)
lines(tsGrid, colMeans(subjectWTWTS[subjectData$group1245==2,]), col='red', type='l', lwd=3, pch=16)
lines(tsGrid, colMeans(subjectWTWTS[subjectData$group1245==4,]), col='green', type='l', lwd=3, pch=16)
lines(tsGrid, colMeans(subjectWTWTS[subjectData$group1245==5,]), col='blue', type='l', lwd=3, pch=16)
legend("bottomleft", c("Grp 1", "Grp 2", "Grp 4", "Grp 5"), bty="n", lty=1, lwd=3,
col=c("black", "red", "green", "blue"))
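# --- added sketch: slope of the WTW time series ---
# a minimal illustration of the slope test suggested above (not part of the
# original analysis): fit a simple linear trend to each subject's WTW time
# series and compare the slopes across groups. this ignores the serial
# dependence within each series, so treat it as a rough screen, not a model.
wtwSlope = apply(subjectWTWTS, 1, function(y) coef(lm(y ~ tsGrid))[2])
subjectData$wtwSlope = wtwSlope
beeswarm(wtwSlope ~ group1245, subjectData, cex=1.2, pch=16,
         xlab="Group", ylab="Slope (s of WTW per s in block)", main="WTW time series slope",
         bty="n", cex.lab=1.5, cex.axis=1.2)
print(kruskal.test(wtwSlope ~ group1245, subjectData))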
|
/JTM/anGrp.R
|
no_license
|
mcguirej/wtwSuicide_new
|
R
| false | false | 9,226 |
r
|
## charles ferté
### Charles Ferté
### Sage Bionetworks
### Seattle, WA
### January, 6th 2012
############################################################################################################################
### script for running modelling prediction
############################################################################################################################
PATH <- "/Volumes"
setwd(paste(PATH,"/cferte/FELLOW/cferte/NSCLC_MA/CLIN_DATA_FILES/",sep=""))
load("TS_CLIN.Rdata")
load("VS_CLIN.Rdata")
load("VS2_CLIN.Rdata")
load("VS3_CLIN.Rdata")
TOTAL <- rbind(DirClinF,GirClinF)
TOTAL <- rbind(TOTAL,HouClinF)
TOTAL <- as.data.frame(rbind(TOTAL,ZhuClinF))
TOTAL$OS <- as.numeric(TOTAL$MONTHS_TO_LAST_CONTACT_OR_DEATH)
TOTAL$OSC <- as.numeric(TOTAL$VITAL_STATUS)
MATRIX <- t(TOTAL)
MATRIX <- MATRIX[-c(1,11,12,13,14,20),]
############################################################################################################################
###### cox ph analysis
############################################################################################################################
library(randomSurvivalForest)
library(survival)
mySurv <-Surv(TOTAL$OS,TOTAL$OSC)
myCoxFun <- function(x){
summary(coxph(mySurv ~ as.numeric(x)))$logtest["pvalue"]
}
colnames(TOTAL)
plot(survfit(Surv(TOTAL$OS,TOTAL$OSC) ~ TOTAL$P_Stage), col=1:length(TOTAL$P_Stage),main="OS according to pStage in 464 NSCLC", xlab="months")
summary(coxph(Surv(TOTAL$OS, TOTAL$OSC) ~ TOTAL$P_Stage+TOTAL$SMOKING+TOTAL$Histology+TOTAL$H_Grade+TOTAL$RACE+TOTAL$Age+TOTAL$GENDER+TOTAL$SITE ,method="breslow",data=TOTAL))
myResults <- apply(MATRIX, 1, myCoxFun)
hist(myResults)
summary(myResults)
summary(p.adjust(myResults,method="BH"))
blah <- myResults<.1
blah1 <- p.adjust(myResults,method="BH")<.05
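# note (added): MATRIX_TS, MATRIX_VS and y_OS_TS, y_OS_VS below are assumed
# to be created upstream (training / validation feature matrices and survival
# outcomes); they are not defined in this script.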
MATRIX_TSb <- t(MATRIX_TS[blah,])
MATRIX_VSb <- t(MATRIX_VS[blah,])
all(rownames(y_OS_TS)==rownames(MATRIX_TSb))
all(rownames(y_OS_VS)==rownames(MATRIX_VSb))
MATFINAL <- as.data.frame(cbind(MATRIX_TSb,y_OS_TS))
MATVAL <- as.data.frame(cbind(MATRIX_VSb,y_OS_VS))
colnames(MATFINAL)[!(colnames(MATFINAL) %in% c("time", "status"))] <- paste("v", colnames(MATFINAL)[!(colnames(MATFINAL) %in% c("time", "status"))], sep="")
colnames(MATVAL)[!(colnames(MATVAL) %in% c("time", "status"))] <- paste("v", colnames(MATVAL)[!(colnames(MATVAL) %in% c("time", "status"))], sep="")
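# --- added sketch (not in the original script) ---
# a possible next step: fit a joint Cox model on the BH-selected features;
# kept commented out because it depends on the objects assumed above.
# predictors <- setdiff(colnames(MATFINAL), c("time", "status"))
# fmla <- as.formula(paste("Surv(time, status) ~", paste(predictors, collapse = " + ")))
# multiFit <- coxph(fmla, data = MATFINAL)
# summary(multiFit)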
|
/CLIN_DATA_FILES/COX_CLINCOVARIATES.R
|
no_license
|
chferte/NSCLC_Sig
|
R
| false | false | 2,395 |
r
|
#' DuckDB driver class
#'
#' Implements \linkS4class{DBIDriver}.
#'
#' @aliases duckdb_driver
#' @keywords internal
#' @export
setClass("duckdb_driver", contains = "DBIDriver", slots = list(database_ref = "externalptr", dbdir = "character", read_only = "logical"))
#' DuckDB connection class
#'
#' Implements \linkS4class{DBIConnection}.
#'
#' @aliases duckdb_connection
#' @keywords internal
#' @export
setClass("duckdb_connection",
contains = "DBIConnection",
slots = list(conn_ref = "externalptr",
driver = "duckdb_driver",
debug = "logical",
timezone_out = "character",
tz_out_convert = "character")
)
duckdb_connection <- function(duckdb_driver, debug) {
new(
"duckdb_connection",
conn_ref = .Call(duckdb_connect_R, duckdb_driver@database_ref),
driver = duckdb_driver,
debug = debug,
timezone_out = "UTC",
tz_out_convert = "with"
)
}
#' @rdname duckdb_connection-class
#' @inheritParams methods::show
#' @export
setMethod(
"show", "duckdb_connection",
function(object) {
message(sprintf("<duckdb_connection %s driver=%s>", extptr_str(object@conn_ref), drv_to_string(object@driver)))
invisible(NULL)
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbIsValid
#' @export
setMethod(
"dbIsValid", "duckdb_connection",
function(dbObj, ...) {
valid <- FALSE
tryCatch(
{
dbGetQuery(dbObj, SQL("SELECT 1"))
valid <- TRUE
},
error = function(c) {
}
)
valid
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbSendQuery
#' @inheritParams DBI::dbBind
#' @export
setMethod(
"dbSendQuery", c("duckdb_connection", "character"),
function(conn, statement, params = NULL, ...) {
if (conn@debug) {
message("Q ", statement)
}
statement <- enc2utf8(statement)
stmt_lst <- .Call(duckdb_prepare_R, conn@conn_ref, statement)
res <- duckdb_result(
connection = conn,
stmt_lst = stmt_lst
)
if (length(params) > 0) {
dbBind(res, params)
}
return(res)
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbDataType
#' @export
setMethod(
"dbDataType", "duckdb_connection",
function(dbObj, obj, ...) {
dbDataType(dbObj@driver, obj, ...)
}
)
duckdb_random_string <- function(x) {
paste(sample(letters, 10, replace = TRUE), collapse = "")
}
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbWriteTable
#' @param row.names Whether the row.names of the data.frame should be preserved
#' @param overwrite If a table with the given name already exists, should it be overwritten?
#' @param append If a table with the given name already exists, just try to append the passed data to it
#' @param field.types Override the auto-generated SQL types
#' @param temporary Should the created table be temporary?
#' @export
setMethod(
"dbWriteTable", c("duckdb_connection", "character", "data.frame"),
function(conn,
name,
value,
row.names = FALSE,
overwrite = FALSE,
append = FALSE,
field.types = NULL,
temporary = FALSE,
...) {
check_flag(overwrite)
check_flag(append)
check_flag(temporary)
# TODO: start a transaction if one is not already running
if (overwrite && append) {
stop("Setting both overwrite and append makes no sense")
}
# oof
if (!is.null(field.types) &&
(
!is.character(field.types) ||
any(is.na(names(field.types))) ||
length(unique(names(field.types))) != length(names(field.types)) ||
append
)) {
stop("invalid field.types argument")
}
value <- as.data.frame(value)
if (!is.data.frame(value)) {
stop("need a data frame as parameter")
}
# use Kirill's magic, convert rownames to additional column
value <- sqlRownamesToColumn(value, row.names)
if (dbExistsTable(conn, name)) {
if (overwrite) {
dbRemoveTable(conn, name)
}
if (!overwrite && !append) {
stop(
"Table ",
name,
" already exists. Set overwrite=TRUE if you want
to remove the existing table. Set append=TRUE if you would like to add the new data to the
existing table."
)
}
if (append && any(names(value) != dbListFields(conn, name))) {
stop("Column name mismatch for append")
}
}
table_name <- dbQuoteIdentifier(conn, name)
if (!dbExistsTable(conn, name)) {
column_names <- dbQuoteIdentifier(conn, names(value))
column_types <-
vapply(value, dbDataType, dbObj = conn, FUN.VALUE = "character")
if (!is.null(field.types)) {
mapped_column_types <- field.types[names(value)]
if (any(is.na(mapped_column_types)) ||
length(mapped_column_types) != length(names(value))) {
stop("Column name/type mismatch")
}
column_types <- mapped_column_types
}
temp_str <- ""
if (temporary) temp_str <- "TEMPORARY"
schema_str <- paste(column_names, column_types, collapse = ", ")
dbExecute(conn, SQL(sprintf(
"CREATE %s TABLE %s (%s)", temp_str, table_name, schema_str
)))
}
if (length(value[[1]])) {
classes <- unlist(lapply(value, function(v) {
class(v)[[1]]
}))
for (c in names(classes[classes == "character"])) {
value[[c]] <- enc2utf8(value[[c]])
}
for (c in names(classes[classes == "factor"])) {
levels(value[[c]]) <- enc2utf8(levels(value[[c]]))
}
}
view_name <- sprintf("_duckdb_append_view_%s", duckdb_random_string())
on.exit(duckdb_unregister(conn, view_name))
duckdb_register(conn, view_name, value)
dbExecute(conn, sprintf("INSERT INTO %s SELECT * FROM %s", table_name, view_name))
    on_connection_updated(conn, hint=paste0("Updated table '", table_name, "'"))
invisible(TRUE)
}
)
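# A minimal usage sketch (added; not part of the package source):
#   con <- DBI::dbConnect(duckdb::duckdb(), dbdir = ":memory:")
#   DBI::dbWriteTable(con, "mtcars", mtcars, overwrite = TRUE)
#   DBI::dbReadTable(con, "mtcars")
#   DBI::dbDisconnect(con, shutdown = TRUE)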
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbListTables
#' @export
setMethod(
"dbListTables", "duckdb_connection",
function(conn, ...) {
dbGetQuery(
conn,
SQL(
"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
)
)[[1]]
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbExistsTable
#' @export
setMethod(
"dbExistsTable", c("duckdb_connection", "character"),
function(conn, name, ...) {
if (!dbIsValid(conn)) {
stop("Invalid connection")
}
if (length(name) != 1) {
stop("Can only have a single name argument")
}
exists <- FALSE
tryCatch(
{
dbGetQuery(
conn,
sqlInterpolate(
conn,
"SELECT * FROM ? WHERE FALSE",
dbQuoteIdentifier(conn, name)
)
)
exists <- TRUE
},
error = function(c) {
}
)
exists
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbListFields
#' @export
setMethod(
"dbListFields", c("duckdb_connection", "character"),
function(conn, name, ...) {
names(dbGetQuery(
conn,
sqlInterpolate(
conn,
"SELECT * FROM ? WHERE FALSE",
dbQuoteIdentifier(conn, name)
)
))
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbRemoveTable
#' @export
setMethod(
"dbRemoveTable", c("duckdb_connection", "character"),
function(conn, name, ...) {
dbExecute(
conn,
sqlInterpolate(conn, "DROP TABLE ?", dbQuoteIdentifier(conn, name))
)
invisible(TRUE)
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbGetInfo
#' @export
setMethod(
"dbGetInfo", "duckdb_connection",
function(dbObj, ...) {
info <- dbGetInfo(dbObj@driver)
list(
dbname = info$dbname,
db.version = info$driver.version,
username = NA,
host = NA,
port = NA
)
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbBegin
#' @export
setMethod(
"dbBegin", "duckdb_connection",
function(conn, ...) {
dbExecute(conn, SQL("BEGIN TRANSACTION"))
invisible(TRUE)
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbCommit
#' @export
setMethod(
"dbCommit", "duckdb_connection",
function(conn, ...) {
dbExecute(conn, SQL("COMMIT"))
on_connection_updated(conn, "Committing changes")
invisible(TRUE)
}
)
#' @rdname duckdb_connection-class
#' @inheritParams DBI::dbRollback
#' @export
setMethod(
"dbRollback", "duckdb_connection",
function(conn, ...) {
dbExecute(conn, SQL("ROLLBACK"))
invisible(TRUE)
}
)
|
/tools/rpkg/R/Connection.R
|
permissive
|
mbasmanova/duckdb
|
R
| false | false | 8,649 |
r
|
#R Test Number 3
#pull in data trees
data(trees)
trees
#find median for three variables
apply(trees,2,median)
#print the row number and values that match the median of trees volume
med_val <- median(trees$Volume)
print(trees[trees$Volume == med_val, ])
#calculate diameter
diameter <- trees$Girth/pi
summary(diameter)
#calculate radius
radius <- diameter/2
area <- pi*(radius)^2
summary(area)
#create boxplots
myColor <- "#FF6347"
par(mfrow = c(1,2))
boxplot(diameter, main = "Tree Diameters", notch = TRUE, ylab = "Diameter", col = myColor)
boxplot(area, main = "Tree Areas", notch = TRUE, ylab = "Area", col = "deepskyblue")
#use R as calculator to show that outlier in areas is not an extreme outlier
area
mean(area)
mean(area, trim = .10)
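#--- added check: Tukey's fences make the outlier claim concrete ---
#a value is a mild outlier beyond 1.5*IQR above Q3, extreme beyond 3*IQR
q3 <- quantile(area, 0.75)
mild_fence <- q3 + 1.5 * IQR(area)
extreme_fence <- q3 + 3 * IQR(area)
max(area) > mild_fence     # TRUE: the largest area is a mild outlier
max(area) > extreme_fence  # FALSE: but it is not an extreme outlier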
|
/Week 3/Week 3/RTest-3.R
|
no_license
|
zeelat7/StatistiscalAnalysisPart1
|
R
| false | false | 795 |
r
|
# load dependencies
lefftpack::lazy_setup(); library("ggthemes"); library("jsonlite")
# https://api.mysportsfeeds.com/v1.1/pull/nba/{season-name}/game_boxscore.{format}?gameid={game-identifier}
# load functions + api credentials
source("msf_functions.r")
# generate url for play-by-play api call from season and game string
url <- make_msf_pbp_url(cred, "2017-playoff", "20170612-CLE-GSW")
# read in the game data and clean up the data a bit
dat <-
query_msf_api(url, write=FALSE, flatten=TRUE)$gameplaybyplay$plays$play %>%
mutate(play_id=paste0("q_",quarter,"_",time,"_play", seq_len(nrow(.)))) %>%
melt(id.vars=c("quarter","time","play_id"), factorsAsStrings=TRUE) %>%
mutate(variable=as.character(variable)) %>%
filter(!is.na(value)) %>% arrange(play_id)
# dat$play_id <- ifelse(
# grepl("_play\\d$", dat$play_id),
# gsub("_play", "_play00", dat$play_id), ifelse(
# grepl("_play\\d\\d$", dat$play_id),
# gsub("_play", "_play0", dat$play_id), dat$play_id
# )
# )
dat <- dat %>% arrange(play_id)
# dat$play_id[grep("_play\\d$", dat_info$play_id)] <-
# gsub("_play", "_play00", dat$play_id[grep("_play\\d$", dat_info$play_id)])
# generate lookup table so we can safely toss most of the data + recover players
player_lkup <- make_player_lkup(dat)
# now going to trim data by tossing unnecessary player rows (e.g. name)
# the four player attributes to toss:
toss <- c("LastName","FirstName","JerseyNumber","Position") %>%
paste0(collapse="|") %>% grep(unique(dat$variable), value=TRUE)
# toss all the rows that are like this:
dat <- dat %>% filter(!variable %in% toss)
dat_info <- dat %>% select(play_id, variable) %>%
mutate(play_type=nth_split(variable, n=1)) %>%
group_by(play_id) %>% summarize(play_type=unique(play_type))
### GET GAME SCHEDULE ######
sched <- read.csv(make_msf_schedule_url(cred, "2017-playoff"))
### GET SEASON "BOX" SCORES ###### [**NOT WORKING YET**]
box_url <- make_msf_box_url(cred,"2017-playoff","json","20170612-CLE-GSW")
box <- fromJSON(txt=box_url)
### GET ROSTERS ######
rost <- read.csv(msf_rost(cred))
### GET PLAYER/GAME LOGS ######
playaz_url <- make_playerlog_url(cred, "2017-playoff", "csv", "cle")
playaz <- read.csv(playaz_url, row.names=NULL)
### SCRATCH AREAYAYA ~~~ ##############
# msf_query_factory <- function(credentials, season_string, type, format, ...){
# start <- "https://"
# cred <- credentials
# site <- "@www.mysportsfeeds.com/api/feed/pull/nba/"
# seas <- season_string
# type <- type
# format <- format
# }
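# a hedged completion of the factory sketch above; the endpoint layout is
# assumed from the make_msf_* helpers used earlier, so it stays commented out:
# msf_query_factory <- function(credentials, season_string, type, format, ...) {
#   paste0("https://", credentials,
#          "@www.mysportsfeeds.com/api/feed/pull/nba/",
#          season_string, "/", type, ".", format)
# }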
|
/hoops/msf/msf_boosh.r
|
no_license
|
lefft/boosh
|
R
| false | false | 2,551 |
r
|
ggColorHue <- function(n) {
hues = seq(15, 375, length = n+1)
hcl(h = hues, l = 65, c = 100)[1:n]
}
DownloadButton <- function(outputId, label = "Download", class = NULL) {
    tags$a(id = outputId, class = paste("btn btn-default shiny-download-link",
        class), href = "", target = "_blank", icon("file-pdf-o"),
        label)
}
Quantile_95 <- function(vector) {
m <- mean(vector)
p95 <- quantile(vector, 0.95)[[1]]
p05 <- quantile(vector, 0.05)[[1]]
return(c(upper = p95, mean = m, lower = p05))
}
NonZeroCheck <- function(x) if (x < 0) 0 else x
NonZeroVectorCheck <- function(x) {
for (i in 1:length(x)) {
x[i] <- NonZeroCheck(x[[i]])
}
x
}
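# Note (added): NonZeroVectorCheck(x) is equivalent to the vectorised base-R
# call pmax(x, 0); e.g. pmax(c(-1, 2, -3), 0) returns c(0, 2, 0).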
|
/inst/app/server/misc-functions.R
|
no_license
|
jackolney/CascadeDashboard
|
R
| false | false | 705 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/augment.R
\name{augment}
\alias{augment}
\title{Augment data with information from an object}
\usage{
augment(x, ...)
}
\arguments{
\item{x}{Model object or other R object with information to append to
observations.}
\item{...}{Additional arguments to the \code{augment} method.}
}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with information about data points.
}
\description{
Augment data with information from an object
}
\section{Methods}{
\Sexpr[stage=render,results=rd]{generics:::methods_rd("augment")}
}
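\examples{
# illustration only (assumes a package such as broom that registers methods):
# augment(lm(mpg ~ wt, data = mtcars))
}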
|
/man/augment.Rd
|
permissive
|
r-lib/generics
|
R
| false | true | 599 |
rd
|
library(ggplot2)
library(dplyr)
library(plotly)
library(shiny)
library(tidyr)
library(scales)
library(lazyeval)
df <- read.csv('https://raw.githubusercontent.com/SubhalaxmiRout002/DATA-608/main/Final%20Project/Supermarket%20Sales%20-%20Stock.csv')
df <- data.frame(df)
df$Date <- as.Date(df$Date, format = "%m/%d/%Y")
df$Month <- months(as.Date(df$Date))
df$Time <- as.factor(df$Time)
ui <- shinyUI(navbarPage(title = "Sales Analysis",
# first tab to display project info of the project markdown file
tabPanel("Project Information",
fluidPage(
includeMarkdown("project_information.Rmd"))),
# next tab to the right contains sidebar with controls and chart
tabPanel("Supermarket Sales Dashboard",
sidebarPanel(width = 4,style='border:2px solid; padding: 10px',
selectInput('Product.line', 'Product Line',sort(unique(df$Product.line)), selected='Electronic accessories'),
                             selectInput('Month', 'Month Name', unique(df$Month), selected='January'),
                             selectInput('Branch', 'Branch Name', sort(unique(df$Branch)), selected='A') # Branch values are the codes A/B/C
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Gross Profit",
tabsetPanel(
tabPanel("Plot", plotOutput("plot1")),
tabPanel("Table", tableOutput("table1"))
                                      )
),
tabPanel("Net Sales",
tabsetPanel(
type = "tabs",
tabPanel("Plot", plotOutput("plot2")),
tabPanel("Table", tableOutput("table2"))
                                      )
),
tabPanel("Units Sold",
tabsetPanel(
type = "tabs",
tabPanel("Plot", plotOutput("plot3")),
tabPanel("Table", tableOutput("table3"))
                                      )
),
tabPanel("Gender",
tabsetPanel(
type = "tabs",
tabPanel("Plot", plotOutput("plot4")),
tabPanel("Table", tableOutput("table4"))
                                      )
),
tabPanel("Payment Type",
tabsetPanel(
type = "tabs",
tabPanel("Plot", plotOutput("plot5")),
tabPanel("Table", tableOutput("table5"))
                                      )
),
tabPanel("Customer Type",
tabsetPanel(
type = "tabs",
tabPanel("Plot", plotOutput("plot6")),
tabPanel("Table", tableOutput("table6"))
                                      )
),
tabPanel("Daily Sales / Gross Profit",
tabsetPanel(
type = "tabs",
tabPanel("Net Sales", plotlyOutput("aniplot")),
tabPanel("Gross Profit", plotlyOutput("aniplot2"))
)
)
)
)
),
# tags$style-s below is to overwrite shiny default colours
tags$style(
type = 'text/css',
HTML('
.navbar{background-color: #337ab7; border-color: #2e6da4}
')),
tags$style(
type = 'text/css',
HTML('
.navbar-default .navbar-brand {color: white; }
')),
tags$style(
type = 'text/css',
HTML('
.navbar-default .navbar-nav>li>a {color: white; }
')),
tags$style(
type = 'text/css',
HTML('
.navbar-default .navbar-nav>.active>a, .navbar-default .navbar-nav>.active>a:focus, .navbar-default .navbar-nav>.active>a:hover {color: black; }
')),
tags$style(
type = 'text/css',
HTML('
.navbar-default .navbar-nav>li>a:hover {color: #2299D4;}
')),
tags$style(
type = 'text/css',
HTML('
.navbar-header .navbar-brand:hover {color: #2299D4;}
')),
tags$style(
type = 'text/css',
HTML('
.navbar-default .navbar-nav>.active>a, .navbar-default .navbar-nav, .navbar-default .navbar-nav>a:hover {color: black; }
'))
))
server <- function(input, output) {
# plot 1
output$plot1 <- renderPlot(
{
df2 <- df %>% select(Month, Branch, Product.line,Gross.Income) %>%
group_by(Month, Branch, Product.line) %>%
summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line)
df2$Month = factor(df2$Month, levels = month.name)
ggplot(data=df2, aes(x=Month, y=Gross.Income, group=Branch)) +
geom_line(aes(color=Branch), size=2)+
geom_point(aes(color=Branch), size=3) +
labs(title="Gross Profit Trend",x="Month", y = "Gross Profit (Myanmar kyat in millions)") +
scale_colour_manual(values=c(B="#F4A460",C="#A0522D",A="#A9A9A9"))+
theme(panel.background = element_rect(fill = "white", color = NA),
plot.title = element_text(hjust = 0.5, size = 20),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15),
axis.ticks.x=element_blank()
)
}, height = 700, width = 800
)
# table 1
output$table1 <- renderTable(
{
df2 <- df %>% select(Month, Branch, Product.line,Gross.Income) %>%
group_by(Month, Branch, Product.line) %>%
summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line)
df2
}
)
# plot 2
output$plot2 <- renderPlot(
{
df3 <- df %>% select(Month,Branch,Product.line, Total) %>% group_by(Month,Branch,Product.line) %>% summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line)
df3$Month = factor(df3$Month, levels = month.name)
ggplot(data=df3, aes(x=Month, y=Total, group=Branch)) +
geom_line(aes(color=Branch), size=2)+
geom_point(aes(color=Branch), size=3) +
scale_colour_manual(values=c(B="#F4A460",C="#A0522D",A="#A9A9A9"))+
labs(title="Net Sales Trend",x="Month", y = "Net Sales (Myanmar kyat in millions)") +
theme(panel.background = element_rect(fill = "white", color = NA),
plot.title = element_text(hjust = 0.5, size = 20),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15),
#axis.text.x = element_blank(),
axis.ticks.x=element_blank()
)
}, height = 700, width = 800
)
# table 2
output$table2 <- renderTable(
{
df3 <- df %>% select(Month,Branch,Product.line, Total) %>% group_by(Month,Branch,Product.line) %>% summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line)
df3
}
)
# plot animation Total
output$aniplot <- renderPlotly(
{
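      # accumulate_by() (repeated verbatim in the Gross Profit plot below)
      # builds one cumulative data "frame" per date, so plotly can animate
      # the line growing over time: frame k holds all rows up to the k-th date.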
accumulate_by <- function(dat, var) {
var <- f_eval(var, dat)
lvls <- plotly:::getLevels(var)
dats <- lapply(seq_along(lvls), function(x) {
cbind(dat[var %in% lvls[seq(1, x)], ], frame = lvls[[x]])
})
bind_rows(dats)
}
df8 <- df %>% select(Month,Branch,Product.line, Date, Total) %>%
group_by(Month,Branch,Product.line, Date) %>% summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line, Month == input$Month, Branch == input$Branch)
df8 <- df8 %>% accumulate_by(~Date)
plot_ly(data = df8,
x = ~df8$Date,
y = ~df8$Total,
frame = ~df8$frame,
type = 'scatter',
mode = 'lines+markers',
              marker = list(size = 10, color = '#F4A460'),
line = list(color = '#F4A460', width = 2)
) %>% layout(
title = "Daily Net Sales",
width = 1000, height = 700,
xaxis = list(
range= c(head(df8$Date, n = 1), tail(df8$Date, n=1)+1),
title = "Date"
),
yaxis = list(
title = "Daily Net Sales (Myanmar kyat in millions)"
)
) %>% animation_opts(
frame = 100,
transition = 0,
easing = "elastic",
redraw = FALSE
) %>% animation_button(
x = 1, xanchor = "right", y = 0, yanchor = "bottom"
) %>% animation_slider(
currentvalue = list(prefix = "Daily :", font = list(color="steelblue"))
)
}
)
# plot animation Gross Profit
output$aniplot2 <- renderPlotly(
{
accumulate_by <- function(dat, var) {
var <- f_eval(var, dat)
lvls <- plotly:::getLevels(var)
dats <- lapply(seq_along(lvls), function(x) {
cbind(dat[var %in% lvls[seq(1, x)], ], frame = lvls[[x]])
})
bind_rows(dats)
}
df9 <- df %>% select(Month,Branch,Product.line, Date, Gross.Income) %>%
group_by(Month,Branch,Product.line, Date) %>% summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line, Month == input$Month, Branch == input$Branch)
df9 <- df9 %>% accumulate_by(~Date)
plot_ly(data = df9,
x = ~df9$Date,
y = ~df9$Gross.Income,
frame = ~df9$frame,
type = 'scatter',
mode = 'lines+markers',
              marker = list(size = 10, color = '#F4A460'),
line = list(color = '#F4A460', width = 2)
) %>% layout(
title = "Daily Gross Profit",
width = 1000, height = 700,
xaxis = list(
range= c(head(df9$Date, n = 1), tail(df9$Date, n=1)+1),
title = "Date"
),
yaxis = list(
title = "Daily Gross Profit (Myanmar kyat in millions)"
)
) %>% animation_opts(
frame = 100,
transition = 0,
easing = "elastic",
redraw = FALSE
) %>% animation_button(
x = 1, xanchor = "right", y = 0, yanchor = "bottom"
) %>% animation_slider(
currentvalue = list(prefix = "Daily :", font = list(color="steelblue"))
)
}
)
# plot 3
output$plot3 <- renderPlot({
df4 <- df %>% select(Month,Branch,Product.line, Quantity) %>% group_by(Month,Branch,Product.line) %>% summarise(across(everything(), sum)) %>%
filter(Month == input$Month)
ggplot(data=df4, aes(x=Product.line, y=Quantity, fill = Branch)) +
geom_bar(stat = "identity",
position = position_dodge())+
coord_flip() +
scale_fill_manual(values=c(B="#F4A460",C="#A0522D",A="#A9A9A9"))+
labs(title="Units Sold",x="Product Types", y = "# in thousands") +
geom_text(aes(label = Quantity),
position = position_dodge(width = 1), size = 4, hjust = -0.10) +
theme(panel.background = element_rect(fill = "white", color = NA),
plot.title = element_text(hjust = 0.5, size = 20),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15),
#axis.title.x=element_blank(),
axis.text.x = element_blank(),
axis.ticks.x=element_blank()
)
}, height = 700, width = 800)
# table 3
output$table3 <- renderTable(
{
df4 <- df %>% select(Month,Branch,Product.line, Quantity) %>% group_by(Month,Branch,Product.line) %>% summarise(across(everything(), sum)) %>%
filter(Month == input$Month)
df4
}
)
# plot 4
output$plot4 <- renderPlot({
df5 <- df %>% select(Month,Branch,Product.line, Gender) %>% group_by(Month,Branch,Product.line, Gender) %>%
summarise(gender_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(gender_n/sum(gender_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
ggplot(df5, aes(x="", y=Percentage, fill=Gender))+
geom_bar(width = 1,stat = "identity") +
coord_polar("y", start=0) +
scale_fill_manual(values=c("#E69F00", "#999999")) +
labs(title="Gender") +
geom_text(aes(y = Percentage,
label = percent(Percentage/100)), size=6, position = position_stack(vjust = 0.5)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(hjust = 0.5, size = 15),
axis.text.x=element_blank()
)
})
# table 4
output$table4 <- renderTable(
{
df5 <- df %>% select(Month,Branch,Product.line, Gender) %>% group_by(Month,Branch,Product.line, Gender) %>%
summarise(gender_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(gender_n/sum(gender_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
df5
}
)
# plot 5
output$plot5 <- renderPlot({
df6 <- df %>% select(Month,Branch,Product.line, Payment) %>% group_by(Month,Branch,Product.line, Payment) %>%
summarise(payment_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(payment_n/sum(payment_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
ggplot(df6, aes(x="", y=Percentage, fill=Payment))+
geom_bar(width = 1,stat = "identity") +
coord_polar("y", start=0) +
scale_fill_manual(values=c("#E69F00", "#999999", "#A0522D")) +
labs(title="Payment Type") +
geom_text(aes(y = Percentage,
label = percent(Percentage/100)), size=6, position = position_stack(vjust = 0.5)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(hjust = 0.5, size = 15),
axis.text.x=element_blank()
)
})
# table 5
output$table5 <- renderTable(
{
df6 <- df %>% select(Month,Branch,Product.line, Payment) %>% group_by(Month,Branch,Product.line, Payment) %>%
summarise(payment_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(payment_n/sum(payment_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
df6
}
)
# plot 6
output$plot6 <- renderPlot({
df7 <- df %>% select(Month,Branch,Product.line, Customer.type) %>% group_by(Month,Branch,Product.line, Customer.type) %>%
summarise(cust_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(cust_n/sum(cust_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
ggplot(df7, aes(x="", y=Percentage, fill=Customer.type))+
geom_bar(width = 1,stat = "identity") +
coord_polar("y", start=0) +
scale_fill_manual(values=c("#E69F00", "#999999")) +
labs(title="Customer Type") +
geom_text(aes(y = Percentage,
label = percent(Percentage/100)), size=6, position = position_stack(vjust = 0.5)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(hjust = 0.5, size = 15),
axis.text.x=element_blank()
)
})
# table 6
output$table6 <- renderTable(
{
df7 <- df %>% select(Month,Branch,Product.line, Customer.type) %>% group_by(Month,Branch,Product.line, Customer.type) %>%
summarise(cust_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(cust_n/sum(cust_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
df7
}
)
}
shinyApp(ui = ui, server = server)
|
/Final Project/Dashboard.R
|
no_license
|
SubhalaxmiRout002/DATA-608
|
R
| false | false | 20,091 |
r
|
filter(Product.line == input$Product.line)
df3
}
)
# plot animation Total
output$aniplot <- renderPlotly(
{
accumulate_by <- function(dat, var) {
var <- f_eval(var, dat)
lvls <- plotly:::getLevels(var)
dats <- lapply(seq_along(lvls), function(x) {
cbind(dat[var %in% lvls[seq(1, x)], ], frame = lvls[[x]])
})
bind_rows(dats)
}
df8 <- df %>% select(Month,Branch,Product.line, Date, Total) %>%
group_by(Month,Branch,Product.line, Date) %>% summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line, Month == input$Month, Branch == input$Branch)
df8 <- df8 %>% accumulate_by(~Date)
plot_ly(data = df8,
x = ~df8$Date,
y = ~df8$Total,
frame = ~df8$frame,
type = 'scatter',
mode = 'lines+markers',
markers = list(size = 10, color = '#F4A460'),
line = list(color = '#F4A460', width = 2)
) %>% layout(
title = "Daily Net Sales",
width = 1000, height = 700,
xaxis = list(
range= c(head(df8$Date, n = 1), tail(df8$Date, n=1)+1),
title = "Date"
),
yaxis = list(
title = "Daily Net Sales (Myanmar kyat in millions)"
)
) %>% animation_opts(
frame = 100,
transition = 0,
easing = "elastic",
redraw = FALSE
) %>% animation_button(
x = 1, xanchor = "right", y = 0, yanchor = "bottom"
) %>% animation_slider(
currentvalue = list(prefix = "Daily :", font = list(color="steelblue"))
)
}
)
# plot animation Gross Profit
output$aniplot2 <- renderPlotly(
{
accumulate_by <- function(dat, var) {
var <- f_eval(var, dat)
lvls <- plotly:::getLevels(var)
dats <- lapply(seq_along(lvls), function(x) {
cbind(dat[var %in% lvls[seq(1, x)], ], frame = lvls[[x]])
})
bind_rows(dats)
}
df9 <- df %>% select(Month,Branch,Product.line, Date, Gross.Income) %>%
group_by(Month,Branch,Product.line, Date) %>% summarise(across(everything(), sum)) %>%
filter(Product.line == input$Product.line, Month == input$Month, Branch == input$Branch)
df9 <- df9 %>% accumulate_by(~Date)
plot_ly(data = df9,
x = ~df9$Date,
y = ~df9$Gross.Income,
frame = ~df9$frame,
type = 'scatter',
mode = 'lines+markers',
markers = list(size = 10, color = '#F4A460'),
line = list(color = '#F4A460', width = 2)
) %>% layout(
title = "Daily Gross Profit",
width = 1000, height = 700,
xaxis = list(
range= c(head(df9$Date, n = 1), tail(df9$Date, n=1)+1),
title = "Date"
),
yaxis = list(
title = "Daily Gross Profit (Myanmar kyat in millions)"
)
) %>% animation_opts(
frame = 100,
transition = 0,
easing = "elastic",
redraw = FALSE
) %>% animation_button(
x = 1, xanchor = "right", y = 0, yanchor = "bottom"
) %>% animation_slider(
currentvalue = list(prefix = "Daily :", font = list(color="steelblue"))
)
}
)
# plot 3
output$plot3 <- renderPlot({
df4 <- df %>% select(Month,Branch,Product.line, Quantity) %>% group_by(Month,Branch,Product.line) %>% summarise(across(everything(), sum)) %>%
filter(Month == input$Month)
ggplot(data=df4, aes(x=Product.line, y=Quantity, fill = Branch)) +
geom_bar(stat = "identity",
position = position_dodge())+
coord_flip() +
scale_fill_manual(values=c(B="#F4A460",C="#A0522D",A="#A9A9A9"))+
labs(title="Units Sold",x="Product Types", y = "# in thousands") +
geom_text(aes(label = Quantity),
position = position_dodge(width = 1), size = 4, hjust = -0.10) +
theme(panel.background = element_rect(fill = "white", color = NA),
plot.title = element_text(hjust = 0.5, size = 20),
axis.title.y = element_text(size = 15),
axis.title.x = element_text(size = 15),
#axis.title.x=element_blank(),
axis.text.x = element_blank(),
axis.ticks.x=element_blank()
)
}, height = 700, width = 800)
# table 3
output$table3 <- renderTable(
{
df4 <- df %>% select(Month,Branch,Product.line, Quantity) %>% group_by(Month,Branch,Product.line) %>% summarise(across(everything(), sum)) %>%
filter(Month == input$Month)
df4
}
)
# plot 4
output$plot4 <- renderPlot({
df5 <- df %>% select(Month,Branch,Product.line, Gender) %>% group_by(Month,Branch,Product.line, Gender) %>%
summarise(gender_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(gender_n/sum(gender_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
ggplot(df5, aes(x="", y=Percentage, fill=Gender))+
geom_bar(width = 1,stat = "identity") +
coord_polar("y", start=0) +
scale_fill_manual(values=c("#E69F00", "#999999")) +
labs(title="Gender") +
geom_text(aes(y = Percentage,
label = percent(Percentage/100)), size=6, position = position_stack(vjust = 0.5)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(hjust = 0.5, size = 15),
axis.text.x=element_blank()
)
})
# table 4
output$table4 <- renderTable(
{
df5 <- df %>% select(Month,Branch,Product.line, Gender) %>% group_by(Month,Branch,Product.line, Gender) %>%
summarise(gender_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(gender_n/sum(gender_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
df5
}
)
# plot 5
output$plot5 <- renderPlot({
df6 <- df %>% select(Month,Branch,Product.line, Payment) %>% group_by(Month,Branch,Product.line, Payment) %>%
summarise(payment_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(payment_n/sum(payment_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
ggplot(df6, aes(x="", y=Percentage, fill=Payment))+
geom_bar(width = 1,stat = "identity") +
coord_polar("y", start=0) +
scale_fill_manual(values=c("#E69F00", "#999999", "#A0522D")) +
labs(title="Payment Type") +
geom_text(aes(y = Percentage,
label = percent(Percentage/100)), size=6, position = position_stack(vjust = 0.5)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(hjust = 0.5, size = 15),
axis.text.x=element_blank()
)
})
# table 5
output$table5 <- renderTable(
{
df6 <- df %>% select(Month,Branch,Product.line, Payment) %>% group_by(Month,Branch,Product.line, Payment) %>%
summarise(payment_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(payment_n/sum(payment_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
df6
}
)
# plot 6
output$plot6 <- renderPlot({
df7 <- df %>% select(Month,Branch,Product.line, Customer.type) %>% group_by(Month,Branch,Product.line, Customer.type) %>%
summarise(cust_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(cust_n/sum(cust_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
ggplot(df7, aes(x="", y=Percentage, fill=Customer.type))+
geom_bar(width = 1,stat = "identity") +
coord_polar("y", start=0) +
scale_fill_manual(values=c("#E69F00", "#999999")) +
labs(title="Customer Type") +
geom_text(aes(y = Percentage,
label = percent(Percentage/100)), size=6, position = position_stack(vjust = 0.5)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.border = element_blank(),
panel.grid=element_blank(),
axis.ticks = element_blank(),
plot.title = element_text(hjust = 0.5, size = 15),
axis.text.x=element_blank()
)
})
# table 6
output$table6 <- renderTable(
{
df7 <- df %>% select(Month,Branch,Product.line, Customer.type) %>% group_by(Month,Branch,Product.line, Customer.type) %>%
summarise(cust_n=n()) %>%
group_by(Month,Branch,Product.line) %>%
mutate(Percentage=round(cust_n/sum(cust_n)*100, 2)) %>%
filter(Product.line == input$Product.line & Month == input$Month, Branch == input$Branch)
df7
}
)
}
shinyApp(ui = ui, server = server)
|
#' @useDynLib spass
#' @importFrom Rcpp evalCpp
NULL
|
/spass/R/documentation.r
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | false | 56 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/conversion.R
\name{as.SlingshotDataSet}
\alias{as.SlingshotDataSet}
\alias{as.SlingshotDataSet,PseudotimeOrdering-method}
\alias{as.SlingshotDataSet,SingleCellExperiment-method}
\alias{as.SlingshotDataSet,SlingshotDataSet-method}
\title{Conversion to SlingshotDataSet}
\usage{
as.SlingshotDataSet(x, ...)
\S4method{as.SlingshotDataSet}{PseudotimeOrdering}(x)
\S4method{as.SlingshotDataSet}{SingleCellExperiment}(x)
\S4method{as.SlingshotDataSet}{SlingshotDataSet}(x)
}
\arguments{
\item{x}{an object containing \code{slingshot} output.}
\item{...}{additional arguments to pass to object-specific methods.}
}
\value{
A \code{SlingshotDataSet} object containing the \code{slingshot}
results from the original object, \code{x}.
}
\description{
This function converts objects that contain \code{slingshot}
results into a \code{SlingshotDataSet}.
}
\examples{
data("slingshotExample")
rd <- slingshotExample$rd
cl <- slingshotExample$cl
pto <- slingshot(rd, cl, start.clus = '1')
as.SlingshotDataSet(pto)
}
\seealso{
\code{\link[TrajectoryUtils]{PseudotimeOrdering}}
}
|
/man/as.SlingshotDataSet.Rd
|
no_license
|
tangbozeng/slingshot
|
R
| false | true | 1,168 |
rd
|
# Script to perform Hotelling's T2 on module members between aerobic, DC, and Mac Mtbs
# and make heatmaps of module expression
options(stringsAsFactors=F)
#Functions
get_module <- function(modID, modules){
return(modules[modules$moduleID==modID,2])
}
get_parents <- function(modID, parents){
raw <- parents[parents$moduleID==modID,2]
parent.genes <- unique(unlist(strsplit(raw, " ", fixed=T)))
return(parent.genes)
}
selectGenes <- function(genes, arrays){
genes.sel <- which(rownames(arrays) %in% genes)
genes.expr <- t(arrays[genes.sel,])
return(genes.expr)
}
#Hotelling package functions
calc.Hotelling <- function(mod.expr, grouping){
  result <- hotelling.test(mod.expr ~ as.factor(grouping), shrinkage=FALSE)
return(result)
}
module.Hotelling <- function(modIDs, modules, parents, arrays, grouping,
shrink=F, perm=T, filterNA=F){
results <- data.frame(moduleID=vector(mode="character"),
p=vector(mode="numeric"),
T_stat=vector(mode="numeric"),
n_x=vector(mode="integer"),
n_y=vector(mode="integer"),
n_vars=vector(mode="integer"))
for (mod in modIDs){
#Get the subset of the expression data for the module
mod.genes <- get_module(mod, modules)
mod.genes <- c(mod.genes, get_parents(mod, parents))
mod.expr <- as.data.frame(selectGenes(mod.genes, arrays))
if(filterNA == T){
mod.expr <- remove_na_cols(mod.expr)
}
#Create group vector of {1,2}
groups <- rep(1, length(grouping))
groups[grouping==unique(grouping)[2]] <- 2
#Add to matrix
mod.expr.groups <- cbind(groups, mod.expr)
#Run the test and store the results
mod.out <- hotelling.test(.~groups, data=mod.expr.groups, shrinkage=shrink,
perm=perm, B=10000)
    # build each row as a data.frame so p etc. stay numeric for p.adjust() later
    results <- rbind(results, data.frame(moduleID=mod, p=mod.out$pval,
                                         T_stat=mod.out$stats$statistic,
                                         n_x=mod.out$stats$nx, n_y=mod.out$stats$ny,
                                         n_vars=mod.out$stats$p))
}
colnames(results) <- c("moduleID", "p", "T_stat", "n_x", "n_y", "n_vars")
return(results)
}
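# Minimal sketch of the underlying test on toy data (illustration only; the
# toy column names g1/g2 are made up). The fields used throughout this script
# are out$pval and out$stats$statistic / $nx / $ny / $p.
# toy <- data.frame(groups = rep(1:2, each = 10), g1 = rnorm(20), g2 = rnorm(20))
# out <- hotelling.test(. ~ groups, data = toy, perm = TRUE, B = 1000)
# out$pval; out$stats$statistic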
remove_na_cols <- function(df){
cols.nas <- unlist(apply(df, 1, function(x) which(is.na(x))))
if(length(cols.nas)==0){
return(df)
}
  df.clean <- df[,-unique(cols.nas), drop=FALSE] # drop=FALSE keeps a data.frame even with one column left
return(df.clean)
}
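# e.g. remove_na_cols(data.frame(a = c(1, NA), b = c(3, 4))) keeps only column b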
#Function to do the immune cells against each other at each timepoint
immune.time.DE <- function(samples, arrays, good.modules, modules, parents, time, filterNA=F){
#Macs vs DCs at 18h
samp.sel <- samples[(samples$celltype != "Aerobic" & samples$time==time),]
expr <- arrays[,colnames(arrays)%in%samp.sel$filename]
pvals <- module.Hotelling(good.modules, modules, parents,
expr, samp.sel$celltype,
shrink=T, filterNA=filterNA)
pvals$p.adj <- p.adjust(pvals$p, method="BH")
return(pvals)
}
heat_labels <- function(arrayIDs, samples){
# arrayIDs - char vector of filenames of arrays
# samples - dataframe of the sample metadata
array.info <- samples[samples$filename %in% arrayIDs,]
array.info$labels <- unlist(apply(array.info, 1, function(array){
label <- paste(c(array[2],
array[3],
array[4]),
collapse=" - "
)
return(label)
}
))
return(array.info$labels)
}
mod.heat <- function(mod.genes, expr, samples, title="", labSize=0.5){
# mod.genes - char vector of genes for heatmap
# expr - all expression data
mod.expr <- selectGenes(mod.genes, expr)
#mod.expr <- mod.expr[complete.cases(mod.expr),]
mod.expr.t <- t(mod.expr)
colnames(mod.expr.t) <- heat_labels(rownames(mod.expr), samples)
#Set colors for DCs and pass to function.
cell.type <- grepl("DC", colnames(mod.expr.t), fixed=T)
col.side <- rep("seagreen", length(cell.type))
col.side[cell.type] <- "skyblue"
  heatmap.2(mod.expr.t,
            main = title,
cexRow = labSize,
cexCol = labSize,
na.rm=T,
trace="none",
symkey=T,
col=redgreen,
key=TRUE,
density.info="none",
ColSideColors=col.side)
}
mod.heat.time <- function(mod.genes, expr, samples, title=""){
# mod.genes - char vector of genes for heatmap
# expr - all expression data
mod.expr <- selectGenes(mod.genes, expr)
#mod.expr <- mod.expr[complete.cases(mod.expr),]
mod.expr.t <- t(mod.expr)
colnames(mod.expr.t) <- heat_labels(rownames(mod.expr), samples)
#Set colors for DCs and pass to function.
time.4h <- grepl("4h", colnames(mod.expr.t), fixed=T)
time.18h <- grepl("18h", colnames(mod.expr.t), fixed=T)
col.side <- rep("orange", length(time.4h))
col.side[time.4h] <- "purple"
col.side[time.18h] <- "red"
  heatmap.2(mod.expr.t,
            main = title,
cexRow = 0.5,
cexCol = 0.5,
na.rm=T,
trace="none",
symkey=T,
col=redgreen,
key=TRUE,
ColSideColors=col.side)
}
get_arrays_time <- function(samples, expr, time){
arrays.idx <- which(samples$time == time & samples$celltype!="Aerobic")
array.ids <- samples[arrays.idx,]$filename
expr.time <- expr[,colnames(expr)%in%array.ids]
return(expr.time)
}
library(gplots)
library(Hotelling)
setwd("~/Dropbox/thesis_work/")
#Load data
#Module data
#Filter out the modules with no probabilities
modules <- read.table("PMN_output/4.17.30_mods_members.txt",
head=T, sep="\t")
parents <- read.table("PMN_output/4.17.30_mods_parsed.txt",
head=T, sep="\t")
modules.stats <- read.table("PMN_output/4.17_30mods_genes_pathsizes.txt",
head=T, sep="\t")
good.modules <- modules.stats[modules.stats$thresh.0.2 > 0 & modules.stats$n_genes < 100,]$moduleID
#Arrays
load("data/exprs/EBUGS58/EBUGS58.arrays.RData")
#Samples
load("data/exprs/EBUGS58/BUGS58.samples.RData")
##############################
#
# Hotelling's T2 Analysis
#
##############################
## DC vs Macs
immune.arrays <- BUGS58.samples[BUGS58.samples$celltype != "Aerobic",1]
celltype <- BUGS58.samples[BUGS58.samples$celltype != "Aerobic", 2]
expr.immune <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%immune.arrays]
dim(expr.immune)
#[1] 3765 36, 36 total arrays
dc_mac.p.shrink.perm <- module.Hotelling(good.modules, modules, parents, expr.immune,
celltype, shrink=T)
dc_mac.p.shrink.perm$p.adj <- p.adjust(dc_mac.p.shrink.perm$p, method="BH")
write.table(dc_mac.p.shrink.perm, "data/results/PMN_DC_vs_Mac_DE.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#########################
#
# Heatmaps of Expression
#
#########################
##
# DCs vs Macs all times
mod2 <- get_module("mod2", modules)
mod2 <- c(mod2, get_parents("mod2", parents))
mod.heat(mod2, expr.immune, BUGS58.samples)
## DCs = blue, Macs = green
mod.heat.time(mod2, expr.immune, BUGS58.samples)
## 1h = orange, 4h = purple, 18h = red
# 1 hour
png("data/results/PMN_DE_heat/PMN_DC_v_Mac_1h.png",
1000, 881, pointsize=14, bg="transparent")
arrays.1h <- get_arrays_time(BUGS58.samples, BUGS58.arrays, "1h")
mod.heat(mod2, arrays.1h, BUGS58.samples)
dev.off()
# 4 hours
png("data/results/PMN_DE_heat/PMN_DC_v_Mac_4h.png",
1000, 881, pointsize=14, bg="transparent")
arrays.4h <- get_arrays_time(BUGS58.samples, BUGS58.arrays, "4h")
mod.heat(mod2, arrays.4h, BUGS58.samples)
dev.off()
# 18 hours
png("data/results/PMN_DE_heat/PMN_DC_v_Mac_18h.png",
1000, 881, pointsize=14, bg="transparent")
arrays.18h <- get_arrays_time(BUGS58.samples, BUGS58.arrays, "18h")
mod.heat(mod2, arrays.18h, BUGS58.samples, labSize=1)
dev.off()
####
#
# Time
#
####
# 1h vs 18h, Immune cells
time.1.18 <- BUGS58.samples[BUGS58.samples$celltype != "Aerobic" & BUGS58.samples$time %in% c("1h", "18h"),]
expr.time.1.18 <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%time.1.18$filename]
dim(expr.time.1.18)
#[1] 3765 24, 24 arrays
time.1.18.pvals <- module.Hotelling(good.modules, modules, parents,
expr.time.1.18, time.1.18$time,
shrink=T)
time.1.18.pvals$p.adj <- p.adjust(time.1.18.pvals$p, method="BH")
write.table(time.1.18.pvals, "data/results/PMN_immunes_1h_vs_18h.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#Macs vs DCs at 1h, 4h, 18h with NAs removed
dc_mac_18h.pvals.noNA <- immune.time.DE(BUGS58.samples, BUGS58.arrays, good.modules,
modules, parents, "18h", filterNA=T)
write.table(dc_mac_18h.pvals.noNA, "data/results/PMN_DC_vs_macs_18h_noNA.txt",
col.names=T, sep="\t", quote=F, row.names=F)
dc_mac_4h.pvals.noNA <- immune.time.DE(BUGS58.samples, BUGS58.arrays, good.modules,
modules, parents, "4h", filterNA=T)
write.table(dc_mac_4h.pvals.noNA, "data/results/PMN_DC_vs_macs_4h_noNA.txt",
col.names=T, sep="\t", quote=F, row.names=F)
dc_mac_1h.pvals.noNA <- immune.time.DE(BUGS58.samples, BUGS58.arrays, good.modules,
modules, parents, "1h", filterNA=T)
write.table(dc_mac_1h.pvals.noNA, "data/results/PMN_DC_vs_macs_1h_noNA.txt",
col.names=T, sep="\t", quote=F, row.names=F)
####
#
# MACS
#
####
#Macs vs Aerobic
mac.aer.arr <- BUGS58.samples[BUGS58.samples$celltype != "DC",1]
mac.aer.labels <- BUGS58.samples[BUGS58.samples$celltype != "DC", 2]
expr.mac.aer <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%mac.aer.arr]
dim(expr.mac.aer)
#[1] 3765 26, 26 total arrays
mac.aer.pvals <- module.Hotelling(good.modules, modules, parents,
expr.mac.aer, mac.aer.labels,
shrink=T)
mac.aer.pvals$p.adj <- p.adjust(mac.aer.pvals$p, method="BH")
write.table(mac.aer.pvals, "data/results/PMN_Macs_vs_Aerobic.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#Macs 1h vs 4h
mac.1.4 <- BUGS58.samples[BUGS58.samples$celltype == "MDM" & BUGS58.samples$time %in% c("1h", "4h"),]
expr.mac.1.4 <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%mac.1.4$filename]
dim(expr.mac.1.4)
#[1] 3765 12, 12 arrays
mac.1.4.pvals <- module.Hotelling(good.modules, modules, parents,
expr.mac.1.4, mac.1.4$time,
shrink=T, filterNA=T)
mac.1.4.pvals$p.adj <- p.adjust(mac.1.4.pvals$p, method="BH")
write.table(mac.1.4.pvals,"data/results/PMN_Macs_1h_vs_4h.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#Macs 4h vs 18 h
mac.4.18 <- BUGS58.samples[BUGS58.samples$celltype == "MDM" & BUGS58.samples$time %in% c("4h", "18h"),]
expr.mac.4.18 <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%mac.4.18$filename]
dim(expr.mac.4.18)
#[1] 3765 12, 12 arrays
mac.4.18.pvals <- module.Hotelling(good.modules, modules, parents,
expr.mac.4.18, mac.4.18$time,
shrink=T, filterNA=T)
mac.4.18.pvals$p.adj <- p.adjust(mac.4.18.pvals$p, method="BH")
write.table(mac.4.18.pvals,"data/results/PMN_Macs_4h_vs_18h.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#Macs 1h vs 18 h
mac.1.18 <- BUGS58.samples[BUGS58.samples$celltype == "MDM" & BUGS58.samples$time %in% c("1h", "18h"),]
expr.mac.1.18 <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%mac.1.18$filename]
dim(expr.mac.1.18)
#[1] 3765 12, 12 arrays
mac.1.18.pvals <- module.Hotelling(good.modules, modules, parents,
expr.mac.1.18, mac.1.18$time,
shrink=T, filterNA=T)
mac.1.18.pvals$p.adj <- p.adjust(mac.1.18.pvals$p, method="BH")
write.table(mac.1.18.pvals,"data/results/PMN_Macs_1h_vs_18h.txt",
col.names=T, sep="\t", quote=F, row.names=F)
####
#
# DCs
#
####
#DC vs aerobic
DC.aer.arr <- BUGS58.samples[BUGS58.samples$celltype != "MDM",1]
DC.aer.labels <- BUGS58.samples[BUGS58.samples$celltype != "MDM", 2]
expr.DC.aer <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%DC.aer.arr]
dim(expr.DC.aer)
#[1] 3765 26, 26 total arrays
DC.aer.pvals <- module.Hotelling(good.modules, modules, parents,
expr.DC.aer, DC.aer.labels,
shrink=T, filterNA=T)
DC.aer.pvals$p.adj <- p.adjust(DC.aer.pvals$p, method="BH")
write.table(DC.aer.pvals,"data/results/PMN_DCs_vs_Aerobic.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#DCs 1h vs 4h
DC.1.4 <- BUGS58.samples[BUGS58.samples$celltype == "DC" & BUGS58.samples$time %in% c("1h", "4h"),]
expr.DC.1.4 <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%DC.1.4$filename]
dim(expr.DC.1.4)
#[1] 3765 12, 12 arrays
DC.1.4.pvals <- module.Hotelling(good.modules, modules, parents,
expr.DC.1.4, DC.1.4$time,
shrink=T, filterNA=T)
DC.1.4.pvals$p.adj <- p.adjust(DC.1.4.pvals$p, method="BH")
write.table(DC.1.4.pvals,"data/results/PMN_DCs_1h_vs_4h.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#DC 4h vs 18h
DC.4.18 <- BUGS58.samples[BUGS58.samples$celltype == "DC" & BUGS58.samples$time %in% c("4h", "18h"),]
expr.DC.4.18 <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%DC.4.18$filename]
dim(expr.DC.4.18)
#[1] 3765 12, 12 arrays
DC.4.18.pvals <- module.Hotelling(good.modules, modules, parents,
expr.DC.4.18, DC.4.18$time,
shrink=T, filterNA=T)
DC.4.18.pvals$p.adj <- p.adjust(DC.4.18.pvals$p, method="BH")
write.table(DC.4.18.pvals,"data/results/PMN_DCs_4h_vs_18h.txt",
col.names=T, sep="\t", quote=F, row.names=F)
#DC 1h vs 18h
DC.1.18 <- BUGS58.samples[BUGS58.samples$celltype == "DC" & BUGS58.samples$time %in% c("1h", "18h"),]
expr.DC.1.18 <- BUGS58.arrays[,colnames(BUGS58.arrays)%in%DC.1.18$filename]
dim(expr.DC.1.18)
#[1] 3765 12, 12 arrays
DC.1.18.pvals <- module.Hotelling(good.modules, modules, parents,
expr.DC.1.18, DC.1.18$time,
shrink=T, filterNA=T)
DC.1.18.pvals$p.adj <- p.adjust(DC.1.18.pvals$p, method="BH")
write.table(DC.1.18.pvals,"data/results/PMN_DCs_1h_vs_18h.txt",
col.names=T, sep="\t", quote=F, row.names=F)
|
/results/PMN_mod_DE.R
|
no_license
|
jreistetter/MS_thesis
|
R
| false | false | 14436 |
r
|
#######################################################################
##### file: biologicalData.R #####
##### input: results/*CurrentRun*/preprocTCGAData-2/RData/ #####
##### subsets.RData #####
##### output: profiles (*.txt and *.RData) of samples from #####
##### common participants #####
##### packages: -- #####
##### author: B. Pucher #####
##### date created: 23/07/2015 #####
##### last change: 10/01/2018 #####
#######################################################################
rm(list=ls())
#library("impute")
#####------------------------------------------------------------------
# TCGA barcode structure labeling samples
#####------------------------------------------------------------------
barcode.parts = c("Project", "TSS", "Participant", "Sample", "Analyte",
"Plate", "Center")
#####------------------------------------------------------------------
# tissue type code from TCGA Wiki at National Cancer Institute
#####------------------------------------------------------------------
tumor = 1
normal = 11
metastatic = 6
control = 20
#######################################################################
##### #####
##### FUNCTIONS #####
##### #####
#######################################################################
#####------------------------------------------------------------------
# get type of dataset
#####------------------------------------------------------------------
getDataType = function(dataset){
return(dataset$dataType)
}
#####------------------------------------------------------------------
# split sample names of TCGA dataset into 7 blocks
# example sample name: "TCGA-A8-A07B-01A-11R-A007Z-07"
#####------------------------------------------------------------------
split_names = function(names, sep = "-"){
blocks = matrix(unlist(strsplit(names, split = sep)),
nrow = length(names), byrow = TRUE,
dimnames = list(NULL, barcode.parts))
return(blocks)
}
#####------------------------------------------------------------------
# split the 4th block of sample names to access tissue type
#####------------------------------------------------------------------
sampleType = function(name.blocks){
type = matrix(unlist(strsplit(name.blocks[,"Sample"], split = "")),
nrow = nrow(name.blocks), byrow = TRUE)
type.fc = factor(as.numeric(paste(type[,1], type[,2], sep = "")),
levels = c(tumor, metastatic, normal, control),
labels = c("tumor", "metastatic", "normal", "control"))
return(type.fc)
}
#####------------------------------------------------------------------
# split the Analyte-block of sample names to get data type
#####------------------------------------------------------------------
analyteType = function(name.blocks){
type = matrix(unlist(strsplit(name.blocks[,"Analyte"], split = "")),
nrow = nrow(name.blocks), byrow = TRUE)[,3]
return(type)
}
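#####------------------------------------------------------------------
# worked example (barcode from the split_names comment above; run manually):
# blocks = split_names("TCGA-A8-A07B-01A-11R-A007Z-07")
# sampleType(blocks)   # "tumor" - sample code 01
# analyteType(blocks)  # "R"     - RNA analyte
#####------------------------------------------------------------------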
#####------------------------------------------------------------------
# returns the participant id of a provided sample
#####------------------------------------------------------------------
getParticipantId = function(sample){
return(split_names(sample)[,"Participant"])
}
#####------------------------------------------------------------------
# get participants common to all subsets
#####------------------------------------------------------------------
getCommonParticipantsTable = function(subset.list){
commons.table = numeric(0)
for(i in 1:length(subset.list)){
commons = lapply(subset.list, intersect, subset.list[[i]])
commons.table = cbind(commons.table, unlist(lapply(commons, length)))
}
colnames(commons.table) = names(subset.list)
return(commons.table)
}
#####------------------------------------------------------------------
# extract profiles of samples with provided barcode
#####------------------------------------------------------------------
getProfiles = function(barcodes, dataset){
profile.idx = which(colnames(dataset$Data) %in% barcodes)
profile.set = dataset$Data[,profile.idx]
row.names(profile.set) = dataset$Des[,grep("EntrezID|REF", colnames(dataset$Des))]
profiles.sorted = profile.set[,order(colnames(profile.set))]
nb.samples = length(barcodes)
nb.participants = length(unique(getParticipantId(barcodes)))
if(nb.samples > nb.participants){
redundant.participant =
names(which(table(getParticipantId(colnames(profiles.sorted))) > 1))
idx.redundant = which(getParticipantId(colnames(profiles.sorted)) ==
redundant.participant)
# if one participant was measured several times within the same data
# type, keep only one sample
profiles.sorted = profiles.sorted[,-idx.redundant[-1]]
}
return(profiles.sorted)
}
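# e.g. if one participant contributed two tumor arrays of the same data type,
# only the first sample in sorted barcode order is kept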
#####------------------------------------------------------------------
# save profiles of samples common to all subsets to files
#####------------------------------------------------------------------
saveProfiles = function(profiles, filename, save.RData = TRUE){
write.table(profiles,
file.path(sub.dir.files, paste(filename, ".txt", sep = "")),
quote = F, row.names = T, col.names = T)
cat("Profiles saved to ", filename, ".txt \n", sep = "")
if(save.RData){
saveRDS(profiles,
file.path(sub.dir.RData, paste(filename, ".RData", sep = "")))
cat("Profiles saved to ", filename, ".RData \n", sep = "")
}
}
#######################################################################
##### #####
##### MAIN SECTION #####
##### #####
#######################################################################
current.run = .current.biolog
sub.dir.name = file.path("biologicalData", current.run)
source(file.path(.src.dir, "setSubDirPath.R"))
load(file.path(.run.dir, "preprocTCGAData-2", "RData", "subsets.RData"))
log.con = file(file.path(sub.dir.files, "biologicalDataLog.txt"))
sink(file = log.con, type = "output")
flush(log.con)
cat("Continue preprocessing of biological datasets ...\n")
# split up subset list
gene.exp.tu =
subsets[[which(lapply(subsets, getDataType) == "GeneExp_tu")]]
gene.exp.no =
subsets[[which(lapply(subsets, getDataType) == "GeneExp_no")]]
methyl.tu =
subsets[[which(lapply(subsets, getDataType) == "Methylation_tu")]]
methyl.no =
subsets[[which(lapply(subsets, getDataType) == "Methylation_no")]]
cat("Dimension of data subsets:",
unlist(lapply(subsets, function(s) {
paste(getDataType(s), paste(unlist(lapply(s, dim)$Data), collapse = " "))
})), sep = "\n")
#rm(subsets)
# sample names of each subset
ge.tu.samples = dimnames(gene.exp.tu$Data)[[2]]
ge.no.samples = dimnames(gene.exp.no$Data)[[2]]
met.tu.samples = dimnames(methyl.tu$Data)[[2]]
met.no.samples = dimnames(methyl.no$Data)[[2]]
# sample names of subsets split into blocks
ge.tu.samples.blocks = split_names(ge.tu.samples)
ge.no.samples.blocks = split_names(ge.no.samples)
met.tu.samples.blocks = split_names(met.tu.samples)
met.no.samples.blocks = split_names(met.no.samples)
#####------------------------------------------------------------------
# find common participants across all data types and subsets
#####------------------------------------------------------------------
# extract the ID of all participants involved
tu.part = list(ge.tu = ge.tu.samples.blocks[,"Participant"],
met.tu = met.tu.samples.blocks[,"Participant"])
# prot.tu = prot.tu.samples.blocks[,"Participant"])
no.part = list(ge.no = ge.no.samples.blocks[,"Participant"],
met.no = met.no.samples.blocks[,"Participant"])
subsets.part = c(tu.part, no.part)
# create table with participants common to pairs of subsets
getCommonParticipantsTable(subsets.part)
#Nb of participants common to all tumor subsets
common.tu = Reduce(intersect, tu.part)
#Nb of participants common to all normal subsets
common.no = Reduce(intersect, no.part)
#Nb of participants common to all subsets
commons.all = intersect(common.tu, common.no)
cat("Number of Participants common to all tumor Subsets: ",
length(common.tu), "\n",
"Number of Participants common to all normal Subsets: ",
length(common.no), "\n",
"Number of Participants common to all Subsets: ",
length(commons.all), "\n",
sep = "")
cat("IDs of common Participants sorted: \n")
print(getParticipantId(sort(colnames(gene.exp.tu$Data)[
which(getParticipantId(colnames(gene.exp.tu$Data)) %in%
commons.all)])), quote = F)
#samples from common participants (blocks)
ge.tu.com = ge.tu.samples.blocks[which(
ge.tu.samples.blocks[,"Participant"] %in% commons.all),]
ge.no.com = ge.no.samples.blocks[which(
ge.no.samples.blocks[,"Participant"] %in% commons.all),]
met.tu.com = met.tu.samples.blocks[which(
met.tu.samples.blocks[,"Participant"] %in% commons.all),]
met.no.com = met.no.samples.blocks[which(
met.no.samples.blocks[,"Participant"] %in% commons.all),]
ge.tu.com = ge.tu.com[!duplicated(ge.tu.com[,"Participant"]),]
ge.no.com = ge.no.com[!duplicated(ge.no.com[,"Participant"]),]
met.tu.com = met.tu.com[!duplicated(met.tu.com[,"Participant"]),]
met.no.com = met.no.com[!duplicated(met.no.com[,"Participant"]),]
#samples from common participants (barcode)
ge.samples.tu.com = apply(ge.tu.com, 1, paste, collapse = "-")
ge.samples.no.com = apply(ge.no.com, 1, paste, collapse = "-")
met.samples.tu.com = apply(met.tu.com, 1, paste, collapse = "-")
met.samples.no.com = apply(met.no.com, 1, paste, collapse = "-")
#####------------------------------------------------------------------
# extract profiles of samples with provided barcode
#####------------------------------------------------------------------
ge.tu.profiles = getProfiles(ge.samples.tu.com, gene.exp.tu)
ge.no.profiles = getProfiles(ge.samples.no.com, gene.exp.no)
met.tu.profiles = getProfiles(met.samples.tu.com, methyl.tu)
met.no.profiles = getProfiles(met.samples.no.com, methyl.no)
#####------------------------------------------------------------------
# recombine tumor and normal samples of each data type
#####------------------------------------------------------------------
ge.profiles = cbind(ge.tu.profiles, ge.no.profiles)
met.profiles = cbind(met.tu.profiles, met.no.profiles)
#####------------------------------------------------------------------
# in RNAseq dataset replace zeroes with NAs
#####------------------------------------------------------------------
cat("In RNASeq dataset: replace zeroes with NA\n")
ge.profiles.na = ge.profiles
ge.profiles.na[which(ge.profiles.na == 0)] = NA
#####------------------------------------------------------------------
# in MET dataset replace zeroes with a very small value to avoid
# infinite values after transformation
#####------------------------------------------------------------------
cat("In Methyl dataset: replace zeroes with", .Machine$double.eps, "\n")
met.profiles.eps = met.profiles
met.profiles.eps[which(met.profiles.eps == 0)] = .Machine$double.eps
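# (a beta of 0 would otherwise give M = log2(0/1) = -Inf in the transformation below)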
#####------------------------------------------------------------------
# remove cases with more than 10% NAs
#####------------------------------------------------------------------
NAs.th = 10
cat("Remove features (rows) with more than", NAs.th, "% NAs\n")
ge.percent.na = apply(ge.profiles.na, 1, function(r) {length(which(is.na(r)))/length(r)*100})
met.percent.na = apply(met.profiles.eps, 1, function(r) {length(which(is.na(r)))/length(r)*100})
ge.profiles.dupl = ge.profiles.na[which(ge.percent.na < NAs.th),]
met.profiles.dupl = met.profiles.eps[which(met.percent.na < NAs.th),]
cat("Dimensions after removing cases with >", NAs.th, "% NAs:",
"\n GeneExp: ", dim(ge.profiles.dupl),
"\n Methylation: ", dim(met.profiles.dupl),
"\n")
#####------------------------------------------------------------------
# Remove duplicated rows (features with same unique identifier
# referring to multiple gene symbols) from the datasets.
#####------------------------------------------------------------------
cat("Remove duplicated rows (features with the same unique identifier\n",
"referring to multiple gene symbols) from datasets.\n")
ge.profiles.filter = ge.profiles.dupl[!duplicated(rownames(ge.profiles.dupl)),]
met.profiles.filter = met.profiles.dupl[!duplicated(rownames(met.profiles.dupl)),]
cat("Dimensions after removing duplicated rows:",
"\n GeneExp: ", dim(ge.profiles.filter),
"\n Methylation: ", dim(met.profiles.filter),
"\n")
#####------------------------------------------------------------------
# save Histograms of datasets before transformation as .png and .eps
#####------------------------------------------------------------------
subsets.list <- list(ge.profiles.filter, met.profiles.filter)
names(subsets.list) <- c("RNA-seq Gene Expression Levels",
"DNA-Methylation Levels - Beta-values")
# print to eps
postscript(file = file.path(sub.dir.figures, "subsets_hist.eps"),
onefile = FALSE, width = 8, height = 3.5, paper = "special",
family = "serif", horizontal = FALSE)
y = c(2*10^-5, 10)
par(mfrow = c(1,2), mar = c(2,2,6,2))
invisible(sapply(seq(1,2,1), function(x, data){
hist(data[[x]], prob = T, breaks = 10,
main = names(data[x]),
ylim = c(0,y[x]))
lines(density(data[[x]], na.rm = TRUE), lwd = 2, col = "darkblue")
}, data = subsets.list))
par(mfrow = c(1,1), mar = c(1,1,1,1), cex = 1)
title(main = paste("Histogram before Transformation"))
invisible(dev.off())
# print to png
png(file.path(sub.dir.figures, "subsets_hist.png"), width = 3000,
height = 1300, res = 300)
y = c(2*10^-5, 10)
par(mfrow = c(1,2), mar = c(2,2,6,2))
invisible(sapply(seq(1,2,1), function(x, data){
hist(data[[x]], prob = T, breaks = 10,
main = names(data[x]),
ylim = c(0,y[x]))
lines(density(data[[x]], na.rm = TRUE), lwd = 2, col = "darkblue")
}, data = subsets.list))
par(mfrow = c(1,1), mar = c(1,1,1,1), cex = 1)
title(main = paste("Histogram before Transformation"))
invisible(dev.off())
cat("Save Histogram before transformation and imputation ... \n")
#####------------------------------------------------------------------
# impute NA GeneExp and Methylation values
#####------------------------------------------------------------------
# Imputation of missing values with half the lowest value in each feature
cat("Impute low-expression values in RNA-Seq data with half",
"of the lowest value in each feature.\n")
ge.profiles.im = t(apply(ge.profiles.filter, MARGIN = 1, FUN = function(x) {
x[which(is.na(x))] = min(x, na.rm = TRUE)/2; x}))
cat("Impute missing beta values in Methylation data with half",
"of the lowest value in each feature.\n")
met.profiles.im = t(apply(met.profiles.filter, MARGIN = 1, FUN = function(x) {
x[which(is.na(x))] = min(x, na.rm = TRUE)/2; x}))
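# e.g. a feature measured as c(NA, 4, 8) becomes c(2, 4, 8) (min/2 = 2)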
#####------------------------------------------------------------------
# log2 transform gene expression data
#####------------------------------------------------------------------
pc = 0
cat("Add pseudocount of", pc, "and log2 transform RNA-seq data.\n")
ge.profiles.tr = log2(ge.profiles.im+pc)
saveProfiles(ge.profiles.tr, "GeneExp_TCGAbarcode", save.RData = FALSE)
# Reduce sample names to "tumor_R", "normal_R", "tumor_D", ..
colnames(ge.profiles.tr) =
paste(sampleType(split_names(colnames(ge.profiles.tr))),
analyteType(split_names(colnames(ge.profiles.tr))), sep = "_")
#####------------------------------------------------------------------
# transform methylation data from beta- to M-values
#####------------------------------------------------------------------
cat("Transform Methylation data from beta- to M-values ...\n")
cat("Reverse sign of M-values to mirror distribution at 0.\n")
met.profiles.tr = -1*log2(met.profiles.im/(1-met.profiles.im))
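# sanity check of the mapping: beta = 0.5 -> M = 0; beta = 0.9 -> log2(9) ~ 3.17,
# sign-reversed here to ~ -3.17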
saveProfiles(met.profiles.tr, "Methyl_TCGAbarcode", save.RData = FALSE)
# Reduce sample names to "tumor_R", "normal_R", "tumor_D", ..
colnames(met.profiles.tr) =
paste(sampleType(split_names(colnames(met.profiles.tr))),
analyteType(split_names(colnames(met.profiles.tr))), sep = "_")
#####------------------------------------------------------------------
# save Histograms of datasets after transformation to .png and .eps
#####------------------------------------------------------------------
subsets.tr.list <- list(ge.profiles.tr, met.profiles.tr)
names(subsets.tr.list) <- c("Log2 of Gene Expression Levels",
"DNA-Methylation Levels - M-values")
# print to eps
postscript(file = file.path(sub.dir.figures, "subsets_hist_tr.eps"),
onefile = FALSE, width = 8, height = 3.5, paper = "special",
family = "serif", horizontal = FALSE)
y = c(0.2,0.3)
par(mfrow = c(1,2), mar = c(2,2,6,2))
invisible(sapply(seq(1,2,1), function(x, data){
hist(data[[x]], prob = T, breaks = 20, main = names(data[x]), ylim = c(0,y[x]))
lines(density(data[[x]], adjust = 3), lwd = 2, col = "darkblue")
}, data = subsets.tr.list))
par(mfrow = c(1,1), mar = c(1,1,1,1), cex = 1)
title(main = paste("Histogram after Transformation and Imputation"))
invisible(dev.off())
# print to png
png(file.path(sub.dir.figures, "subsets_hist_tr.png"), width = 3000,
height = 1300, res = 300)
y = c(0.2,0.3)
par(mfrow = c(1,2), mar = c(2,2,6,2))
invisible(sapply(seq(1,2,1), function(x, data){
hist(data[[x]], prob = T, breaks = 20, main = names(data[x]), ylim = c(0,y[x]))
lines(density(data[[x]], adjust = 3), lwd = 2, col = "darkblue")
}, data = subsets.tr.list))
par(mfrow = c(1,1), mar = c(1,1,1,1), cex = 1)
title(main = paste("Histogram after Transformation and Imputation"))
invisible(dev.off())
cat("Save Histogram after transformation and imputation ... \n")
#####------------------------------------------------------------------
# assess differences in mean expression value per gene btw. groups
#####------------------------------------------------------------------
cat("Assess log2 fold change of gene expression:\n")
cat("Back-transform to expression levels to calculate log fold change.\n")
ge.tu.means = rowMeans(2^ge.profiles.tr[,grep("tumor", colnames(ge.profiles.tr))])
ge.no.means = rowMeans(2^ge.profiles.tr[,grep("normal", colnames(ge.profiles.tr))])
cat("Calculate log-FC: log2(mean(tumor)/mean(normal))\n")
ge.means.lfc = log2(ge.tu.means/ge.no.means)
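# e.g. mean(tumor) = 400, mean(normal) = 50 -> log2(400/50) = 3, i.e. 8-fold up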
ge.lfc.th = 3.5
ge.sig.diff = length(which(abs(ge.means.lfc) > ge.lfc.th))
cat("Thresholds for differential gene expression:\n log2 Fold Change",
ge.lfc.th, "\n")
cat("Number of features (%) considered as differentially expressed:\n",
ge.sig.diff, "(", round(ge.sig.diff/nrow(ge.profiles.tr)*100, 2), "% )\n")
# Plot and save Histogram of logFC in gene expression data
# print to eps
postscript(file = file.path(sub.dir.figures, paste0("GeneExp_logFC", ge.lfc.th, ".eps")),
onefile = FALSE, width = 8, height = 4.2, paper = "special",
family = "serif", horizontal = FALSE)
hist(ge.means.lfc, prob = T,
main = "Log Fold Change of gene expression in tumor vs. normal samples",
xlim = c(-5,5), xlab = ("LFC"),
ylim = c(0,max(density(ge.means.lfc)$y)))
axis(side = 1, at = c(-ge.lfc.th, ge.lfc.th))
lines(density(ge.means.lfc, adjust = 3), lwd = 2, col = "darkblue")
Fn = ecdf(ge.means.lfc)
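# Fn is the empirical CDF of the log fold changes; Fn(+-3.5) gives the
# fraction of genes at or below each threshold and places the quantile
# lines and percentage labels below.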
abline(v = quantile(ge.means.lfc, probs = Fn(c(-ge.lfc.th, ge.lfc.th))))
text(x = c(-ge.lfc.th, ge.lfc.th), y = c(0.6,0.6), pos = 4,
labels = paste0(round(Fn(c(-ge.lfc.th, ge.lfc.th))*100, 2), "%"))
invisible(dev.off())
# print to png
png(file.path(sub.dir.figures, paste0("GeneExp_logFC", ge.lfc.th, ".png")),
width = 3000, height = 1500, res = 300)
hist(ge.means.lfc, prob = T,
main = "Log Fold Change of gene expression in tumor vs. normal samples",
xlim = c(-5,5), xlab = ("LFC"),
ylim = c(0,max(density(ge.means.lfc)$y)))
axis(side = 1, at = c(-ge.lfc.th, ge.lfc.th))
lines(density(ge.means.lfc, adjust = 3), lwd = 2, col = "darkblue")
Fn = ecdf(ge.means.lfc)
abline(v = quantile(ge.means.lfc, probs = Fn(c(-ge.lfc.th, ge.lfc.th))))
text(x = c(-ge.lfc.th, ge.lfc.th), y = c(0.6,0.6), pos = 4,
labels = paste0(round(Fn(c(-ge.lfc.th, ge.lfc.th))*100, 2), "%"))
invisible(dev.off())
#####------------------------------------------------------------------
# assess differences in methylation levels per site btw. groups
#####------------------------------------------------------------------
cat("Assess difference in means of Methylation levels:\n")
cat("Calulate means of M-values.\n")
met.tu.means = rowMeans(met.profiles.tr[,grep("tumor", colnames(met.profiles.tr))])
met.no.means = rowMeans(met.profiles.tr[,grep("normal", colnames(met.profiles.tr))])
# find differentially methylated CpG sites by assessing absolute
# difference between M-values (Du, 2010)
cat("Calculate difference of mean M-values.\n")
met.means.diff = met.tu.means - met.no.means
met.diff.th = 2.2
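# An absolute M-value difference of 2.2 corresponds to a 2^2.2 (~4.6)-fold
# change in the methylated/unmethylated intensity ratio.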
met.sig.diff = length(which(abs(met.means.diff) > met.diff.th))
cat("Thresholds for differential methylation:\nabsolute difference in mean",
met.diff.th, "\n")
cat("Number of features (%) considered as significantly different:\n",
met.sig.diff, "(", round(met.sig.diff/nrow(met.profiles.tr)*100, 2), "% )\n")
# print to eps
postscript(file = file.path(sub.dir.figures, paste0("Methyl_mean_diff", met.diff.th, ".eps")),
onefile = FALSE, width = 8, height = 4.2, paper = "special",
family = "serif", horizontal = FALSE)
hist(met.means.diff, prob = T,
main = "Difference in mean methylation level of tumor and normal samples",
xlim = c(-4,4), xlab = "M-value difference",
ylim = c(0, max(density(met.means.diff)$y)))
axis(side = 1, at = c(-met.diff.th, met.diff.th))
lines(density(met.means.diff, adjust = 3), lwd = 2, col = "darkblue")
Fn = ecdf(met.means.diff)
abline(v = quantile(met.means.diff, probs = Fn(c(-met.diff.th, met.diff.th))))
text(x = c(-met.diff.th, met.diff.th), y = c(1.5,1.5), pos = 4,
labels = paste0(round(Fn(c(-met.diff.th, met.diff.th))*100, 2), "%"))
invisible(dev.off())
# print to png
png(file.path(sub.dir.figures,paste0("Methyl_mean_diff", met.diff.th, ".png")),
width = 3000, height = 1500, res = 300)
hist(met.means.diff, prob = T,
main = "Difference in mean methylation level of tumor and normal samples",
xlim = c(-4,4), xlab = "M-value difference",
ylim = c(0, max(density(met.means.diff)$y)))
axis(side = 1, at = c(-met.diff.th, met.diff.th))
lines(density(met.means.diff, adjust = 3), lwd = 2, col = "darkblue")
Fn = ecdf(met.means.diff)
abline(v = quantile(met.means.diff, probs = Fn(c(-met.diff.th, met.diff.th))))
text(x = c(-met.diff.th, met.diff.th), y = c(1.5,1.5), pos = 4,
labels = paste0(round(Fn(c(-met.diff.th, met.diff.th))*100, 2), "%"))
invisible(dev.off())
#####------------------------------------------------------------------
# save profiles of samples common to all subsets to .txt and .RData
#####------------------------------------------------------------------
saveProfiles(ge.profiles.tr, "GeneExp")
saveProfiles(met.profiles.tr, "Methyl")
#####------------------------------------------------------------------
# Prepare datasets for cross validation
#####------------------------------------------------------------------
if(.prepare.CV == TRUE){
source(file.path(.src.dir, "crossValidation.R"))
data.names = c("GeneExp", "Methyl")
datasets = paste0(data.names, ".RData")
data.paths = as.list(file.path(sub.dir.RData, datasets))
data.raw = lapply(data.paths, readRDS)
names(data.raw) = data.names
set.seed(.biolog.seed)
randomSplit(data.raw, subsets = .subsets.val, .pr.train,
samples.paired = TRUE)
}
sink()
close(log.con)
|
/src/biologicalData.R
|
no_license
|
ThallingerLab/IamComparison
|
R
| false | false | 24,413 |
r
|
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId], title = as.character(title), genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>% semi_join(edx, by = "movieId") %>% semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# setting the random number generator
set.seed(1998, sample.kind = "Rounding")
# creating a series of test/training partitions
test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.2, list = FALSE)
train_set <- edx[-test_index,]
test_set <- edx[test_index,]
#matching the test set to train set
test_set <- test_set %>% semi_join(train_set, by = "movieId") %>% semi_join(train_set, by = "userId")
#creating RMSE function
RMSE <- function(true_ratings, predicted_ratings){
sqrt(mean((true_ratings - predicted_ratings)^2))
}
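# quick sanity check (hypothetical values): RMSE(c(3, 4), c(2.5, 3.5)) == 0.5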
# overall average rating and movie bias (b_i)
mu <- mean(train_set$rating)
movie_avgs <- train_set %>% group_by(movieId) %>% summarize(b_i = mean(rating - mu))
predicted_ratings <- mu + test_set %>% left_join(movie_avgs, by='movieId') %>% pull(b_i)
# fit <- lm(rating ~ as.factor(movieId) + as.factor(userId))
# estimate user effects on the training set (using the test set here
# would leak information into the predictions)
user_avgs <- train_set %>% left_join(movie_avgs, by='movieId') %>%
  group_by(userId) %>% summarize(b_u = mean(rating - mu - b_i))
predicted_ratings <- test_set %>% left_join(movie_avgs, by='movieId') %>% left_join(user_avgs, by='userId') %>%
mutate(pred = mu + b_i + b_u) %>% .$pred
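# prediction model: Y_ui = mu + b_i + b_u + eps_ui
# (overall mean plus additive movie and user effects)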
model_rmse <- RMSE(predicted_ratings, test_set$rating)
model_rmse
|
/edxmovielensproj.r
|
no_license
|
bushdanielkwajaffa/edxcapstone
|
R
| false | false | 2,791 |
r
|
# ====== #
# Author: Robert Smith
# Contact: rasmith3@sheffield.ac.uk
# Project: HEAT VSLY scripts
# Description: This script creates the plots from the results file, it is stand-alone.
# ====== #
library(kableExtra)
library(DT)
library(reshape2)
library(ggplot2)
library(ggrepel)
library(gridExtra)
library(dplyr)
rm(list=ls())
#--- PLOTTING COLOURS ---#
# fixed to ensure consistency
col_list = list(col_vsly = "blue",
col_heat1 = "red",
col_vsl55 = "green")
params <- list()
params$heat_or_gbd = "gbd" # select either heat or gbd.
#--- LOAD RESULTS ---#
# either heat or gbd
if(params$heat_or_gbd == "gbd"){
results <- read.csv("./outputs/heat_results.csv",stringsAsFactors = F,row.names = 1)
}else if(params$heat_or_gbd == "heat"){
results <- read.csv("./outputs/gbd_results.csv",stringsAsFactors = F,row.names = 1)
}
#---- Table 1 ----
# Latex
kable(x = results[c("FRA", "DEU","LUX","ROU","LVA", "POL"),1:4],
align = 'c',
format = "latex",
digits = 2,
col.names = c("VSLY","Heat1","Heat2","VSL55"),
caption = "Walking age 20-74")
# datatable
table1 <- datatable(round(results[c("FRA", "DEU","LUX","ROU","LVA", "POL"),1:4],2),
colnames = c("VSLY","Heat1","Heat2","VSL55"),
options = list(dom = 't'))
# SAVE DATATABLE either heat or gbd
# (a DT widget is not a ggplot object, so ggsave() fails on it; save it
# as a self-contained html widget instead)
if(params$heat_or_gbd == "gbd"){
  htmlwidgets::saveWidget(table1, file.path(getwd(), "outputs", "gbd_table1.html"))
}else if(params$heat_or_gbd == "heat"){
  htmlwidgets::saveWidget(table1, file.path(getwd(), "outputs", "heat_table1.html"))
}
# full table of results for all countries
datatable(results[,1:4],
colnames = c("VSLY","Heat1","Heat2","VSL55"))
#---- Figure 1 ----
# Comparing estimated monetary benefit per capita (2017 Euro) using four approaches.
# construct data-frame.
df <- data.frame(vsly = results[,"vsly_w2074"],
heat1 = results[,"heat1_w2074"],
heat2 = results[,"heat2_w2074"],
vsl55 = results[,"vsl55_w2074"],
country = rownames(results))
#class(df$country) = "character"
# change to long format.
long <- melt(data=df,
measure.vars =c("vsly", "heat1","vsl55"),
variable.name = "method",
value.name = "estimate")
# create plot
fig1 <- ggplot(data = long,
aes(x=heat2,y=estimate))+
theme_classic()+
geom_point(aes(colour = method))+
geom_abline(slope = 1)+
#annotate(geom="text", x=130, y=150,
# label="VSLY = HEAT 2Grp", color="black")+
labs(title = "Estimated Annual Monetary Benefit per capita (in 2017 Euro) in Scenario 1",
subtitle = "Comparing alternative methods to HEAT-2Grp",
caption = "Data Sources: WHO Mort, GBD Pop, HEAT VSL",
x = "HEAT 2 Group Method (Euro)",
y = "Other Methods (Euro)",
col = "Model")+
xlim(0, 150) + ylim(0,150)+
geom_label_repel(data = long[long$method=="vsly",],
label = long$country[long$method=="vsly"],
size = 2,nudge_x = 2, nudge_y = -5,direction = "y",
segment.color = "blue",colour = "blue")+
theme(legend.position = c(0.9, 0.2))+
scale_colour_manual(values = c(vsly = col_list$col_vsly,
heat1 = col_list$col_heat1,
vsl55 = col_list$col_vsl55))
# SAVE FIG1 either heat or gbd
if(params$heat_or_gbd == "gbd"){
ggsave(filename = "./outputs/gbd_figure1.png",plot = fig1, width = 8, height = 8)
}else if(params$heat_or_gbd == "heat"){
ggsave(filename = "./outputs/heat_figure1.png",plot = fig1, width = 8, height = 8)
}
#---- Figure 2 ----
# Population age 20-44
long_yng <- data.frame(vsly = results[,"vsly_w2044"],
heat1 = results[,"heat1_w2044"],
heat2 = results[,"heat2_w2044"],
vsl55 = results[,"vsl55_w2044"],
country = rownames(results)) %>%
melt(.,
measure.vars =c("vsly", "heat1","vsl55"),
variable.name = "method",
value.name = "estimate")
class(long_yng$country) = "character"
long_old <- data.frame(vsly = results[,"vsly_w4574"],
heat1 = results[,"heat1_w4574"],
heat2 = results[,"heat2_w4574"],
vsl55 = results[,"vsl55_w4574"],
country = rownames(results)) %>%
melt(data=.,
measure.vars =c("vsly", "heat1","vsl55"),
variable.name = "method",
value.name = "estimate")
class(long_old$country) = "character"
# create young plot
plot_yng <- ggplot(data = long_yng,
aes(x=heat2,y=estimate, col = method))+
theme_classic()+
geom_point()+
geom_abline(slope = 1)+
labs(title = "Age 20-44",
#subtitle = "Comparing alternative methods to HEAT-2Grp",
caption = "Data Sources: WHO Mort, GBD Pop, HEAT VSL",
x = "HEAT 2 Group Method (Euro)",
y = "Other Methods (Euro)",
col = "Model")+
xlim(0, 150) + ylim(0,150)+
theme(legend.position = c(0.9, 0.2),plot.title = element_text(hjust = 0.5))+
scale_colour_manual(values = c(vsly=col_list$col_vsly,
heat1 = col_list$col_heat1,
vsl55 = col_list$col_vsl55))
# create old plot
plot_old <- ggplot(data = long_old,
aes(x=heat2, y=estimate,col=method))+
theme_classic()+
geom_point()+
geom_abline(slope = 1)+
labs(title = "Age 45-74",
caption = "Data Sources: WHO Mort, GBD Pop, HEAT VSL",
x = "HEAT 2 Group Method (Euro)",
y = "Other Methods (Euro)",
col = "Model")+
xlim(0, 150) + ylim(0,150)+
theme(legend.position = c(0.9, 0.2),plot.title = element_text(hjust = 0.5))+
scale_colour_manual(values = c(vsly = col_list$col_vsly,
heat1 = col_list$col_heat1,
vsl55 = col_list$col_vsl55))
fig2 <- grid.arrange(plot_yng, plot_old, ncol=2) # create gridplot
# SAVE FIG2 either heat or gbd
if(params$heat_or_gbd == "gbd"){
ggsave(filename = "./outputs/gbd_figure2.png",plot = fig2, width = 8, height = 8)
}else if(params$heat_or_gbd == "heat"){
ggsave(filename = "./outputs/heat_figure2.png",plot = fig2, width = 8, height = 8)
}
#-------------#
# Figure 3 #
#-------------#
d.f <- read.csv("outputs/gbd_age_results.csv",row.names = 1,col.names = c("","vsly","heat1","heat2","vsl55"))
d.f$age <- 1:100
# melt dataframe to use ggplot.
d.f <- melt(d.f,measure.vars = c("vsly","heat1","heat2","vsl55")); colnames(d.f)[colnames(d.f)=="variable"] <- "Model"
# ggplot
fig3 <- (ggplot(d.f, aes(x = age, y= value, col = Model))+
theme_classic()+
geom_step()+
labs(caption = "Data Sources: WHO Mort, GBD Pop, HEAT VSL", # note need main title in paper.
x = "Age",
y = "Monetary Benefit (Euro)")+
scale_x_continuous(limits = c(20,74),
breaks = c(20,30,40,50,60,70))+
#xlim(20,74)+
ylim(c(0,60))+
theme(axis.text.x = element_text(size = 8),
axis.text.y = element_text(size = 8)) +
theme(legend.position = c(0.1, 0.8)) +
scale_colour_manual(values = c(vsly = col_list$col_vsly,
heat1 = col_list$col_heat1,
vsl55 = col_list$col_vsl55,
heat2 = "black"))
)
# SAVE FIG3 either heat or gbd
if(params$heat_or_gbd == "gbd"){
ggsave(filename = "./outputs/gbd_figure3.png",plot = fig3, width = 8, height = 8)
}else if(params$heat_or_gbd == "heat"){
ggsave(filename = "./outputs/heat_figure3.png",plot = fig3, width = 8, height = 8)
}
|
/R/getting_plots.R
|
no_license
|
RobertASmith/heat_vsly_public
|
R
| false | false | 7,961 |
r
|
library(marmap)
### Name: autoplot.bathy
### Title: Plotting bathymetric data with ggplot
### Aliases: autoplot.bathy
### ** Examples
# load NW Atlantic data and convert to class bathy
data(nw.atlantic)
atl <- as.bathy(nw.atlantic)
# basic plot
## Not run:
##D library("ggplot2")
##D autoplot(atl)
##D
##D # plot images
##D autoplot(atl, geom=c("tile"))
##D autoplot(atl, geom=c("raster")) # faster but not resolution independant
##D
##D # plot both!
##D autoplot(atl, geom=c("raster", "contour"))
##D
##D # geom names can be abbreviated
##D autoplot(atl, geom=c("r", "c"))
##D
##D # do not highlight the coastline
##D autoplot(atl, coast=FALSE)
##D
##D # better colour scale
##D autoplot(atl, geom=c("r", "c")) +
##D scale_fill_gradient2(low="dodgerblue4", mid="gainsboro", high="darkgreen")
##D
##D # set aesthetics
##D autoplot(atl, geom=c("r", "c"), colour="white", size=0.1)
##D
##D # topographical colour scale, see ?scale_fill_etopo
##D autoplot(atl, geom=c("r", "c"), colour="white", size=0.1) + scale_fill_etopo()
##D
##D # add sampling locations
##D data(metallo)
##D last_plot() + geom_point(aes(x=lon, y=lat), data=metallo, alpha=0.5)
##D
##D # an alternative contour map making use of additional mappings
##D # see ?stat_contour in ggplot2 to understand the ..level.. argument
##D autoplot(atl, geom="contour", mapping=aes(colour=..level..))
## End(Not run)
|
/data/genthat_extracted_code/marmap/examples/autoplot.bathy.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,443 |
r
|
#' @export
plot_layout_vis.plotly <- function(p_obj, # An empty plotly object.
x, # Named list with x_ticks and x_labels.
y, # Named list with y_ticks and y_labels.
distribution = c(
"weibull", "lognormal", "loglogistic",
"sev", "normal", "logistic",
"exponential"
),
title_main = "Probability Plot",
title_x = "Characteristic",
title_y = "Unreliability"
) {
distribution <- match.arg(distribution)
# Configuration of x axis:
x_config <- list(
title = list(
text = title_x
),
autorange = TRUE,
rangemode = "nonnegative",
ticks = "inside",
tickwidth = 1,
tickfont = list(family = 'Arial', size = 10),
#tickmode = "array",
tickangle = 90,
showticklabels = TRUE,
zeroline = FALSE,
showgrid = TRUE,
gridwidth = 1,
exponentformat = "none",
showline = TRUE,
linecolor = "#a0a0a0"
)
## Distributions that need a log transformed x axis:
if (distribution %in% c("weibull", "lognormal", "loglogistic")) {
x_config <- c(
x_config,
list(
type = "log",
tickvals = x$x_ticks,
ticktext = x$x_labels
)
)
}
# Configuration y axis:
## Adjust y values for exponential distribution (no overlapping):
if (distribution != "exponential") {
y_tickvals <- y$y_ticks
y_ticktext <- y$y_labels
} else {
### Smarter values for exponential:
y_labs <- c(.01, .1, .2, .3, .5, .6, .7, .8, .9, .95, .99,
.999, .9999, .99999) * 100
ind <- y$y_labels %in% y_labs
y_tickvals <- y$y_ticks[ind]
y_ticktext <- y$y_labels[ind]
}
y_config <- list(
title = list(
text = title_y
),
autorange = TRUE,
tickvals = y_tickvals,
ticktext = y_ticktext,
ticks = "inside",
tickwidth = 1,
tickfont = list(family = 'Arial', size = 10),
showticklabels = TRUE,
zeroline = FALSE,
showgrid = TRUE,
gridwidth = 1,
exponentformat = "none",
showline = TRUE,
linecolor = "#a0a0a0"
)
# Configuration of legend:
l <- list(
title = list(
font = list(
family = "Arial",
size = 10,
color = "#000000"
)
)
)
# Layout margins:
m <- list(
l = 55,
r = 10,
b = 55,
t = 25,
pad = 4
)
title <- list(
text = title_main,
font = list(
family = "Arial",
size = 16,
color = "#000000"
)
)
# Create grid:
p_obj <- p_obj %>%
plotly::layout(
title = title,
separators = ".",
legend = l,
xaxis = x_config,
yaxis = y_config,
margin = m
)
p_obj
}
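# Minimal usage sketch (the tick values below are hypothetical; a real
# probability plot supplies transformed quantiles here):
# p <- plot_layout_vis.plotly(
#   plotly::plot_ly(),
#   x = list(x_ticks = c(10, 100, 1000), x_labels = c("10", "100", "1000")),
#   y = list(y_ticks = c(-2, 0, 2), y_labels = c(2.3, 50, 98.2)),
#   distribution = "weibull"
# )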
#' @export
plot_prob_vis.plotly <- function(p_obj,
tbl_prob,
distribution = c(
"weibull", "lognormal", "loglogistic",
"sev", "normal", "logistic",
"exponential"
),
title_main = "Probability Plot",
title_x = "Characteristic",
title_y = "Unreliability",
title_trace = "Sample"
) {
distribution <- match.arg(distribution)
mark_x <- unlist(strsplit(title_x, " "))[1]
mark_y <- unlist(strsplit(title_y, " "))[1]
# Suppress warning by subsetting with character:
n_group <- length(unique(tbl_prob[["group"]]))
n_method <- length(unique(tbl_prob$cdf_estimation_method))
color <- if (n_method == 1) I("#3C8DBC") else ~cdf_estimation_method
symbol <- if (n_group == 0) NULL else ~group
name <- to_name(tbl_prob, n_method, n_group, title_trace)
# Construct probability plot:
p_prob <- p_obj %>%
plotly::add_trace(
data = tbl_prob,
x = ~x,
y = ~q,
type = "scatter",
mode = "markers",
hoverinfo = "text",
name = name,
color = color,
colors = "Set2",
symbol = symbol,
legendgroup = ~cdf_estimation_method,
text = paste(
"ID:", tbl_prob$id,
paste("<br>", paste0(mark_x, ":")), format(tbl_prob$x, digits = 3),
paste("<br>", paste0(mark_y, ":")), format(tbl_prob$prob, digits = 6)
)
) %>%
plotly::layout(showlegend = TRUE)
p_prob
}
#' @export
plot_mod_vis.plotly <- function(p_obj,
tbl_mod,
title_trace = "Fit"
) {
x_mark <- unlist(strsplit(p_obj$x$layoutAttrs[[2]]$xaxis$title$text, " "))[1]
y_mark <- unlist(strsplit(p_obj$x$layoutAttrs[[2]]$yaxis$title$text, " "))[1]
n_method <- length(unique(tbl_mod$cdf_estimation_method))
n_group <- length(unique(tbl_mod$group))
color <- if (n_method == 1) I("#CC2222") else ~cdf_estimation_method
## Creation of hovertext
arg_list <- list(
x = tbl_mod$x_p,
y = tbl_mod$y_p,
param_val = tbl_mod$param_val,
param_label = tbl_mod$param_label
)
# tbl_mod has names lower / upper if set in plot_conf()
if (hasName(tbl_mod, "lower")) {
arg_list$lower <- tbl_mod$lower
}
if (hasName(tbl_mod, "upper")) {
arg_list$upper <- tbl_mod$upper
}
tbl_mod <- tbl_mod %>%
dplyr::mutate(
hovertext = purrr::pmap_chr(
arg_list,
hovertext_mod,
x_mark = x_mark,
y_mark = y_mark
)
)
# Reminder: Splitting the line by group happens by using the name
name <- to_name(tbl_mod, n_method, n_group, title_trace)
p_mod <- plotly::add_lines(
p = p_obj,
data = tbl_mod,
x = ~x_p,
y = ~q,
type = "scatter",
mode = "lines",
hoverinfo = "text",
name = name,
color = color,
colors = "Set2",
legendgroup = ~cdf_estimation_method,
text = ~hovertext
)
p_mod
}
#' @export
plot_conf_vis.plotly <- function(p_obj,
tbl_p,
title_trace
) {
# Get axis labels in hover:
x_mark <- unlist(strsplit(p_obj$x$layoutAttrs[[2]]$xaxis$title$text, " "))[1]
y_mark <- unlist(strsplit(p_obj$x$layoutAttrs[[2]]$yaxis$title$text, " "))[1]
n_method <- length(unique(tbl_p$cdf_estimation_method))
color <- if (n_method == 1) I("#CC2222") else ~cdf_estimation_method
name <- to_name(tbl_p, n_method, n_group = 0, title_trace)
p_conf <- plotly::add_lines(
p = p_obj,
# tbl_p is grouped by bound. Therefore two separate lines are drawn
# for two-sided confidence intervals
data = tbl_p,
x = ~x, y = ~q,
type = "scatter", mode = "lines",
# hoverinfo text is set in plot_mod
hoverinfo = "skip",
line = list(dash = "dash", width = 1),
color = color,
colors = "Set2",
name = name,
legendgroup = ~cdf_estimation_method
)
p_conf
}
#' @export
plot_pop_vis.plotly <- function(p_obj,
tbl_pop,
title_trace
) {
# Get axis labels in hover
x_mark <- unlist(strsplit(p_obj$x$layoutAttrs[[2]]$xaxis$title$text, " "))[1]
y_mark <- unlist(strsplit(p_obj$x$layoutAttrs[[2]]$yaxis$title$text, " "))[1]
# Hovertext and name
tbl_pop <- tbl_pop %>%
dplyr::mutate(
hovertext = purrr::pmap_chr(
list(
x = .data$x_s,
y = .data$y_s,
param_val = .data$param_val,
param_label = .data$param_label
),
hovertext_mod,
x_mark = x_mark,
y_mark = y_mark
),
name = purrr::map2_chr(.data$param_val, .data$param_label, to_name_pop)
)
p_pop <- plotly::add_lines(
p = p_obj, data = tbl_pop,
x = ~x_s, y = ~q,
type = "scatter",
mode = "lines",
hoverinfo = "text",
# color = ~group,
colors = "Set2",
name = ~name,
line = list(width = 1),
text = ~hovertext
) %>%
plotly::layout(
showlegend = TRUE,
legend = list(
title = list(
text = title_trace
)
)
)
p_pop
}
# Hover text for plot_mod() and plot_conf():
hovertext_mod <- function(x,
y,
param_val,
param_label,
x_mark,
y_mark,
lower = NULL,
upper = NULL
) {
not_na <- !is.na(param_val)
x_text <- paste0(x_mark, ": ", format(x, digits = 3))
y_text <- paste0(y_mark, ": ", format(y, digits = 3))
lower_text <- if (!is.null(lower))
paste("Lower Bound:", format(lower, digits = 3))
upper_text <- if (!is.null(upper))
paste("Upper Bound:", format(upper, digits = 3))
param_text <- paste(param_label[not_na], param_val[not_na], collapse = ", ")
do.call(
paste,
c(
# Drop NULLs, otherwise paste will add one <br> per NULL
purrr::compact(
list(
x_text,
y_text,
lower_text,
upper_text,
param_text
)
),
sep = "<br>"
)
)
}
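# e.g. hovertext_mod(x = 100, y = 0.5, param_val = 2, param_label = "eta:",
#                    x_mark = "Characteristic", y_mark = "Unreliability")
# returns "Characteristic: 100<br>Unreliability: 0.5<br>eta: 2"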
# Trace name for plot_pop():
to_name_pop <- function(param_val,
param_label
) {
not_na <- !is.na(param_val)
paste(param_label[not_na], param_val[not_na], collapse = ", ")
}
# Trace name for plot_prob(), plot_mod() and plot_conf():
to_name <- function(tbl,
n_method,
n_group,
title_trace
) {
if (n_method <= 1) {
if (n_group <= 1) {
title_trace
} else {
paste0(title_trace, ": ", tbl$group)
}
} else {
if (n_group <= 1) {
paste0(title_trace, ": ", tbl$cdf_estimation_method)
} else {
paste0(title_trace, ": ", tbl$cdf_estimation_method, ", ", tbl$group)
}
}
}
|
/R/plot_functions_plotly.R
|
no_license
|
jiaoruohong/weibulltools
|
R
| false | false | 10,004 |
r
|
library(shiny)
library(xtable)
clist<- c("Norway","Australia","Switzerland","Netherlands","United States","Germany","New Zealand","Canada","Singapore","Denmark","Ireland","Sweden","Iceland","United Kingdom","Hong Kong, China (SAR)","Korea (Republic of)",
"Japan","Liechtenstein","Israel","France","Austria","Belgium","Luxembourg","Finland","Slovenia","Italy","Spain","Czech Republic","Greece","Brunei Darussalam","Qatar","Cyprus","Estonia","Saudi Arabia","Lithuania","Poland","Andorra","Slovakia","Malta","United Arab Emirates","Chile","Portugal","Hungary","Bahrain",
"Cuba","Kuwait","Croatia","Latvia","Argentina","Uruguay","Bahamas","Montenegro","Belarus","Romania","Libya","Oman","Russian Federation","Bulgaria","Barbados","Palau","Antigua and Barbuda","Malaysia","Mauritius","Trinidad and Tobago","Lebanon","Panama","Venezuela (Bolivarian Republic of)","Costa Rica","Turkey","Kazakhstan","Mexico","Seychelles","Saint Kitts and Nevis","Sri Lanka","Iran (Islamic Republic of)",
"Azerbaijan","Jordan","Serbia","Brazil","Georgia","Grenada","Peru","Ukraine","Belize","The former Yugoslav Republic of Macedonia",
"Bosnia and Herzegovina","Armenia","Fiji","Thailand","Tunisia","China","Saint Vincent and the Grenadines",
"Algeria","Dominica","Albania","Jamaica","Saint Lucia","Colombia","Ecuador","Suriname","Tonga",
"Dominican Republic","Maldives","Mongolia","Turkmenistan","Samoa","Palestine, State of","Indonesia",
"Botswana","Egypt","Paraguay","Gabon","Bolivia (Plurinational State of)","Moldova (Republic of)","El Salvador",
"Uzbekistan","Philippines","South Africa","Syrian Arab Republic","Iraq","Guyana","Viet Nam","Cape Verde",
"Micronesia (Federated States of)","Guatemala","Kyrgyzstan","Namibia","Timor-Leste","Honduras","Morocco",
"Vanuatu","Nicaragua","Kiribati","Tajikistan","India","Bhutan","Cambodia","Ghana","Lao People's Democratic Republic",
"Congo","Zambia","Bangladesh","Sao Tome and Principe","Equatorial Guinea","Nepal","Pakistan","Kenya",
"Swaziland","Angola","Myanmar","Rwanda","Cameroon","Nigeria","Yemen","Madagascar","Zimbabwe","Papua New Guinea",
"Solomon Islands","Comoros","Tanzania (United Republic of)","Mauritania","Lesotho","Senegal","Uganda",
"Benin","Sudan","Togo","Haiti","Afghanistan","Djibouti","C™te d'Ivoire","Gambia","Ethiopia","Malawi",
"Liberia","Mali","Guinea-Bissau","Mozambique","Guinea","Burundi","Burkina Faso","Eritrea","Sierra Leone",
"Chad","Central African Republic","Congo (Democratic Republic of the)","Niger")
shinyUI(pageWithSidebar(
headerPanel("Model: Changes in Qual. of Life if emigrating.. "),
sidebarPanel(h4("Information"),
p("The UNDP compiles , on a yearly basis, a global index known as the Human Development Index,
(the higher the better) whereby each country is rated and given a ranking.
This simulation uses several of the indicators from the vaailable dataset
to predict how your quality of life could change if you emigrated to somewhere else. The data ,
used for this application was downloaded from http://hdr.undp.org/en/data.,
The data was then sampled and cleaned, with limited number of variables retained for this simulation."),
h4("Instructions"),
p("To use this application, fill in your (1) gender (male/female), (2)current locatrion , (3) where
your intend to relocation"),
h4("Go!"),
selectizeInput("countryNow", label="Where are you you currently living ?",selected = "Mauritius",choices=clist),
selectizeInput("countryFuture", label="Where do you plan to Relocate ?",selected = "Germany",choices=clist),
selectizeInput("gender", label="Input your gender",selected = "Male",choices=c("Male","Female")),
submitButton("Submit")
),
mainPanel(
p("To avoid information overload, this simulation is limited to 12 indicators but can be modified to include others."),
# tableOutput("hdi"),
verbatimTextOutput("ocountryn"),
verbatimTextOutput("ocountryf"),
verbatimTextOutput("ogender"),
p(h5("RESULT"),'Here is how your quality fo life could change if you relocated to the country selected'),
p('The below table give you the % change to expect,compared to your current situation'),
h5("Results = (Index Country of Origin - Index Country of Destination) Values in %" ),
tableOutput("ohdi2"),
plotOutput("myPlot"),
includeHTML("legend.html")
)
)
)
|
/ui.R
|
no_license
|
Pramesh1/DDPFinal2
|
R
| false | false | 4,885 |
r
|
# options
#########################
lengths <- seq.int(from = 10, to = 100, by = 10);
global_path <- "error/";
output_mse <- paste(global_path, "mse/MSE_", sep="");
output_rmse <- paste(global_path, "rmse/RMSE_", sep="");
output_mae <- paste(global_path, "mae/MAE_", sep="");
output_cor <- paste(global_path, "correlation.dat", sep="");
input_missingmat <- "recovery/values/recovered_matrices/recoveredMat";
list_algos <- c("softimp");
#########################
algos_str = paste(list_algos, collapse="\t");
TITLEBAR=paste("=====================================================",
paste(" # \t|| ref\t\t", algos_str, sep=""),
"=====================================================\n", sep="\n");
SEPARATE="=====================================================\n";
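# (Added note:) with the single algorithm configured above, TITLEBAR renders
# the header row " # \t|| ref\t\tsoftimp" for each correlation table.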
msqe <- function() {
dftest <- read.table(paste(input_missingmat, lengths[1], ".txt", sep=""), header=FALSE);
for(i in 2:length(dftest)) {
fileName = paste(output_mse, list_algos[i-1], ".dat", sep="");
write(paste("#", list_algos[i-1]), fileName); #rewrite
}
for(len in lengths) {
df <- read.table(paste(input_missingmat, len, ".txt", sep=""), header=FALSE);
dfmx <- as.matrix(df);
ref = dfmx[,1];
for(i in 2:length(df)) {
fileName = paste(output_mse, list_algos[i-1], ".dat", sep="");
comp <- dfmx[,i];
comp <- comp - ref;
msqe_val <- mean(comp^2);
#if (msqe_val > 1E10) { msqe_val = 30.0; }
#else if (msqe_val > 25.0) { msqe_val = 25.0; }
lin <- paste(len, " ", msqe_val, sep="");
write(lin, fileName, append=TRUE);
}
}
}
rmsqe <- function() {
dftest <- read.table(paste(input_missingmat, lengths[1], ".txt", sep=""), header=FALSE);
for(i in 2:length(dftest)) {
fileName = paste(output_rmse, list_algos[i-1], ".dat", sep="");
write(paste("#", list_algos[i-1]), fileName); #rewrite
}
for(len in lengths) {
df <- read.table(paste(input_missingmat, len, ".txt", sep=""), header=FALSE);
dfmx <- as.matrix(df);
ref = dfmx[,1];
for(i in 2:length(df)) {
fileName = paste(output_rmse, list_algos[i-1], ".dat", sep="");
comp <- dfmx[,i];
comp <- comp - ref;
msqe_val <- sqrt(mean(comp^2));
#if (msqe_val > 1E10) { msqe_val = 5.5; }
#else if (msqe_val > 5.0) { msqe_val = 5.0; }
lin <- paste(len, " ", msqe_val, sep="");
write(lin, fileName, append=TRUE);
}
}
}
maerr <- function() {
dftest <- read.table(paste(input_missingmat, lengths[1], ".txt", sep=""), header=FALSE);
for(i in 2:length(dftest)) {
fileName = paste(output_mae, list_algos[i-1], ".dat", sep="");
write(paste("#", list_algos[i-1]), fileName); #rewrite
}
for(len in lengths) {
df <- read.table(paste(input_missingmat, len, ".txt", sep=""), header=FALSE);
dfmx <- as.matrix(df);
ref = dfmx[,1];
for(i in 2:length(df)) {
fileName = paste(output_mae, list_algos[i-1], ".dat", sep="");
comp <- dfmx[,i];
comp <- comp - ref;
msqe_val <- mean(abs(comp));
#if (msqe_val > 1E10) { msqe_val = 5.5; }
#else if (msqe_val > 5.0) { msqe_val = 5.0; }
lin <- paste(len, " ", msqe_val, sep="");
write(lin, fileName, append=TRUE);
}
}
}
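# ---- Added sketch (not in the original script) ------------------------------
# msqe(), rmsqe() and maerr() repeat the same loop and differ only in the
# statistic applied to the residual vector `comp`; a single helper could
# express that difference:
err_stat <- function(comp, kind = c("mse", "rmse", "mae")) {
  kind <- match.arg(kind)
  switch(kind,
         mse  = mean(comp^2),
         rmse = sqrt(mean(comp^2)),
         mae  = mean(abs(comp)))
}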
corr <- function() {
write("(pearson)", output_cor); #rewrite
cat(TITLEBAR, file=output_cor, append=TRUE);
for(i in lengths) {
df <- read.table(paste(input_missingmat, i, ".txt", sep=""), header=FALSE);
cat(i, file=output_cor, append=TRUE);
cat(" \t|| \t", file=output_cor, append=TRUE);
mat <- cor(df, method="pearson");
mat = round(mat * 100);
cat(mat[,1], file=output_cor, sep="\t\t", append=TRUE);
cat("\n", file=output_cor, append=TRUE);
}
cat(SEPARATE, file=output_cor, append=TRUE);
cat("\n\n(spearman)\n", file=output_cor, append=TRUE);
cat(TITLEBAR, file=output_cor, append=TRUE);
for(i in lengths) {
df <- read.table(paste(input_missingmat, i, ".txt", sep=""), header=FALSE);
cat(i, file=output_cor, append=TRUE);
cat(" \t|| \t", file=output_cor, append=TRUE);
mat <- cor(df, method="spearman");
mat = round(mat * 100);
cat(mat[,1], file=output_cor, sep="\t\t", append=TRUE);
cat("\n", file=output_cor, append=TRUE);
}
cat(SEPARATE, file=output_cor, append=TRUE);
#cat("\n\n(kendall)\n", file=output_cor, append=TRUE);
#cat(TITLEBAR, file=output_cor, append=TRUE);
#for(i in lengths) {
#df <- read.table(paste(input_missingmat, i, ".txt", sep=""), header=FALSE);
#cat(i, file=output_cor, append=TRUE);
#cat(" \t|| \t", file=output_cor, append=TRUE);
#mat <- cor(df, method="kendall");
#mat = round(mat * 100);
#cat(mat[,1], file=output_cor, sep="\t\t", append=TRUE);
#cat("\n", file=output_cor, append=TRUE);
#}
cat(SEPARATE, file=output_cor, append=TRUE);
cat("\n", file=output_cor, append=TRUE);
}
corr();
msqe();
rmsqe();
maerr();
|
/TestingFramework/bin/Debug/Results/mcar/airq/scripts/precision/error_calculation.r
|
no_license
|
gkgupta11k/csisProject
|
R
| false | false | 4,723 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/today.R
\name{today}
\alias{today}
\title{Today}
\usage{
today(format = "computer", sep = "_")
}
\arguments{
\item{format}{Character. Return computer-friendly or human-friendly format. One in c("computer", "human").}
\item{sep}{Character. Format separator. Default is "_".}
}
\value{
Character.
}
\description{
Get today's date.
}
\examples{
today()
}
|
/man/today.Rd
|
no_license
|
MirzaCengic/Rahat
|
R
| false | true | 453 |
rd
|
#' This example shows how a user can use the impute.subject() function to impute
#' the visits of a single patient by using the data from another clinical
#' register.
data(patient.data)
data(new.patient)
#' The user must define which features are static/dynamic and
#' continuous/categorical/ordinal.
static.features = c(
"sex",
"bmi_premorbid",
"bmi_diagnosis",
"fvc_diagnosis",
"familiality",
"genetics",
"ftd",
"onset_site",
"onset_age"
)
dynamic.features = c(
"niv",
"peg",
"alsfrs_1",
"alsfrs_2",
"alsfrs_3",
"alsfrs_4",
"alsfrs_5",
"alsfrs_6",
"alsfrs_7",
"alsfrs_8",
"alsfrs_9",
"alsfrs_10",
"alsfrs_11",
"alsfrs_12"
)
continuous.features = c("bmi_premorbid",
"bmi_diagnosis",
"fvc_diagnosis",
"onset_age")
categorical.features = c("sex",
"familiality",
"genetics",
"ftd",
"onset_site",
"niv",
"peg")
ordinal.features = c(
"alsfrs_1",
"alsfrs_2",
"alsfrs_3",
"alsfrs_4",
"alsfrs_5",
"alsfrs_6",
"alsfrs_7",
"alsfrs_8",
"alsfrs_9",
"alsfrs_10",
"alsfrs_11",
"alsfrs_12"
)
#' In what follows, the impute.subject() function is used to impute the missing
#' values in the visits of a new patient within a 3-month-wide time window.
#' Please note that missing values in the visits outside of this window will not
#' be imputed.
imputed.patient.data <-
impute.subject(
subject.to.impute = new.patient,
# data frame containing two visits with missing data to be imputed
candidates = patient.data,
# dataset of patients to be used as candiates for the wkNNMI algorithm
window_size = 3,
# how many months of patient data to impute
K = 5,
# number of neighbours to consider for the imputation
static.features = static.features,
dynamic.features = dynamic.features,
continuous.features = continuous.features,
categorical.features = categorical.features,
ordinal.features = ordinal.features,
time.feature = "visit_time",
# the time feature
sub.id.feature = "subID"
)
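#' (Added sketch, not part of the original example:) the returned object can be
#' inspected like any data frame; the column names below follow the features
#' defined above.
if (FALSE) { # not run
  str(imputed.patient.data)
  head(imputed.patient.data[, c("subID", "visit_time", "alsfrs_1")])
}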
|
/R/examples/new.patient.imputation.example.R
|
no_license
|
cran/wkNNMI
|
R
| false | false | 2,222 |
r
|
library(ggplot2)
library(stringr)
library(car) # recode() with the "'old' = 'new'" spec syntax used below comes from car
load(file="processed_data/gamm_weights/gam3_weights.Rdata")
summary(gam3.weights)
factors.fits <- c("fit.tmean", "fit.precip", "fit.dbh.recon", "fit.full", "BA.inc")
factors.weights <- c("weight.tmean", "weight.dbh.recon", "weight.precip")
# Transforming things back to BA.inc rather than log
gam3.weights[,which(substr(names(gam3.weights),1,3)=="fit")] <- exp(gam3.weights[,which(substr(names(gam3.weights),1,3)=="fit")] )
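# (Added note:) the gamm was fit on the log scale (see the comment above), so
# exponentiating the fit.* columns returns them to BAI units, where the
# individual factor effects act multiplicatively rather than additively.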
othervars <- c("Year", "Site", "group.cc", "Model")
data.graph1 <- aggregate(gam3.weights[,factors.fits], by = gam3.weights[,othervars], FUN= mean, na.rm=T)
data.graph1[,paste(factors.fits, "upr", sep=".")] <- aggregate(gam3.weights[,factors.fits], by = gam3.weights[,othervars], FUN= quantile, prob= 0.975, na.rm=T)[,factors.fits]
data.graph1[,paste(factors.fits, "lwr", sep=".")] <- aggregate(gam3.weights[,factors.fits], by = gam3.weights[,othervars], FUN= quantile, prob= 0.025, na.rm=T)[,factors.fits]
summary(data.graph1)
data.graph2 <- aggregate(abs(gam3.weights[,factors.weights]), by = gam3.weights[,othervars], FUN= mean, na.rm=T)
data.graph2[,paste(factors.weights, "upr", sep=".")] <- aggregate(abs(gam3.weights[,factors.weights]), by = gam3.weights[,othervars], FUN= quantile, prob= 0.975, na.rm=T)[,factors.weights]
data.graph2[,paste(factors.weights, "lwr", sep=".")] <- aggregate(abs(gam3.weights[,factors.weights]), by = gam3.weights[,othervars], FUN= quantile, prob= 0.025, na.rm=T)[,factors.weights]
summary(data.graph2)
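# (Added note:) the aggregate() calls above collapse gam3.weights to one row
# per Year x Site x group.cc x Model, keeping the mean plus empirical 95%
# bands (2.5% and 97.5% quantiles) for the fits and the absolute weights.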
data.graph <- merge(data.graph1, data.graph2, all.x=T, all.y=T)
# data.graph <- gam3.weights[gam3.weights$TreeID== "MMA014",]
summary(data.graph)
gam3.weights$wts.check <- rowSums(abs(gam3.weights[,c("weight.tmean", "weight.precip", "weight.dbh.recon")]))
data.graph$wts.check <- rowSums(abs(data.graph[,c("weight.tmean", "weight.precip", "weight.dbh.recon")]))
summary(gam3.weights)
summary(data.graph)
# Ordering the data for graphing
data.graph<- data.graph[order(data.graph$Year, data.graph$group.cc, data.graph$Site, decreasing=F),]
plot.rgb <- function(STATE, GC, SIZE){ geom_point(data=data.graph[data.graph$State==STATE & data.graph$group.cc==GC,],aes(x=Year, y=fit.full), size=SIZE,
color=rgb(abs(data.graph[data.graph$State==STATE & data.graph$group.cc==GC,"weight.tmean" ]), # red
abs(data.graph[data.graph$State==STATE & data.graph$group.cc==GC,"weight.dbh.recon" ]), # green
abs(data.graph[data.graph$State==STATE & data.graph$group.cc==GC,"weight.precip" ]))) } # blue
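# (Added note, not in the original script:) plot.rgb() maps the absolute
# factor weights onto colour channels (red = temperature, green = size via
# dbh.recon, blue = precipitation), so a point's hue shows which factor
# dominates that State/group.cc/Year. The wts.check columns above suggest the
# three weights are intended to sum to 1.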
# Plotting the Obs and modeled with influence coloring
data.graph$State <- recode(data.graph$Site, "'Howland' = 'ME';'Harvard' = 'MA';'Morgan Monroe State Park' = 'IN';'Missouri Ozark' = 'MO';'Oak Openings Toledo' = 'OH'")
data.graph$State <- factor(data.graph$State, levels=c("MO", "IN", "OH", "MA", "ME"))
# Split group.cc ("SPP.X") into the species code and the canopy class letter.
# (The original loop used substr(.., 1, 4), which truncated longer species
# codes such as "BETULA" to "BETU"; stripping the ".X" suffix keeps the full
# prefix and is identical for four-letter codes.)
data.graph$Canopy.Class <- as.factor(str_sub(data.graph$group.cc, -1))
data.graph$group <- as.factor(sub("\\..$", "", data.graph$group.cc))
summary(data.graph)
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_All.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[!data.graph$group.cc %in% c("BETULA.D", "CARYA.D", "FAGR.D", "FRAX.D", "SAAL.D",
"BETULA.I", "CARYA.I", "FAGR.I", "FRAX.I", "SAAL.I",
"BETULA.S", "CARYA.S", "FAGR.S", "FRAX.S", "SAAL.S"),]) +
facet_grid(group.cc ~ State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Harvard
plot.rgb("MA", "ACRU.C", 3) +
plot.rgb("MA", "ACRU.D", 3) +
plot.rgb("MA", "ACRU.I", 3) +
plot.rgb("MA", "ACRU.S", 3) +
plot.rgb("MA", "PIST.C", 3) +
plot.rgb("MA", "PIST.D", 3) +
plot.rgb("MA", "PIST.I", 3) +
plot.rgb("MA", "PIST.S", 3) +
plot.rgb("MA", "TSCA.C", 3) +
plot.rgb("MA", "TSCA.D", 3) +
plot.rgb("MA", "TSCA.I", 3) +
plot.rgb("MA", "TSCA.S", 3) +
plot.rgb("MA", "QURU.C", 3) +
plot.rgb("MA", "QURU.D", 3) +
plot.rgb("MA", "QURU.I", 3) +
plot.rgb("MA", "QURU.S", 3) +
plot.rgb("MA", "QUVE.C", 3) +
plot.rgb("MA", "QUVE.D", 3) +
plot.rgb("MA", "QUVE.I", 3) +
plot.rgb("MA", "QUVE.S", 3) +
# plot.rgb("MA", "FRAX.C", 3) +
# plot.rgb("MA", "FRAX.D", 3) +
# plot.rgb("MA", "FRAX.I", 3) +
# plot.rgb("MA", "FRAX.S", 3) +
# plot.rgb("MA", "BETULA.C", 3) +
# plot.rgb("MA", "BETULA.D", 3) +
# plot.rgb("MA", "BETULA.I", 3) +
# plot.rgb("MA", "BETULA.S", 3) +
plot.rgb("MA", "ACSA.C", 3) +
plot.rgb("MA", "ACSA.D", 3) +
plot.rgb("MA", "ACSA.I", 3) +
plot.rgb("MA", "ACSA.S", 3) +
plot.rgb("MA", "QUAL.C", 3) +
plot.rgb("MA", "QUAL.D", 3) +
plot.rgb("MA", "QUAL.I", 3) +
plot.rgb("MA", "QUAL.S", 3) +
# plot.rgb("MA", "FAGR.C", 3) +
# plot.rgb("MA", "FAGR.D", 3) +
# plot.rgb("MA", "FAGR.I", 3) +
# plot.rgb("MA", "FAGR.S", 3) +
plot.rgb("MA", "ULRU.C", 3) +
plot.rgb("MA", "ULRU.D", 3) +
plot.rgb("MA", "ULRU.I", 3) +
plot.rgb("MA", "ULRU.S", 3) +
# plot.rgb("MA", "CARYA.C", 3) +
# plot.rgb("MA", "CARYA.D", 3) +
# plot.rgb("MA", "CARYA.I", 3) +
# plot.rgb("MA", "CARYA.S", 3) +
# plot.rgb("MA", "SAAL.C", 3) +
# plot.rgb("MA", "SAAL.D", 3) +
# plot.rgb("MA", "SAAL.I", 3) +
# plot.rgb("MA", "SAAL.S", 3) +
# Howland
plot.rgb("ME", "ACRU.C", 3) +
plot.rgb("ME", "ACRU.D", 3) +
plot.rgb("ME", "ACRU.I", 3) +
plot.rgb("ME", "ACRU.S", 3) +
plot.rgb("ME", "PIST.C", 3) +
plot.rgb("ME", "PIST.D", 3) +
plot.rgb("ME", "PIST.I", 3) +
plot.rgb("ME", "PIST.S", 3) +
plot.rgb("ME", "TSCA.C", 3) +
plot.rgb("ME", "TSCA.D", 3) +
plot.rgb("ME", "TSCA.I", 3) +
plot.rgb("ME", "TSCA.S", 3) +
plot.rgb("ME", "QURU.C", 3) +
plot.rgb("ME", "QURU.D", 3) +
plot.rgb("ME", "QURU.I", 3) +
plot.rgb("ME", "QURU.S", 3) +
plot.rgb("ME", "QUVE.C", 3) +
plot.rgb("ME", "QUVE.D", 3) +
plot.rgb("ME", "QUVE.I", 3) +
plot.rgb("ME", "QUVE.S", 3) +
# plot.rgb("ME", "FRAX.C", 3) +
# plot.rgb("ME", "FRAX.D", 3) +
# plot.rgb("ME", "FRAX.I", 3) +
# plot.rgb("ME", "FRAX.S", 3) +
# plot.rgb("ME", "BETULA.C", 3) +
# plot.rgb("ME", "BETULA.D", 3) +
# plot.rgb("ME", "BETULA.I", 3) +
# plot.rgb("ME", "BETULA.S", 3) +
plot.rgb("ME", "ACSA.C", 3) +
plot.rgb("ME", "ACSA.D", 3) +
plot.rgb("ME", "ACSA.I", 3) +
plot.rgb("ME", "ACSA.S", 3) +
plot.rgb("ME", "QUAL.C", 3) +
plot.rgb("ME", "QUAL.D", 3) +
plot.rgb("ME", "QUAL.I", 3) +
plot.rgb("ME", "QUAL.S", 3) +
# plot.rgb("ME", "FAGR.C", 3) +
# plot.rgb("ME", "FAGR.D", 3) +
# plot.rgb("ME", "FAGR.I", 3) +
# plot.rgb("ME", "FAGR.S", 3) +
plot.rgb("ME", "ULRU.C", 3) +
plot.rgb("ME", "ULRU.D", 3) +
plot.rgb("ME", "ULRU.I", 3) +
plot.rgb("ME", "ULRU.S", 3) +
# plot.rgb("ME", "CARYA.C", 3) +
# plot.rgb("ME", "CARYA.D", 3) +
# plot.rgb("ME", "CARYA.I", 3) +
# plot.rgb("ME", "CARYA.S", 3) +
# plot.rgb("ME", "SAAL.C", 3) +
# plot.rgb("ME", "SAAL.D", 3) +
# plot.rgb("ME", "SAAL.I", 3) +
# plot.rgb("ME", "SAAL.S", 3) +
# Morgan Monroe
plot.rgb("IN", "ACRU.C", 3) +
plot.rgb("IN", "ACRU.D", 3) +
plot.rgb("IN", "ACRU.I", 3) +
plot.rgb("IN", "ACRU.S", 3) +
plot.rgb("IN", "PIST.C", 3) +
plot.rgb("IN", "PIST.D", 3) +
plot.rgb("IN", "PIST.I", 3) +
plot.rgb("IN", "PIST.S", 3) +
plot.rgb("IN", "TSCA.C", 3) +
plot.rgb("IN", "TSCA.D", 3) +
plot.rgb("IN", "TSCA.I", 3) +
plot.rgb("IN", "TSCA.S", 3) +
plot.rgb("IN", "QURU.C", 3) +
plot.rgb("IN", "QURU.D", 3) +
plot.rgb("IN", "QURU.I", 3) +
plot.rgb("IN", "QURU.S", 3) +
plot.rgb("IN", "QUVE.C", 3) +
plot.rgb("IN", "QUVE.D", 3) +
plot.rgb("IN", "QUVE.I", 3) +
plot.rgb("IN", "QUVE.S", 3) +
# plot.rgb("IN", "FRAX.C", 3) +
# plot.rgb("IN", "FRAX.D", 3) +
# plot.rgb("IN", "FRAX.I", 3) +
# plot.rgb("IN", "FRAX.S", 3) +
# plot.rgb("IN", "BETULA.C", 3) +
# plot.rgb("IN", "BETULA.D", 3) +
# plot.rgb("IN", "BETULA.I", 3) +
# plot.rgb("IN", "BETULA.S", 3) +
plot.rgb("IN", "ACSA.C", 3) +
plot.rgb("IN", "ACSA.D", 3) +
plot.rgb("IN", "ACSA.I", 3) +
plot.rgb("IN", "ACSA.S", 3) +
plot.rgb("IN", "QUAL.C", 3) +
plot.rgb("IN", "QUAL.D", 3) +
plot.rgb("IN", "QUAL.I", 3) +
plot.rgb("IN", "QUAL.S", 3) +
# plot.rgb("IN", "FAGR.C", 3) +
# plot.rgb("IN", "FAGR.D", 3) +
# plot.rgb("IN", "FAGR.I", 3) +
# plot.rgb("IN", "FAGR.S", 3) +
plot.rgb("IN", "ULRU.C", 3) +
plot.rgb("IN", "ULRU.D", 3) +
plot.rgb("IN", "ULRU.I", 3) +
plot.rgb("IN", "ULRU.S", 3) +
# plot.rgb("IN", "CARYA.C", 3) +
# plot.rgb("IN", "CARYA.D", 3) +
# plot.rgb("IN", "CARYA.I", 3) +
# plot.rgb("IN", "CARYA.S", 3) +
# plot.rgb("IN", "SAAL.C", 3) +
# plot.rgb("IN", "SAAL.D", 3) +
# plot.rgb("IN", "SAAL.I", 3) +
# plot.rgb("IN", "SAAL.S", 3) +
# Missouri Ozark
plot.rgb("MO", "ACRU.C", 3) +
plot.rgb("MO", "ACRU.D", 3) +
plot.rgb("MO", "ACRU.I", 3) +
plot.rgb("MO", "ACRU.S", 3) +
plot.rgb("MO", "PIST.C", 3) +
plot.rgb("MO", "PIST.D", 3) +
plot.rgb("MO", "PIST.I", 3) +
plot.rgb("MO", "PIST.S", 3) +
plot.rgb("MO", "TSCA.C", 3) +
plot.rgb("MO", "TSCA.D", 3) +
plot.rgb("MO", "TSCA.I", 3) +
plot.rgb("MO", "TSCA.S", 3) +
plot.rgb("MO", "QURU.C", 3) +
plot.rgb("MO", "QURU.D", 3) +
plot.rgb("MO", "QURU.I", 3) +
plot.rgb("MO", "QURU.S", 3) +
plot.rgb("MO", "QUVE.C", 3) +
plot.rgb("MO", "QUVE.D", 3) +
plot.rgb("MO", "QUVE.I", 3) +
plot.rgb("MO", "QUVE.S", 3) +
# plot.rgb("MO", "FRAX.C", 3) +
# plot.rgb("MO", "FRAX.D", 3) +
# plot.rgb("MO", "FRAX.I", 3) +
# plot.rgb("MO", "FRAX.S", 3) +
# plot.rgb("MO", "BETULA.C", 3) +
# plot.rgb("MO", "BETULA.D", 3) +
# plot.rgb("MO", "BETULA.I", 3) +
# plot.rgb("MO", "BETULA.S", 3) +
plot.rgb("MO", "ACSA.C", 3) +
plot.rgb("MO", "ACSA.D", 3) +
plot.rgb("MO", "ACSA.I", 3) +
plot.rgb("MO", "ACSA.S", 3) +
plot.rgb("MO", "QUAL.C", 3) +
plot.rgb("MO", "QUAL.D", 3) +
plot.rgb("MO", "QUAL.I", 3) +
plot.rgb("MO", "QUAL.S", 3) +
# plot.rgb("MO", "FAGR.C", 3) +
# plot.rgb("MO", "FAGR.D", 3) +
# plot.rgb("MO", "FAGR.I", 3) +
# plot.rgb("MO", "FAGR.S", 3) +
plot.rgb("MO", "ULRU.C", 3) +
plot.rgb("MO", "ULRU.D", 3) +
plot.rgb("MO", "ULRU.I", 3) +
plot.rgb("MO", "ULRU.S", 3) +
# plot.rgb("MO", "CARYA.C", 3) +
# plot.rgb("MO", "CARYA.D", 3) +
# plot.rgb("MO", "CARYA.I", 3) +
# plot.rgb("MO", "CARYA.S", 3) +
# plot.rgb("MO", "SAAL.C", 3) +
# plot.rgb("MO", "SAAL.D", 3) +
# plot.rgb("MO", "SAAL.I", 3) +
# plot.rgb("MO", "SAAL.S", 3) +
# Oak Openings
plot.rgb("OH", "ACRU.C", 3) +
plot.rgb("OH", "ACRU.D", 3) +
plot.rgb("OH", "ACRU.I", 3) +
plot.rgb("OH", "ACRU.S", 3) +
plot.rgb("OH", "PIST.C", 3) +
plot.rgb("OH", "PIST.D", 3) +
plot.rgb("OH", "PIST.I", 3) +
plot.rgb("OH", "PIST.S", 3) +
plot.rgb("OH", "TSCA.C", 3) +
plot.rgb("OH", "TSCA.D", 3) +
plot.rgb("OH", "TSCA.I", 3) +
plot.rgb("OH", "TSCA.S", 3) +
plot.rgb("OH", "QURU.C", 3) +
plot.rgb("OH", "QURU.D", 3) +
plot.rgb("OH", "QURU.I", 3) +
plot.rgb("OH", "QURU.S", 3) +
plot.rgb("OH", "QUVE.C", 3) +
plot.rgb("OH", "QUVE.D", 3) +
plot.rgb("OH", "QUVE.I", 3) +
plot.rgb("OH", "QUVE.S", 3) +
# plot.rgb("OH", "FRAX.C", 3) +
# plot.rgb("OH", "FRAX.D", 3) +
# plot.rgb("OH", "FRAX.I", 3) +
# plot.rgb("OH", "FRAX.S", 3) +
# plot.rgb("OH", "BETULA.C", 3) +
# plot.rgb("OH", "BETULA.D", 3) +
# plot.rgb("OH", "BETULA.I", 3) +
# plot.rgb("OH", "BETULA.S", 3) +
plot.rgb("OH", "ACSA.C", 3) +
plot.rgb("OH", "ACSA.D", 3) +
plot.rgb("OH", "ACSA.I", 3) +
plot.rgb("OH", "ACSA.S", 3) +
plot.rgb("OH", "QUAL.C", 3) +
plot.rgb("OH", "QUAL.D", 3) +
plot.rgb("OH", "QUAL.I", 3) +
plot.rgb("OH", "QUAL.S", 3) +
# plot.rgb("OH", "FAGR.C", 3) +
# plot.rgb("OH", "FAGR.D", 3) +
# plot.rgb("OH", "FAGR.I", 3) +
# plot.rgb("OH", "FAGR.S", 3) +
plot.rgb("OH", "ULRU.C", 3) +
plot.rgb("OH", "ULRU.D", 3) +
plot.rgb("OH", "ULRU.I", 3) +
plot.rgb("OH", "ULRU.S", 3) #+
# plot.rgb("OH", "CARYA.C", 3) +
# plot.rgb("OH", "CARYA.D", 3) +
# plot.rgb("OH", "CARYA.I", 3) +
# plot.rgb("OH", "CARYA.S", 3) +
# plot.rgb("OH", "SAAL.C", 3) +
# plot.rgb("OH", "SAAL.D", 3) +
# plot.rgb("OH", "SAAL.I", 3) +
# plot.rgb("OH", "SAAL.S", 3)
dev.off()
# Just plotting the BAI fits
summary(data.graph)
ggplot(data.graph) + facet_wrap(group.cc~Site) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.full), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=fit.full.lwr, ymax=fit.full.upr), alpha=0.3)
# Plotting the Effects
pdf("figures/gam3/gam3_influence_in_time_all.pdf", width= 13, height = 8.5)
ggplot(data.graph) + facet_grid(group.cc~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="Effect on RW (in mm)") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.tmean), size=1, color="red") +
geom_line(aes(x=Year, y=fit.precip), size=1, color="blue") +
geom_line(aes(x=Year, y=fit.dbh.recon), size=1, color="green")
dev.off()
########################################################################
########################################################################
# Oaks only for Ameridendro
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_All_oaks.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[substr(data.graph$group, 1,2)=="QU",]) + facet_grid(group.cc ~ State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Harvard
plot.rgb("MA", "QURU.C", 3) +
plot.rgb("MA", "QURU.D", 3) +
plot.rgb("MA", "QURU.I", 3) +
plot.rgb("MA", "QURU.S", 3) +
plot.rgb("MA", "QUVE.C", 3) +
plot.rgb("MA", "QUVE.D", 3) +
plot.rgb("MA", "QUVE.I", 3) +
plot.rgb("MA", "QUVE.S", 3) +
plot.rgb("MA", "QUAL.C", 3) +
plot.rgb("MA", "QUAL.D", 3) +
plot.rgb("MA", "QUAL.I", 3) +
plot.rgb("MA", "QUAL.S", 3) +
# Howland
plot.rgb("ME", "QURU.C", 3) +
plot.rgb("ME", "QURU.D", 3) +
plot.rgb("ME", "QURU.I", 3) +
plot.rgb("ME", "QURU.S", 3) +
plot.rgb("ME", "QUVE.C", 3) +
plot.rgb("ME", "QUVE.D", 3) +
plot.rgb("ME", "QUVE.I", 3) +
plot.rgb("ME", "QUVE.S", 3) +
plot.rgb("ME", "QUAL.C", 3) +
plot.rgb("ME", "QUAL.D", 3) +
plot.rgb("ME", "QUAL.I", 3) +
plot.rgb("ME", "QUAL.S", 3) +
# Morgan Monroe
plot.rgb("IN", "QURU.C", 3) +
plot.rgb("IN", "QURU.D", 3) +
plot.rgb("IN", "QURU.I", 3) +
plot.rgb("IN", "QURU.S", 3) +
plot.rgb("IN", "QUVE.C", 3) +
plot.rgb("IN", "QUVE.D", 3) +
plot.rgb("IN", "QUVE.I", 3) +
plot.rgb("IN", "QUVE.S", 3) +
plot.rgb("IN", "QUAL.C", 3) +
plot.rgb("IN", "QUAL.D", 3) +
plot.rgb("IN", "QUAL.I", 3) +
plot.rgb("IN", "QUAL.S", 3) +
# Missouri Ozark
plot.rgb("MO", "QURU.C", 3) +
plot.rgb("MO", "QURU.D", 3) +
plot.rgb("MO", "QURU.I", 3) +
plot.rgb("MO", "QURU.S", 3) +
plot.rgb("MO", "QUVE.C", 3) +
plot.rgb("MO", "QUVE.D", 3) +
plot.rgb("MO", "QUVE.I", 3) +
plot.rgb("MO", "QUVE.S", 3) +
plot.rgb("MO", "QUAL.C", 3) +
plot.rgb("MO", "QUAL.D", 3) +
plot.rgb("MO", "QUAL.I", 3) +
plot.rgb("MO", "QUAL.S", 3) +
# Oak Openings
plot.rgb("OH", "QURU.C", 3) +
plot.rgb("OH", "QURU.D", 3) +
plot.rgb("OH", "QURU.I", 3) +
plot.rgb("OH", "QURU.S", 3) +
plot.rgb("OH", "QUVE.C", 3) +
plot.rgb("OH", "QUVE.D", 3) +
plot.rgb("OH", "QUVE.I", 3) +
plot.rgb("OH", "QUVE.S", 3) +
plot.rgb("OH", "QUAL.C", 3) +
plot.rgb("OH", "QUAL.D", 3) +
plot.rgb("OH", "QUAL.I", 3) +
plot.rgb("OH", "QUAL.S", 3)
dev.off()
# Just plotting the BAI fits
summary(data.graph)
ggplot(data.graph) + facet_wrap(group.cc~Site) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.full), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=fit.full.lwr, ymax=fit.full.upr), alpha=0.3)
# Plotting the Effects
pdf("figures/gam3/gam3_influence_in_time_all_oaks.pdf", width= 13, height = 8.5)
ggplot(data.graph[substr(data.graph$group, 1,2)=="QU",]) + facet_grid(group.cc~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="Effect on RW (in mm)") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.tmean), size=2, color="red") +
geom_line(aes(x=Year, y=fit.precip), size=2, color="blue") +
geom_line(aes(x=Year, y=fit.dbh.recon), size=2, color="green")
dev.off()
# Separating things out By canopy class to see things better
########################################################
########################################################
# Dominant
########################################################
########################################################
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_D.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$Canopy.Class=="D" & !data.graph$group.cc %in% c("BETULA.D", "CARYA.D", "FAGR.D", "FRAX.D", "SAAL.D", "ULRU.D", "ACSA.D"),]) + facet_grid(group ~ State) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Harvard
plot.rgb("MA", "ACRU.D", 3) +
plot.rgb("MA", "PIST.D", 3) +
plot.rgb("MA", "TSCA.D", 3) +
plot.rgb("MA", "QURU.D", 3) +
plot.rgb("MA", "QUVE.D", 3) +
# plot.rgb("MA", "FRAX.D", 3) +
# plot.rgb("MA", "BETULA.D", 3) +
#plot.rgb("MA", "ACSA.D", 3) +
plot.rgb("MA", "QUAL.D", 3) +
# plot.rgb("MA", "FAGR.D", 3) +
#plot.rgb("MA", "ULRU.D", 3) +
# plot.rgb("MA", "CARYA.D", 3) +
# plot.rgb("MA", "SAAL.D", 3) +
# Howland
plot.rgb("ME", "ACRU.D", 3) +
plot.rgb("ME", "PIST.D", 3) +
plot.rgb("ME", "TSCA.D", 3) +
plot.rgb("ME", "QURU.D", 3) +
plot.rgb("ME", "QUVE.D", 3) +
# plot.rgb("ME", "FRAX.D", 3) +
# plot.rgb("ME", "BETULA.D", 3) +
#plot.rgb("ME", "ACSA.D", 3) +
plot.rgb("ME", "QUAL.D", 3) +
# plot.rgb("ME", "FAGR.D", 3) +
#plot.rgb("ME", "ULRU.D", 3) +
# plot.rgb("ME", "CARYA.D", 3) +
# plot.rgb("ME", "SAAL.D", 3) +
# Morgan Monroe
plot.rgb("IN", "ACRU.D", 3) +
plot.rgb("IN", "PIST.D", 3) +
plot.rgb("IN", "TSCA.D", 3) +
plot.rgb("IN", "QURU.D", 3) +
plot.rgb("IN", "QUVE.D", 3) +
# plot.rgb("IN", "FRAX.D", 3) +
# plot.rgb("IN", "BETULA.D", 3) +
#plot.rgb("IN", "ACSA.D", 3) +
plot.rgb("IN", "QUAL.D", 3) +
# plot.rgb("IN", "FAGR.D", 3) +
#plot.rgb("IN", "ULRU.D", 3) +
# plot.rgb("IN", "CARYA.D", 3) +
# plot.rgb("IN", "SAAL.D", 3) +
# Missouri Ozark
plot.rgb("MO", "ACRU.D", 3) +
plot.rgb("MO", "PIST.D", 3) +
plot.rgb("MO", "TSCA.D", 3) +
plot.rgb("MO", "QURU.D", 3) +
plot.rgb("MO", "QUVE.D", 3) +
# plot.rgb("MO", "FRAX.D", 3) +
# plot.rgb("MO", "BETULA.D", 3) +
#plot.rgb("MO", "ACSA.D", 3) +
plot.rgb("MO", "QUAL.D", 3) +
# plot.rgb("MO", "FAGR.D", 3) +
#plot.rgb("MO", "ULRU.D", 3) +
# plot.rgb("MO", "CARYA.D", 3) +
# plot.rgb("MO", "SAAL.D", 3) +
# Oak Openings
plot.rgb("OH", "ACRU.D", 3) +
plot.rgb("OH", "PIST.D", 3) +
plot.rgb("OH", "TSCA.D", 3) +
plot.rgb("OH", "QURU.D", 3) +
plot.rgb("OH", "QUVE.D", 3) +
# plot.rgb("OH", "FRAX.D", 3) +
# plot.rgb("OH", "BETULA.D", 3) +
#plot.rgb("OH", "ACSA.D", 3) +
plot.rgb("OH", "QUAL.D", 3) +
# plot.rgb("OH", "FAGR.D", 3) +
#plot.rgb("OH", "ULRU.D", 3) +
# plot.rgb("OH", "CARYA.D", 3) +
# plot.rgb("OH", "SAAL.D", 3) +
poster.theme2 +
labs(title= "Dominant Trees", x="Year", y = expression(bold(paste("BAI (mm"^"2", "y"^"-1",")"))))
dev.off()
data.graph$State <- factor(data.graph$State, levels=c("MO", "IN", "OH", "MA", "ME"))
# Just plotting the BAI fits
summary(data.graph)
ggplot(data.graph[data.graph$Canopy.Class=="D" & !data.graph$group.cc %in% c("BETULA.D", "CARYA.D", "FAGR.D", "FRAX.D", "SAAL.D"),]) + facet_grid(group.cc~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.full), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=fit.full.lwr, ymax=fit.full.upr), alpha=0.3)
# Plotting the Effects
pdf("figures/gam3/gam3_influence_in_time_D.pdf", width= 13, height = 8.5)
ggplot(data.graph[data.graph$Canopy.Class=="D" & !data.graph$group.cc %in% c("BETULA.D", "CARYA.D", "FAGR.D", "FRAX.D", "SAAL.D"),]) + facet_grid(group~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="Effect on RW (in mm)") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
#geom_ribbon(aes(x=Year, ymin=fit.dbh.recon.lwr, ymax=fit.dbh.recon.upr), alpha=0.4, fill="green")+
geom_line(aes(x=Year, y=fit.tmean), size=1, color="red") +
geom_line(aes(x=Year, y=fit.precip), size=1, color="blue") +
geom_line(aes(x=Year, y=fit.dbh.recon), size=1, color="green")+
ylim(c(0,3)) +
labs(title= "Dominant Effects (group.cc)")
dev.off()
################################################################
################################################################
# Intermediate Trees
################################################################
################################################################
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_I.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$Canopy.Class=="I" & !data.graph$group.cc %in% c("BETULA.I", "CARYA.I", "FAGR.I", "FRAX.I", "SAAL.I"),]) + facet_grid(group ~ State) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Harvard
plot.rgb("MA", "ACRU.I", 3) +
plot.rgb("MA", "PIST.I", 3) +
plot.rgb("MA", "TSCA.I", 3) +
plot.rgb("MA", "QURU.I", 3) +
plot.rgb("MA", "QUVE.I", 3) +
# plot.rgb("MA", "FRAX.I", 3) +
# plot.rgb("MA", "BETULA.I", 3) +
plot.rgb("MA", "ACSA.I", 3) +
plot.rgb("MA", "QUAL.I", 3) +
# plot.rgb("MA", "FAGR.I", 3) +
plot.rgb("MA", "ULRU.I", 3) +
# plot.rgb("MA", "CARYA.I", 3) +
# plot.rgb("MA", "SAAL.I", 3) +
# Howland
plot.rgb("ME", "ACRU.I", 3) +
plot.rgb("ME", "PIST.I", 3) +
plot.rgb("ME", "TSCA.I", 3) +
plot.rgb("ME", "QURU.I", 3) +
plot.rgb("ME", "QUVE.I", 3) +
# plot.rgb("ME", "FRAX.I", 3) +
# plot.rgb("ME", "BETULA.I", 3) +
plot.rgb("ME", "ACSA.I", 3) +
plot.rgb("ME", "QUAL.I", 3) +
# plot.rgb("ME", "FAGR.I", 3) +
plot.rgb("ME", "ULRU.I", 3) +
# plot.rgb("ME", "CARYA.I", 3) +
# plot.rgb("ME", "SAAL.I", 3) +
# Morgan Monroe
plot.rgb("IN", "ACRU.I", 3) +
plot.rgb("IN", "PIST.I", 3) +
plot.rgb("IN", "TSCA.I", 3) +
plot.rgb("IN", "QURU.I", 3) +
plot.rgb("IN", "QUVE.I", 3) +
# plot.rgb("IN", "FRAX.I", 3) +
# plot.rgb("IN", "BETULA.I", 3) +
plot.rgb("IN", "ACSA.I", 3) +
plot.rgb("IN", "QUAL.I", 3) +
# plot.rgb("IN", "FAGR.I", 3) +
plot.rgb("IN", "ULRU.I", 3) +
# plot.rgb("IN", "CARYA.I", 3) +
# plot.rgb("IN", "SAAL.I", 3) +
# Missouri Ozark
plot.rgb("MO", "ACRU.I", 3) +
plot.rgb("MO", "PIST.I", 3) +
plot.rgb("MO", "TSCA.I", 3) +
plot.rgb("MO", "QURU.I", 3) +
plot.rgb("MO", "QUVE.I", 3) +
# plot.rgb("MO", "FRAX.I", 3) +
# plot.rgb("MO", "BETULA.I", 3) +
plot.rgb("MO", "ACSA.I", 3) +
plot.rgb("MO", "QUAL.I", 3) +
# plot.rgb("MO", "FAGR.I", 3) +
plot.rgb("MO", "ULRU.I", 3) +
# plot.rgb("MO", "CARYA.I", 3) +
# plot.rgb("MO", "SAAL.I", 3) +
# Oak Openings
plot.rgb("OH", "ACRU.I", 3) +
plot.rgb("OH", "PIST.I", 3) +
plot.rgb("OH", "TSCA.I", 3) +
plot.rgb("OH", "QURU.I", 3) +
plot.rgb("OH", "QUVE.I", 3) +
# plot.rgb("OH", "FRAX.I", 3) +
# plot.rgb("OH", "BETULA.I", 3) +
plot.rgb("OH", "ACSA.I", 3) +
plot.rgb("OH", "QUAL.I", 3) +
# plot.rgb("OH", "FAGR.I", 3) +
plot.rgb("OH", "ULRU.I", 3) +
# plot.rgb("OH", "CARYA.I", 3) +
# plot.rgb("OH", "SAAL.I", 3) +
poster.theme2 +
labs(title= "Intermediate Trees (group.cc)", x="Year", y=expression(bold(paste("BAI (mm2 / year)"))))
dev.off()
# Just plotting the BAI fits
summary(data.graph)
ggplot(data.graph[data.graph$Canopy.Class=="I" & !data.graph$group.cc %in% c("BETULA.I", "CARYA.I", "FAGR.I", "FRAX.I", "SAAL.I"),]) + facet_grid(group.cc~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.full), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=fit.full.lwr, ymax=fit.full.upr), alpha=0.3)
# Plotting the Effects
pdf("figures/gam3/gam3_influence_in_time_I.pdf", width= 13, height = 8.5)
ggplot(data.graph[data.graph$Canopy.Class=="I" & !data.graph$group.cc %in% c("BETULA.I", "CARYA.I", "FAGR.I", "FRAX.I", "SAAL.I"),]) + facet_grid(group~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="Effect on RW (in mm)") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.tmean), size=1, color="red") +
geom_line(aes(x=Year, y=fit.precip), size=1, color="blue") +
geom_line(aes(x=Year, y=fit.dbh.recon), size=1, color="green")+
ylim(c(0,3)) +
labs(title= "Intermediate Effects (group.cc)")
dev.off()
################################################################
################################################################
# Suppressed Trees
################################################################
################################################################
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_S.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$Canopy.Class=="S" & !data.graph$group.cc %in% c("BETULA.S", "CARYA.S", "FAGR.S", "FRAX.S", "SAAL.S"),]) + facet_grid(group ~ State) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Harvard
plot.rgb("MA", "ACRU.S", 3) +
plot.rgb("MA", "PIST.S", 3) +
plot.rgb("MA", "TSCA.S", 3) +
plot.rgb("MA", "QURU.S", 3) +
plot.rgb("MA", "QUVE.S", 3) +
# plot.rgb("MA", "FRAX.S", 3) +
# plot.rgb("MA", "BETULA.S", 3) +
plot.rgb("MA", "ACSA.S", 3) +
plot.rgb("MA", "QUAL.S", 3) +
# plot.rgb("MA", "FAGR.S", 3) +
plot.rgb("MA", "ULRU.S", 3) +
# plot.rgb("MA", "CARYA.S", 3) +
# plot.rgb("MA", "SAAL.S", 3) +
# Howland
plot.rgb("ME", "ACRU.S", 3) +
plot.rgb("ME", "PIST.S", 3) +
plot.rgb("ME", "TSCA.S", 3) +
plot.rgb("ME", "QURU.S", 3) +
plot.rgb("ME", "QUVE.S", 3) +
# plot.rgb("ME", "FRAX.S", 3) +
# plot.rgb("ME", "BETULA.S", 3) +
plot.rgb("ME", "ACSA.S", 3) +
plot.rgb("ME", "QUAL.S", 3) +
# plot.rgb("ME", "FAGR.S", 3) +
plot.rgb("ME", "ULRU.S", 3) +
# plot.rgb("ME", "CARYA.S", 3) +
# plot.rgb("ME", "SAAL.S", 3) +
# Morgan Monroe
plot.rgb("IN", "ACRU.S", 3) +
plot.rgb("IN", "PIST.S", 3) +
plot.rgb("IN", "TSCA.S", 3) +
plot.rgb("IN", "QURU.S", 3) +
plot.rgb("IN", "QUVE.S", 3) +
# plot.rgb("IN", "FRAX.S", 3) +
# plot.rgb("IN", "BETULA.S", 3) +
plot.rgb("IN", "ACSA.S", 3) +
plot.rgb("IN", "QUAL.S", 3) +
# plot.rgb("IN", "FAGR.S", 3) +
plot.rgb("IN", "ULRU.S", 3) +
# plot.rgb("IN", "CARYA.S", 3) +
# plot.rgb("IN", "SAAL.S", 3) +
# Missouri Ozark
plot.rgb("MO", "ACRU.S", 3) +
plot.rgb("MO", "PIST.S", 3) +
plot.rgb("MO", "TSCA.S", 3) +
plot.rgb("MO", "QURU.S", 3) +
plot.rgb("MO", "QUVE.S", 3) +
# plot.rgb("MO", "FRAX.S", 3) +
# plot.rgb("MO", "BETULA.S", 3) +
plot.rgb("MO", "ACSA.S", 3) +
plot.rgb("MO", "QUAL.S", 3) +
# plot.rgb("MO", "FAGR.S", 3) +
plot.rgb("MO", "ULRU.S", 3) +
# plot.rgb("MO", "CARYA.S", 3) +
# plot.rgb("MO", "SAAL.S", 3) +
# Oak Openings
plot.rgb("OH", "ACRU.S", 3) +
plot.rgb("OH", "PIST.S", 3) +
plot.rgb("OH", "TSCA.S", 3) +
plot.rgb("OH", "QURU.S", 3) +
plot.rgb("OH", "QUVE.S", 3) +
# plot.rgb("OH", "FRAX.S", 3) +
# plot.rgb("OH", "BETULA.S", 3) +
plot.rgb("OH", "ACSA.S", 3) +
plot.rgb("OH", "QUAL.S", 3) +
# plot.rgb("OH", "FAGR.S", 3) +
plot.rgb("OH", "ULRU.S", 3) +
# plot.rgb("OH", "CARYA.S", 3) +
# plot.rgb("OH", "SAAL.S", 3) +
poster.theme2 +
labs(title= "Suppressed Trees (group.cc)", x="Year", y=expression(bold(paste("BAI (mm2 / year)"))))
dev.off()
# Just plotting the BAI fits
summary(data.graph)
ggplot(data.graph[data.graph$Canopy.Class=="S" & !data.graph$group.cc %in% c("BETULA.S", "CARYA.S", "FAGR.S", "FRAX.S", "SAAL.S"),]) + facet_grid(group.cc~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.full), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=fit.full.lwr, ymax=fit.full.upr), alpha=0.3)
# Plotting the Effects
pdf("figures/gam3/gam3_influence_in_time_S.pdf", width= 13, height = 8.5)
ggplot(data.graph[data.graph$Canopy.Class=="S" & !data.graph$group.cc %in% c("BETULA.S", "CARYA.S", "FAGR.S", "FRAX.S", "SAAL.S"),]) + facet_grid(group~State) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="Effect on RW (in mm)") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=fit.tmean), size=1, color="red") +
geom_line(aes(x=Year, y=fit.precip), size=1, color="blue") +
geom_line(aes(x=Year, y=fit.dbh.recon), size=1, color="green")+
labs(title= "Suppressed Effects (group.cc)")
dev.off()
########################################################
########################################################
# Plotting out sites individually
########################################################
########################################################
# Missouri
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_MO.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$State %in% "MO",]) + facet_grid(Canopy.Class ~ group) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Missouri Ozark
plot.rgb("MO", "ACRU.C", 3) +
plot.rgb("MO", "ACRU.D", 3) +
plot.rgb("MO", "ACRU.I", 3) +
plot.rgb("MO", "ACRU.S", 3) +
plot.rgb("MO", "PIST.C", 3) +
plot.rgb("MO", "PIST.D", 3) +
plot.rgb("MO", "PIST.I", 3) +
plot.rgb("MO", "PIST.S", 3) +
plot.rgb("MO", "TSCA.C", 3) +
plot.rgb("MO", "TSCA.D", 3) +
plot.rgb("MO", "TSCA.I", 3) +
plot.rgb("MO", "TSCA.S", 3) +
plot.rgb("MO", "QURU.C", 3) +
plot.rgb("MO", "QURU.D", 3) +
plot.rgb("MO", "QURU.I", 3) +
plot.rgb("MO", "QURU.S", 3) +
plot.rgb("MO", "QUVE.C", 3) +
plot.rgb("MO", "QUVE.D", 3) +
plot.rgb("MO", "QUVE.I", 3) +
plot.rgb("MO", "QUVE.S", 3) +
plot.rgb("MO", "FRAX.C", 3) +
plot.rgb("MO", "FRAX.D", 3) +
plot.rgb("MO", "FRAX.I", 3) +
plot.rgb("MO", "FRAX.S", 3) +
plot.rgb("MO", "BETULA.C", 3) +
plot.rgb("MO", "BETULA.D", 3) +
plot.rgb("MO", "BETULA.I", 3) +
plot.rgb("MO", "BETULA.S", 3) +
plot.rgb("MO", "ACSA.C", 3) +
plot.rgb("MO", "ACSA.D", 3) +
plot.rgb("MO", "ACSA.I", 3) +
plot.rgb("MO", "ACSA.S", 3) +
plot.rgb("MO", "QUAL.C", 3) +
plot.rgb("MO", "QUAL.D", 3) +
plot.rgb("MO", "QUAL.I", 3) +
plot.rgb("MO", "QUAL.S", 3) +
plot.rgb("MO", "FAGR.C", 3) +
plot.rgb("MO", "FAGR.D", 3) +
plot.rgb("MO", "FAGR.I", 3) +
plot.rgb("MO", "FAGR.S", 3) +
plot.rgb("MO", "ULRU.C", 3) +
plot.rgb("MO", "ULRU.D", 3) +
plot.rgb("MO", "ULRU.I", 3) +
plot.rgb("MO", "ULRU.S", 3) +
plot.rgb("MO", "CARYA.C", 3) +
plot.rgb("MO", "CARYA.D", 3) +
plot.rgb("MO", "CARYA.I", 3) +
plot.rgb("MO", "CARYA.S", 3) +
plot.rgb("MO", "SAAL.C", 3) +
plot.rgb("MO", "SAAL.D", 3) +
plot.rgb("MO", "SAAL.I", 3) +
plot.rgb("MO", "SAAL.S", 3) +
poster.theme2
dev.off()
#----------------------------------------
# Indiana
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_IN.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$State %in% "IN" & !,]) + facet_grid(Canopy.Class ~ group) +
labs(title= "Dominant Trees", x="Year", y = expression(bold(paste("BAI (mm"^"2", "y"^"-1",")")))) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Morgan Monroe
plot.rgb("IN", "ACRU.C", 3) +
plot.rgb("IN", "ACRU.D", 3) +
plot.rgb("IN", "ACRU.I", 3) +
plot.rgb("IN", "ACRU.S", 3) +
plot.rgb("IN", "PIST.C", 3) +
plot.rgb("IN", "PIST.D", 3) +
plot.rgb("IN", "PIST.I", 3) +
plot.rgb("IN", "PIST.S", 3) +
plot.rgb("IN", "TSCA.C", 3) +
plot.rgb("IN", "TSCA.D", 3) +
plot.rgb("IN", "TSCA.I", 3) +
plot.rgb("IN", "TSCA.S", 3) +
plot.rgb("IN", "QURU.C", 3) +
plot.rgb("IN", "QURU.D", 3) +
plot.rgb("IN", "QURU.I", 3) +
plot.rgb("IN", "QURU.S", 3) +
plot.rgb("IN", "QUVE.C", 3) +
plot.rgb("IN", "QUVE.D", 3) +
plot.rgb("IN", "QUVE.I", 3) +
plot.rgb("IN", "QUVE.S", 3) +
plot.rgb("IN", "FRAX.C", 3) +
plot.rgb("IN", "FRAX.D", 3) +
plot.rgb("IN", "FRAX.I", 3) +
plot.rgb("IN", "FRAX.S", 3) +
plot.rgb("IN", "BETULA.C", 3) +
plot.rgb("IN", "BETULA.D", 3) +
plot.rgb("IN", "BETULA.I", 3) +
plot.rgb("IN", "BETULA.S", 3) +
plot.rgb("IN", "ACSA.C", 3) +
plot.rgb("IN", "ACSA.D", 3) +
plot.rgb("IN", "ACSA.I", 3) +
plot.rgb("IN", "ACSA.S", 3) +
plot.rgb("IN", "QUAL.C", 3) +
plot.rgb("IN", "QUAL.D", 3) +
plot.rgb("IN", "QUAL.I", 3) +
plot.rgb("IN", "QUAL.S", 3) +
plot.rgb("IN", "FAGR.C", 3) +
plot.rgb("IN", "FAGR.D", 3) +
plot.rgb("IN", "FAGR.I", 3) +
plot.rgb("IN", "FAGR.S", 3) +
plot.rgb("IN", "ULRU.C", 3) +
plot.rgb("IN", "ULRU.D", 3) +
plot.rgb("IN", "ULRU.I", 3) +
plot.rgb("IN", "ULRU.S", 3) +
plot.rgb("IN", "CARYA.C", 3) +
plot.rgb("IN", "CARYA.D", 3) +
plot.rgb("IN", "CARYA.I", 3) +
plot.rgb("IN", "CARYA.S", 3) +
plot.rgb("IN", "SAAL.C", 3) +
plot.rgb("IN", "SAAL.D", 3) +
plot.rgb("IN", "SAAL.I", 3) +
plot.rgb("IN", "SAAL.S", 3) +
poster.theme2
dev.off()
# Indiana Dominant
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_IN_dom.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$State %in% "IN" & data.graph$Canopy.Class=="D" & !data.graph$group.cc %in% c("BETULA.D", "CARYA.D", "FAGR.D", "FRAX.D", "SAAL.D"),]) + facet_grid(group ~.) +
labs(title= " IN Dominant Trees", x="Year", y = expression(bold(paste("BAI (mm"^"2", "y"^"-1",")")))) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Morgan Monroe
plot.rgb("IN", "ACRU.D", 3) +
plot.rgb("IN", "PIST.D", 3) +
plot.rgb("IN", "TSCA.D", 3) +
plot.rgb("IN", "QURU.D", 3) +
plot.rgb("IN", "QUVE.D", 3) +
plot.rgb("IN", "ACSA.D", 3) +
plot.rgb("IN", "QUAL.D", 3) +
plot.rgb("IN", "ULRU.D", 3) +
poster.theme2
dev.off()
#-------------------------------------------
# Ohio
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_OH.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$State %in% "OH",]) + facet_grid(Canopy.Class ~ group) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Oak Openings
plot.rgb("OH", "ACRU.C", 3) +
plot.rgb("OH", "ACRU.D", 3) +
plot.rgb("OH", "ACRU.I", 3) +
plot.rgb("OH", "ACRU.S", 3) +
plot.rgb("OH", "PIST.C", 3) +
plot.rgb("OH", "PIST.D", 3) +
plot.rgb("OH", "PIST.I", 3) +
plot.rgb("OH", "PIST.S", 3) +
plot.rgb("OH", "TSCA.C", 3) +
plot.rgb("OH", "TSCA.D", 3) +
plot.rgb("OH", "TSCA.I", 3) +
plot.rgb("OH", "TSCA.S", 3) +
plot.rgb("OH", "QURU.C", 3) +
plot.rgb("OH", "QURU.D", 3) +
plot.rgb("OH", "QURU.I", 3) +
plot.rgb("OH", "QURU.S", 3) +
plot.rgb("OH", "QUVE.C", 3) +
plot.rgb("OH", "QUVE.D", 3) +
plot.rgb("OH", "QUVE.I", 3) +
plot.rgb("OH", "QUVE.S", 3) +
plot.rgb("OH", "FRAX.C", 3) +
plot.rgb("OH", "FRAX.D", 3) +
plot.rgb("OH", "FRAX.I", 3) +
plot.rgb("OH", "FRAX.S", 3) +
plot.rgb("OH", "BETULA.C", 3) +
plot.rgb("OH", "BETULA.D", 3) +
plot.rgb("OH", "BETULA.I", 3) +
plot.rgb("OH", "BETULA.S", 3) +
plot.rgb("OH", "ACSA.C", 3) +
plot.rgb("OH", "ACSA.D", 3) +
plot.rgb("OH", "ACSA.I", 3) +
plot.rgb("OH", "ACSA.S", 3) +
plot.rgb("OH", "QUAL.C", 3) +
plot.rgb("OH", "QUAL.D", 3) +
plot.rgb("OH", "QUAL.I", 3) +
plot.rgb("OH", "QUAL.S", 3) +
plot.rgb("OH", "FAGR.C", 3) +
plot.rgb("OH", "FAGR.D", 3) +
plot.rgb("OH", "FAGR.I", 3) +
plot.rgb("OH", "FAGR.S", 3) +
plot.rgb("OH", "ULRU.C", 3) +
plot.rgb("OH", "ULRU.D", 3) +
plot.rgb("OH", "ULRU.I", 3) +
plot.rgb("OH", "ULRU.S", 3) +
plot.rgb("OH", "CARYA.C", 3) +
plot.rgb("OH", "CARYA.D", 3) +
plot.rgb("OH", "CARYA.I", 3) +
plot.rgb("OH", "CARYA.S", 3) +
plot.rgb("OH", "SAAL.C", 3) +
plot.rgb("OH", "SAAL.D", 3) +
plot.rgb("OH", "SAAL.I", 3) +
plot.rgb("OH", "SAAL.S", 3)+
poster.theme2
dev.off()
#-----------------------------------------------
# Massachusetts
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_MA.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$State %in% "MA",]) + facet_grid(Canopy.Class~group) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Harvard
plot.rgb("MA", "ACRU.C", 3) +
plot.rgb("MA", "ACRU.D", 3) +
plot.rgb("MA", "ACRU.I", 3) +
plot.rgb("MA", "ACRU.S", 3) +
plot.rgb("MA", "PIST.C", 3) +
plot.rgb("MA", "PIST.D", 3) +
plot.rgb("MA", "PIST.I", 3) +
plot.rgb("MA", "PIST.S", 3) +
plot.rgb("MA", "TSCA.C", 3) +
plot.rgb("MA", "TSCA.D", 3) +
plot.rgb("MA", "TSCA.I", 3) +
plot.rgb("MA", "TSCA.S", 3) +
plot.rgb("MA", "QURU.C", 3) +
plot.rgb("MA", "QURU.D", 3) +
plot.rgb("MA", "QURU.I", 3) +
plot.rgb("MA", "QURU.S", 3) +
plot.rgb("MA", "QUVE.C", 3) +
plot.rgb("MA", "QUVE.D", 3) +
plot.rgb("MA", "QUVE.I", 3) +
plot.rgb("MA", "QUVE.S", 3) +
plot.rgb("MA", "FRAX.C", 3) +
plot.rgb("MA", "FRAX.D", 3) +
plot.rgb("MA", "FRAX.I", 3) +
plot.rgb("MA", "FRAX.S", 3) +
plot.rgb("MA", "BETULA.C", 3) +
plot.rgb("MA", "BETULA.D", 3) +
plot.rgb("MA", "BETULA.I", 3) +
plot.rgb("MA", "BETULA.S", 3) +
plot.rgb("MA", "ACSA.C", 3) +
plot.rgb("MA", "ACSA.D", 3) +
plot.rgb("MA", "ACSA.I", 3) +
plot.rgb("MA", "ACSA.S", 3) +
plot.rgb("MA", "QUAL.C", 3) +
plot.rgb("MA", "QUAL.D", 3) +
plot.rgb("MA", "QUAL.I", 3) +
plot.rgb("MA", "QUAL.S", 3) +
plot.rgb("MA", "FAGR.C", 3) +
plot.rgb("MA", "FAGR.D", 3) +
plot.rgb("MA", "FAGR.I", 3) +
plot.rgb("MA", "FAGR.S", 3) +
plot.rgb("MA", "ULRU.C", 3) +
plot.rgb("MA", "ULRU.D", 3) +
plot.rgb("MA", "ULRU.I", 3) +
plot.rgb("MA", "ULRU.S", 3) +
plot.rgb("MA", "CARYA.C", 3) +
plot.rgb("MA", "CARYA.D", 3) +
plot.rgb("MA", "CARYA.I", 3) +
plot.rgb("MA", "CARYA.S", 3) +
plot.rgb("MA", "SAAL.C", 3) +
plot.rgb("MA", "SAAL.D", 3) +
plot.rgb("MA", "SAAL.I", 3) +
plot.rgb("MA", "SAAL.S", 3) +
poster.theme2
dev.off()
#-----------------------------------------------------
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_ME.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$State %in% "ME",]) + facet_grid(Canopy.Class ~ group) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Howland
plot.rgb("ME", "ACRU.C", 3) +
plot.rgb("ME", "ACRU.D", 3) +
plot.rgb("ME", "ACRU.I", 3) +
plot.rgb("ME", "ACRU.S", 3) +
plot.rgb("ME", "PIST.C", 3) +
plot.rgb("ME", "PIST.D", 3) +
plot.rgb("ME", "PIST.I", 3) +
plot.rgb("ME", "PIST.S", 3) +
plot.rgb("ME", "TSCA.C", 3) +
plot.rgb("ME", "TSCA.D", 3) +
plot.rgb("ME", "TSCA.I", 3) +
plot.rgb("ME", "TSCA.S", 3) +
plot.rgb("ME", "QURU.C", 3) +
plot.rgb("ME", "QURU.D", 3) +
plot.rgb("ME", "QURU.I", 3) +
plot.rgb("ME", "QURU.S", 3) +
plot.rgb("ME", "QUVE.C", 3) +
plot.rgb("ME", "QUVE.D", 3) +
plot.rgb("ME", "QUVE.I", 3) +
plot.rgb("ME", "QUVE.S", 3) +
plot.rgb("ME", "FRAX.C", 3) +
plot.rgb("ME", "FRAX.D", 3) +
plot.rgb("ME", "FRAX.I", 3) +
plot.rgb("ME", "FRAX.S", 3) +
plot.rgb("ME", "BETULA.C", 3) +
plot.rgb("ME", "BETULA.D", 3) +
plot.rgb("ME", "BETULA.I", 3) +
plot.rgb("ME", "BETULA.S", 3) +
plot.rgb("ME", "ACSA.C", 3) +
plot.rgb("ME", "ACSA.D", 3) +
plot.rgb("ME", "ACSA.I", 3) +
plot.rgb("ME", "ACSA.S", 3) +
plot.rgb("ME", "QUAL.C", 3) +
plot.rgb("ME", "QUAL.D", 3) +
plot.rgb("ME", "QUAL.I", 3) +
plot.rgb("ME", "QUAL.S", 3) +
plot.rgb("ME", "FAGR.C", 3) +
plot.rgb("ME", "FAGR.D", 3) +
plot.rgb("ME", "FAGR.I", 3) +
plot.rgb("ME", "FAGR.S", 3) +
plot.rgb("ME", "ULRU.C", 3) +
plot.rgb("ME", "ULRU.D", 3) +
plot.rgb("ME", "ULRU.I", 3) +
plot.rgb("ME", "ULRU.S", 3) +
plot.rgb("ME", "CARYA.C", 3) +
plot.rgb("ME", "CARYA.D", 3) +
plot.rgb("ME", "CARYA.I", 3) +
plot.rgb("ME", "CARYA.S", 3) +
plot.rgb("ME", "SAAL.C", 3) +
plot.rgb("ME", "SAAL.D", 3) +
plot.rgb("ME", "SAAL.I", 3) +
plot.rgb("ME", "SAAL.S", 3)+
poster.theme2
dev.off()
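# The five per-site PDFs above follow one template; a hedged loop sketch that
# would generate equivalent figures. It assumes rgb.layers() (defined after the
# intermediate-tree figure) and poster.theme2 from earlier in the script, and
# writes to new "_loop" filenames so the hand-built PDFs are not overwritten.
for(st in levels(data.graph$State)){
  pdf(paste0("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_", st, "_loop.pdf"),
      width=13, height=8.5)
  print(ggplot(data.graph[data.graph$State==st,]) + facet_grid(Canopy.Class ~ group) +
    scale_x_continuous(expand=c(0,0), name="Year") +
    scale_y_continuous(expand=c(0,0), name="BAI") +
    geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
    geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
    geom_line(aes(x=Year, y=0), linetype="dashed") +
    rgb.layers(st, unique(data.graph$group.cc)) +
    poster.theme2)
  dev.off()
}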
gam3.data.graph <- data.graph
save(gam3.data.graph, file="processed_data/gam3_graph_data.Rdata")
# Plotting the Obs and modeled with influence coloring
pdf("figures/gam3/gam3_SPP_CC_BAI_limiting_factors_ME.pdf", width= 13, height = 8.5)
ggplot(data = data.graph[data.graph$State %in% "ME",]) + facet_grid(Canopy.Class ~ group) +
scale_x_continuous(expand=c(0,0), name="Year") +
scale_y_continuous(expand=c(0,0), name="BAI") +
# facet_wrap(~TreeID, scales="free_y", space="free") +
# geom_ribbon(data=gam1.weights[gam1.weights$data.type=="Model",], aes(x=Year, ymin=Y.rel.10.lo*100, ymax=Y.rel.10.hi*100), alpha=0.5) +
geom_line(aes(x=Year, y=BA.inc), size=2, alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=BA.inc.lwr, ymax=BA.inc.upr), alpha=0.3) +
geom_line(aes(x=Year, y = 0), linetype="dashed") +
# Howland
plot.rgb("ME", "ACRU.C", 3) +
plot.rgb("ME", "ACRU.D", 3) +
plot.rgb("ME", "ACRU.I", 3) +
plot.rgb("ME", "ACRU.S", 3) +
plot.rgb("ME", "PIST.C", 3) +
plot.rgb("ME", "PIST.D", 3) +
plot.rgb("ME", "PIST.I", 3) +
plot.rgb("ME", "PIST.S", 3) +
plot.rgb("ME", "TSCA.C", 3) +
plot.rgb("ME", "TSCA.D", 3) +
plot.rgb("ME", "TSCA.I", 3) +
plot.rgb("ME", "TSCA.S", 3) +
plot.rgb("ME", "QURU.C", 3) +
plot.rgb("ME", "QURU.D", 3) +
plot.rgb("ME", "QURU.I", 3) +
plot.rgb("ME", "QURU.S", 3) +
plot.rgb("ME", "QUVE.C", 3) +
plot.rgb("ME", "QUVE.D", 3) +
plot.rgb("ME", "QUVE.I", 3) +
plot.rgb("ME", "QUVE.S", 3) +
plot.rgb("ME", "FRAX.C", 3) +
plot.rgb("ME", "FRAX.D", 3) +
plot.rgb("ME", "FRAX.I", 3) +
plot.rgb("ME", "FRAX.S", 3) +
plot.rgb("ME", "BETULA.C", 3) +
plot.rgb("ME", "BETULA.D", 3) +
plot.rgb("ME", "BETULA.I", 3) +
plot.rgb("ME", "BETULA.S", 3) +
plot.rgb("ME", "ACSA.C", 3) +
plot.rgb("ME", "ACSA.D", 3) +
plot.rgb("ME", "ACSA.I", 3) +
plot.rgb("ME", "ACSA.S", 3) +
plot.rgb("ME", "QUAL.C", 3) +
plot.rgb("ME", "QUAL.D", 3) +
plot.rgb("ME", "QUAL.I", 3) +
plot.rgb("ME", "QUAL.S", 3) +
plot.rgb("ME", "FAGR.C", 3) +
plot.rgb("ME", "FAGR.D", 3) +
plot.rgb("ME", "FAGR.I", 3) +
plot.rgb("ME", "FAGR.S", 3) +
plot.rgb("ME", "ULRU.C", 3) +
plot.rgb("ME", "ULRU.D", 3) +
plot.rgb("ME", "ULRU.I", 3) +
plot.rgb("ME", "ULRU.S", 3) +
plot.rgb("ME", "CARYA.C", 3) +
plot.rgb("ME", "CARYA.D", 3) +
plot.rgb("ME", "CARYA.I", 3) +
plot.rgb("ME", "CARYA.S", 3) +
plot.rgb("ME", "SAAL.C", 3) +
plot.rgb("ME", "SAAL.D", 3) +
plot.rgb("ME", "SAAL.I", 3) +
plot.rgb("ME", "SAAL.S", 3)+
poster.theme2
dev.off()
gam3.data.graph <- data.graph
save(gam3.data.graph, file="processed_data/gam3_graph_data.Rdata")
|
loadNamespace("forecast")
handleForecast <- function(model, params) {
outputs = list()
output_types = params$output_types
if ("samples" %in% output_types) {
outputs$samples <- lapply(1:params$num_samples, function(n) { simulate(model, params$prediction_length) } )
}
if("quantiles" %in% output_types) {
f_matrix <- forecast::forecast(model, h=params$prediction_length, level=unlist(params$levels))$upper
outputs$quantiles <- split(f_matrix, col(f_matrix))
}
if("mean" %in% output_types) {
outputs$mean <- forecast::forecast(model, h=params$prediction_length)$mean
}
outputs
}
arima <- function(ts, params) {
model <- forecast::auto.arima(ts, trace=TRUE)
handleForecast(model, params)
}
ets <- function(ts, params) {
model <- forecast::ets(ts, additive.only=TRUE)
handleForecast(model, params)
}
croston <- function(ts, params) {
model <- forecast::croston(ts)
handleForecast(model, params)
}
tbats <- function(ts, params) {
model <- forecast::tbats(ts)
handleForecast(model, params)
}
mlp <- function(ts, params) {
model <- nnfor::mlp(ts, hd.auto.type="valid")
handleForecast(model, params)
}
handleForecastTheta <- function(forecasts, params) {
outputs = list()
output_types = params$output_types
if ("samples" %in% output_types) {
outputs$samples <- lapply(1:params$num_samples, function(n) {forecasts$mean} )
}
if("quantiles" %in% output_types) {
f_matrix <- forecasts$upper
outputs$quantiles <- split(f_matrix, col(f_matrix))
}
if("mean" %in% output_types) {
outputs$mean <- forecasts$mean
}
outputs
}
thetaf <- function(ts, params) {
if("quantiles" %in% params$output_types) {
forecasts <- forecast::thetaf(y=ts, h=params$prediction_length, level=unlist(params$levels))
} else {
forecasts <- forecast::thetaf(y=ts, h=params$prediction_length)
}
handleForecastTheta(forecasts, params)
}
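# --- Illustrative usage sketch (not part of the original file). The shape of
# `params` below is an assumption about what the gluon-ts R bridge passes in:
# params <- list(prediction_length = 12L, num_samples = 100L,
#                levels = list(80, 95),
#                output_types = list("mean", "quantiles", "samples"))
# ets(AirPassengers, params)     # dispatches to the local ets() defined above
# thetaf(AirPassengers, params)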
|
/src/gluonts/model/r_forecast/R/forecast_methods.R
|
permissive
|
dibgerge/gluon-ts
|
R
| false | false | 2,002 |
r
|
#ISLR Chap. 7 Lab: Moving Beyond Linearity (4/13/18)
library(ISLR)
attach(Wage)
#7.8.1 Polynomial Regression and Step Functions
fit= lm(wage~poly(age,4),data = Wage) #Each column is a linear combination of the variables age, age^2, etc.
coef(summary(fit))
#using raw = TRUE gives the coefficients of the raw (non-orthogonal) polynomials directly
fit2= lm(wage~poly(age,4, raw = T),data = Wage)
coef(summary(fit2))
#create a grid of values of age at which we want predictions, then use predict() and specify that we want SEs
agelims = range(age)
age.grid <- seq(from=agelims[1], to = agelims[2])
preds = predict(fit, newdata = list(age =age.grid), se = T)
se.bands <- cbind(preds$fit + 2*preds$se.fit, preds$fit -2*preds$se.fit)
#Finally plot the data and add the fit from the degree 4 polynomial
par(mfrow=c(1,2),mar=c(4.5,4.5,1,1) ,oma=c(0,0,4,0))
plot(age ,wage ,xlim=agelims ,cex =.5,col="darkgrey")
title("Degree-4 Polynomial", outer=T)
lines(age.grid ,preds$fit ,lwd=2,col="blue")
matlines(age.grid ,se.bands ,lwd=1, col="blue",lty=3)
# In performing a polynomial regression we must decide on the degree of the polynomial to use. One way to do this is with hypothesis tests.
#We now fit models ranging from linear to degree 5 and seek to determine the simplest model which is sufficient to explain the relationship
#Use anova()
fit.1=lm(wage~age ,data=Wage)
fit.2=lm(wage~poly(age ,2),data=Wage)
fit.3=lm(wage~poly(age ,3),data=Wage)
fit.4=lm(wage~poly(age ,4),data=Wage)
fit.5=lm(wage~poly(age ,5),data=Wage)
anova(fit.1,fit.2, fit.3, fit.4, fit.5) #Looks like either a cubic or quartic poly model is good
#Next, fit a logistic regression for whether wage > 250 using a degree-4 polynomial
fit=glm(I(wage >250)~poly(age ,4),data=Wage , family=binomial)
#make predictions using predict()
preds=predict (fit ,newdata =list(age=age.grid),se=T)
#In order to create confidence intervals we need to do some transformation
pfit <- exp(preds$fit)/ (1+exp(preds$fit))
se.bands.logit <- cbind(preds$fit + 2*preds$se.fit, preds$fit -2*preds$se.fit)
se.bands <- exp(se.bands.logit)/(1+exp(se.bands.logit))
#Note we can directly compute the probabilities by setting type = "response" in predict()
preds <- predict(fit, newdata = list(age = age.grid), type = "response", se = T)
#Finally create plot
plot(age ,I(wage >250),xlim=agelims ,type="n",ylim=c(0,.2))
points(jitter(age), I((wage >250)/5),cex=.5,pch ="|",col="darkgrey")
lines(age.grid ,pfit ,lwd=2, col ="blue")
matlines (age.grid ,se.bands ,lwd=1, col="blue",lty=3)
#Use jitter() so that observations with the same age are not covering each other up
#To fit a step function
table(cut(age,4))
fit <- lm(wage~cut(age,4), data = Wage) #R auto-picked the breaks; we could have set them manually with breaks =
coef(summary(fit))
#7.8.2 Splines
library(splines)
fit = lm(wage~bs(age,knots = c(25,40,60)), data = Wage)
pred <- predict(fit, newdata= list(age = age.grid), se = T)
plot(age ,wage ,col="gray")
lines(age.grid ,pred$fit ,lwd=2)
lines(age.grid ,pred$fit +2*pred$se ,lty="dashed")
lines(age.grid ,pred$fit -2*pred$se ,lty="dashed")
#Produces a spline with 6 basis functions (a cubic spline with 3 knots has 7 df; 1 is used by the intercept, leaving 6 basis functions)
dim(bs(age,knots = c(25,40,60)))
dim(bs(age ,df=6))
attr(bs(age ,df=6) ,"knots") #R has chosen the knots at 33.75, 42, and 51 using the bs() function
#bs() also has a degree argument that can be changed; the default is 3 (cubic spline)
#Natural Spline
fit2=lm(wage~ns(age ,df=4),data=Wage)
pred2=predict (fit2 ,newdata=list(age=age.grid),se=T)
lines(age.grid , pred2$fit ,col="red",lwd=2)
#Smooth Spline
plot(age ,wage ,xlim=agelims ,cex =.5,col="darkgrey")
title("Smoothing Spline")
fit=smooth.spline(age ,wage ,df=16)
fit2=smooth.spline(age ,wage ,cv=TRUE)
fit2$df
lines(fit ,col="red",lwd =2)
lines(fit2 ,col="blue",lwd=2)
legend ("topright",legend=c("16 DF" ,"6.8 DF"),col=c("red","blue"),lty=1,lwd=2, cex =.8)
#Local Regression use loess() function
plot(age ,wage ,xlim=agelims ,cex =.5,col="darkgrey")
title("Local Regression")
fit=loess(wage~age ,span=.2,data=Wage)
fit2=loess(wage~age ,span=.5,data=Wage)
lines(age.grid ,predict (fit ,data.frame(age=age.grid)),col="red",lwd=2)
lines(age.grid ,predict (fit2 ,data.frame(age=age.grid)),col="blue",lwd=2)
legend ("topright",legend=c("Span=0.2"," Span=0.5"),col=c("red","blue"),lty=1,lwd=2, cex =.8)
#Span is .5 and .2. That is each neighborhood consists of 20% to 50% of the observations. The larger the span, the smoother the fit.
#7.8.3 GAMs
#With natural splines we can use lm() since the model is just a big linear regression
gam1 <- lm(wage~ns(year,4) + ns(age,5) + education, data = Wage)
#To fit other splines like smooth we need gam()
library(gam)
gam.m3 <- gam(wage~s(year,4) + s(age,5) + education, data = Wage)
par(mfrow = c(1,3))
plot(gam.m3, se = T, col = "blue")
#Can compare models with anova()
#M1 is a GAM that excludes year
#M2 is a GAM using a linear function of year
#M3 is a GAM using a spline function of year
gam.m1=gam(wage~s(age,5)+education ,data=Wage)
gam.m2=gam(wage~year+s(age,5)+education ,data=Wage)
anova(gam.m1,gam.m2,gam.m3,test="F") #Linear is better than no year, and spline is not better than linear year
summary(gam.m3) #big p value for year under nonparametric effect says we don't need a nonlinear relationship. Can't say the same for age
#Make predictions using GAM
preds <- predict(gam.m2, newdata = Wage)
#We can use local regression fits as building blocks in GAM, using lo() function
gam.lo <- gam(wage~s(year, df = 4) + lo(age,span =.7) + education, data = Wage)
plot.gam(gam.lo, se = T, col = "green")
#Here we have a local regression for the age term with a span of .7. We can also use the lo() function to create interactions before calling the gam() function
gam.lo.i <- gam(wage~lo(year, age, span = .5) + education, data = Wage)
#fits a 2-term model in which the first term is an interaction between year and age, fit by a local regression surface.
#We can plot the resulting 2D surface once we install the akima package
library(akima)
plot(gam.lo.i)
#Logistic GAM
gam.lr=gam(I(wage >250)~year+s(age ,df=5)+education ,family=binomial ,data=Wage)
par(mfrow=c(1,3))
plot(gam.lr,se=T,col="green")
#It is easy to see that there are no high earners in the <HS category
table(education ,I(wage >250))
#hence fitting a logistic regression GAM without this category gives a better result
gam.lr=gam(I(wage >250)~year+s(age ,df=5)+education ,family=binomial ,data=Wage, subset = (education!="1. <HS Grad"))
plot(gam.lr, se =T, col = "Green")
|
/Chap 7 Lab (Moving Beyond Linearity).R
|
no_license
|
awong026/ISLR.Labs
|
R
| false | false | 6,490 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAnimalAlleles.R
\name{getAnimalAlleles}
\alias{getAnimalAlleles}
\title{Tests to see if the potential trios file has the appropriate header.}
\usage{
getAnimalAlleles(alleleFile, dateType)
}
\arguments{
\item{alleleFile}{Character vector of length one having the path of the
animal alleles file.}
\item{dateType}{A character vector of length one used to guide
interpretation of date fields. Either "YYYYMMDD" or "mm/dd/YYYY"}
}
\description{
Tests to see if the potential trios file has the appropriate header.
}
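% Illustrative example added for clarity; the file name below is hypothetical.
\examples{
\dontrun{
alleles <- getAnimalAlleles("animal_alleles.csv", dateType = "YYYYMMDD")
}
}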
|
/man/getAnimalAlleles.Rd
|
permissive
|
rmsharp/parentfindr
|
R
| false | true | 595 |
rd
|
### =========================================================================
### ViewsList objects
### -------------------------------------------------------------------------
setClass("ViewsList",
contains="IntegerRangesList",
representation("VIRTUAL"),
prototype(elementType="Views")
)
setClass("SimpleViewsList",
contains=c("ViewsList", "SimpleList"),
representation("VIRTUAL")
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Accessor methods.
###
setMethod("ranges", "SimpleViewsList",
function(x, use.names=TRUE, use.mcols=FALSE)
S4Vectors:::new_SimpleList_from_list("SimpleIRangesList",
lapply(x, ranges, use.names=use.names, use.mcols=use.mcols))
)
setMethod("start", "SimpleViewsList", function(x, ...) start(ranges(x)))
setMethod("end", "SimpleViewsList", function(x, ...) end(ranges(x)))
setMethod("width", "SimpleViewsList", function(x) width(ranges(x)))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion
###
setMethod("as.matrix", "ViewsList",
function(x, rev = FALSE, use.names = FALSE)
{
if (!isTRUEorFALSE(use.names))
stop("use.names must be TRUE or FALSE")
if (!is(rev, "List"))
rev <- as(rev, "List")
rev <- S4Vectors:::VH_recycle(rev, x, "rev", "x")
max_width <- max(max(width(restrict(ranges(x), start = 1L))))
m <- do.call(rbind, mapply(as.matrix, x, rev,
IntegerList(max_width),
SIMPLIFY = FALSE))
nms <- names(x)
if (!is.null(nms) && use.names) {
nms <- rep(nms, elementNROWS(x))
rownms <- rownames(m)
if (is.null(rownms))
rownms <- unlist_as_integer(IRanges(1L, width=elementNROWS(x)))
rownames(m) <- paste(nms, rownms, sep = ".")
}
m
})
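## Illustrative usage sketch (not part of the original source); assumes the
## RleViewsList constructor exported by this package:
## v1 <- Views(Rle(1:10), start = c(1, 4), width = 3)
## v2 <- Views(Rle(10:1), start = c(2, 6), width = 3)
## vl <- RleViewsList(a = v1, b = v2)   # a concrete ViewsList subclass
## as.matrix(vl, use.names = TRUE)      # dispatches to the method above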
|
/R/ViewsList-class.R
|
no_license
|
Bioconductor/IRanges
|
R
| false | false | 2,010 |
r
|
library(rxSeq)
### Name: data.A
### Title: Sample data example for autosomal genes
### Aliases: data.A
### Keywords: utilities
### ** Examples
# see total read counts (TReC) for first 2 autosomal genes of a data example:
data.A$y[1:2,]
|
/data/genthat_extracted_code/rxSeq/examples/data.A.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 244 |
r
|
generate.random.points <- function(design, strata.id, samplers, calc.cov.area = TRUE, clip.to.strata = TRUE, quiet = FALSE){
#Check positive number of samplers
if(samplers <= 0){
if(!quiet){
warning(paste("No samplers allocated to strata ", strata.id, ". Cannot generate samplers.", sep = ""), call. = FALSE, immediate. = TRUE)
}
return(NULL)
}
#Generates random points
region <- design@region
#Get the current strata and spacing
sf.column <- attr(region@region, "sf_column")
strata <- region@region[[sf.column]][[strata.id]]
#Buffer strata for plus sampling?
if(design@edge.protocol[strata.id] == "plus"){
strata <- sf::st_buffer(strata, design@truncation)
}
#Generate random points
random_pt <- sf::st_sample(strata , size = samplers, type = "random")
while (length(random_pt) < samplers) {
diff <- samplers - length(random_pt)
random_pt_new <- sf::st_sample(strata , size = diff, type = "random")
random_pt <- c(random_pt, random_pt_new)
}
  #Collect the generated points as the transects
  transects <- random_pt
if(calc.cov.area){
cov.area.polys <- lapply(transects, FUN = sf::st_buffer, dist = design@truncation)
if(clip.to.strata){
cov.area.polys <- lapply(cov.area.polys, sf::st_intersection, y = strata)
}
return(list(transects = transects, cover.polys = cov.area.polys))
}
return(list(transects = transects, cover.polys = list()))
}
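# Illustrative usage sketch (not part of the original source). The design is
# assumed to come from dssd's constructors; the arguments shown are placeholders:
# region <- make.region(region.name = "study area", shape = shapefile.path)
# design <- make.design(region = region, transect.type = "point",
#                       samplers = 20, truncation = 50)
# pts <- generate.random.points(design, strata.id = 1, samplers = 20)
# pts$transects      # sfc point geometries
# pts$cover.polys    # covered-area polygons (truncation-radius buffers)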
|
/R/generate.random.points.R
|
no_license
|
DistanceDevelopment/dssd
|
R
| false | false | 1,407 |
r
|
###### Extra functions
# Local decoding: from Zucchini 2016
### A.1.7 Computing log(forward probabilities)
norm.HMM.lforward<-function(x,mod,allprobs)
{
n <- dim(x)[1]
lalpha <- matrix(NA,mod$m,n)
foo <- mod$delta*allprobs[1,]
sumfoo <- sum(foo)
lscale <- log(sumfoo)
foo <- foo/sumfoo
lalpha[,1] <- lscale+log(foo)
for (i in 2:n)
{
foo <- foo%*%mod$tpm*allprobs[i,]
sumfoo <- sum(foo)
lscale <- lscale+log(sumfoo)
foo <- foo/sumfoo
lalpha[,i] <- log(foo)+lscale
}
return(lalpha)
}
### A.1.8 Computing log(backward probabilities)
norm.HMM.lbackward<-function(x,mod,allprobs)
{
n <- dim(x)[1]
m <- mod$m
lbeta <- matrix(NA,m,n)
lbeta[,n] <- rep(0,m)
foo <- rep(1/m,m)
lscale <- log(m)
for (i in (n-1):1)
{
foo <- mod$tpm%*%(allprobs[i+1,]*foo)
lbeta[,i] <- log(foo)+lscale
sumfoo <- sum(foo)
foo <- foo/sumfoo
lscale <- lscale+log(sumfoo)
}
return(lbeta)
}
norm.HMM.state_probs <- function (x , mod ,allprobs)
{
n <- dim(x)[1]
la <- norm.HMM.lforward(x,mod,allprobs)
lb <- norm.HMM.lbackward(x,mod,allprobs)
c <- max(la[,n])
llk <- c + log(sum(exp(la[,n]-c)))
stateprobs <- matrix (NA,ncol=n,nrow = mod$m)
for (i in 1:n) stateprobs [,i] <-exp(la[,i]+lb[,i]-llk)
return (stateprobs)
}
## Global decoding: Viterbi algorithm
HMM.viterbi <- function(x, m, gamma, allprobs, delta=NULL, ...)
{
if(is.null(delta)) delta <- solve(t(diag(m) - gamma + 1), rep(1, m))
n <- dim(x)[1]
xi <- matrix(0, n, m)
foo <- delta*allprobs[1,]
xi[1,] <- foo/sum(foo)
for(i in 2:n)
{
foo <- apply(xi[i-1,]*gamma, 2, max)*allprobs[i,]
xi[i,] <- foo/sum(foo)
}
iv <- numeric(n)
iv[n] <- which.max(xi[n,])
for(i in (n-1):1)
iv[i] <- which.max(gamma[, iv[i+1]]*xi[i,])
return(iv)
}
## Cross entropy index
cross.entropy=function(gamma,true.states,quienes)
{
out=0
correctos=gamma[,quienes]
for (i in 1:length(quienes))
{
out=out+log(correctos[true.states[quienes[i]],i])
}
return(-out)
}
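# Illustrative usage sketch (not part of the original source). Assumes a fitted
# model list `mod` with elements m, delta and tpm, data `x` as an n x 1 matrix,
# and `allprobs` as an n x m matrix of state-dependent densities:
# sp <- norm.HMM.state_probs(x, mod, allprobs)                 # local decoding
# path <- HMM.viterbi(x, mod$m, mod$tpm, allprobs, mod$delta)  # global decoding
# cross.entropy(sp, true.states, quienes = 1:ncol(sp))  # needs known true states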
|
/code/extrafunctions.R
|
permissive
|
vianeylb/riiaa_workshop_hmm
|
R
| false | false | 2,114 |
r
|
# if(!require(igraph)){install.packages('igraph'); library(igraph)}
#library(igraph)
#if(!require(igraph)){install.packages('igraph', lib = '~/myRlibs', repos='http://cran.us.r-project.org'); library('igraph', lib = '~/myRlibs')}
require(igraph)
networkstats = function(start, path.in, chain.file)
{
# read in posterior network samples
chain = readLines(paste(path.in, chain.file, sep = ''))
chain = strsplit(chain,';')
numNodes = as.numeric(strsplit(tail(chain[[1]],1),'-')[[1]][3])
# set up array to store networks
networks = matrix(0, nrow = numNodes + 1, ncol = numNodes + 1)
ancestor = descendant = character()
k = ii.in = jj.in = numeric()
k.distribution = matrix(0,length(chain)-start+1,length(chain[[1]]))
# populate network array
for(ii in start : length(chain)){
for(jj in 1 : length(chain[[ii]])){
ancestor = strsplit(as.character(chain[[ii]][jj]), '-')[[1]][1]
k = strsplit(as.character(chain[[ii]][jj]), '-')[[1]][2]
descendant = strsplit(as.character(chain[[ii]][jj]), '-')[[1]][3]
if(ancestor == 's'){ii.in = 1}
if(ancestor != 's'){ii.in = as.integer(ancestor) + 1}
jj.in = as.integer(descendant) + 1
networks[ii.in, jj.in] = networks[ii.in, jj.in] + 1
k.distribution[ii-start+1,jj] = as.numeric(k)
}
}
# return a list with posterior samples of network metrics
return(list(
edgeProbs = networks / (length(chain)-start+1),
k.distribution = k.distribution))
}
# read the input path from the command line
path.in = commandArgs(trailingOnly = TRUE)
print(path.in)
path.out = path.in
# compute network statistics on posterior networks
files = list.files(path.in)
files = files[which(grepl('networks',files))]
network.data = list()
for(ff in 1:length(files)){
chain.length =
as.numeric(
strsplit(
system(paste(
'wc -l ', path.in, files[ff], sep=''), intern=T), split = ' ')[[1]][1])
if(chain.length > 10000){
network.data[[ff]] = networkstats(10001, path.in, files[ff])
}
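  # saving inside the loop checkpoints results after each chain file is processed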
save(network.data,file=paste(path.in, 'networks_processed.RData', sep = ''))
}
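# Illustrative invocation (not part of the original script); note the trailing
# slash, since paths are concatenated with sep = '':
#   Rscript process_MC3.R /path/to/chain/output/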
|
/code/R/process_MC3.R
|
no_license
|
johnhhuber/SpaceTime_Networks
|
R
| false | false | 2,075 |
r
|
### ===== actuar: An R Package for Actuarial Science =====
###
### Definition of the {d,p,q,r,m,lev}single-parameter pareto
### functions. The single-parameter Pareto distribution used in these
### functions has cumulative distribution function
###
### Pr[X <= x] = 1 - (min/x)^shape, x > 0.
###
### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley.
###
### AUTHORS: Mathieu Pigeon, Vincent Goulet <vincent.goulet@act.ulaval.ca>
dpareto1 <- function (x, shape, min, log = FALSE)
.External("actuar_do_dpq", "dpareto1", x, shape, min, log)
ppareto1 <- function(q, shape, min, lower.tail = TRUE, log.p = FALSE)
.External("actuar_do_dpq", "ppareto1", q, shape, min, lower.tail, log.p)
qpareto1 <- function(p, shape, min, lower.tail = TRUE, log.p = FALSE)
.External("actuar_do_dpq", "qpareto1", p, shape, min, lower.tail, log.p)
rpareto1 <- function(n, shape, min)
.External("actuar_do_random", "rpareto1", n, shape, min)
mpareto1 <- function(order, shape, min)
.External("actuar_do_dpq", "mpareto1", order, shape, min, FALSE)
levpareto1 <- function(limit, shape, min, order = 1)
.External("actuar_do_dpq", "levpareto1", limit, shape, min, order, FALSE)
|
/R/SingleParameterPareto.R
|
no_license
|
LauraHuang100/actuar
|
R
| false | false | 1,199 |
r
|
library(shiny)
library(shinyjs)
library(tidyverse)
library(plotly)
library(DT)
library(sp) #1.3-2
library(rgdal) #1.4-8
library(micromap)
#### FIX THESE ################### EACH IR CYCLE!!!! #######################################################################
## need to manually update MoE %>% in trend IR, Year modules!!
newIR <- 2022
newIRwindowEnd <- 2020
panelWindow <- c("2001-2020", '2001-2010', '2011-2020')
bioPanelWindow <- c('2001-2005', '2006-2010', '2011-2015', '2016-2020')
IRwindows <- paste0('IR', seq(2008, newIR, 2))
years <- paste0("Year ", 2001:newIRwindowEnd)
# Define each subpopulation category for easy module building, rebuild years each IR update
superBasinSubpopulations <- c('Virginia',"Roanoke Basin","James Basin",
"Potomac-Shenandoah","Rappahannock-York",
"New","Chowan","Tennessee")
subBasinSubpopulations <- c('Virginia',"Roanoke Basin","James Basin",
"Potomac","Shenandoah","Rappahannock","York",
"New","Chowan","Holston", "Big Sandy", "Clinch-Powell")
VAHUSBSubpopulations <- c('Virginia',"Roanoke River, Upper", "James River, Middle (Piedmont)", "New River",
"James River, Upper (Mountain)", "Roanoke River- Dan River", "York River", "Potomac River, Lower",
"Chowan River, Upper", "Rappahannock River", "Big Sandy River",
"Tennessee-Holston River", "Potomac River-Shenandoah River", "James River- Appomattox River",
"Chowan River-Meherrin River", "Tennessee-Clinch River")
ecoregionSubpopulations <- c("Piedmont", "Northern Piedmont", "Central Appalachian Ridges and Valleys",
"Southeastern Plains", "Blue Ridge Mountains", "Central Appalachians")
bioregionSubpopulations <- c("Mountain Bioregion", "Piedmont Bioregion", "Coast Bioregion" )
streamOrderSubpopulations <- c("First Order", "Second Order", "Third Order", "Fourth Order", "Fifth Order" )
watershedSizeSubpopulations <- c("<1 square mile", "1 to 10 square mile", "10 to 50 square mile", ">50 square mile")
streamSizeSubpopulations <- c("Small", "Medium", "Large")
biophaseXStreamSizeSubpopulations<- c("Phase1Small", "Phase2Small", "Phase1Medium", "Phase2Medium", "Phase1Large", "Phase2Large")
IRWindowSubpopulations <- paste0('IR', seq(2008, newIR, 2))
#c("IR2008", "IR2010", "IR2012", "IR2014", "IR2016", "IR2018", "IR2020" )
yearSubpopulations <- paste0("Year ", 2001:newIRwindowEnd)
# c("Year 2001", "Year 2002", "Year 2003", "Year 2004", "Year 2005", "Year 2006",
# "Year 2007", "Year 2008", "Year 2009", "Year 2010", "Year 2011", "Year 2012",
# "Year 2013", "Year 2014", "Year 2015", "Year 2016", "Year 2017", "Year 2018")
bayNonBaySubpopulations <- c(paste0('Bay Watersheds ',panelWindow[1]), paste0('Non-Bay Watersheds ',panelWindow[1]),
paste0('Bay Watersheds ',panelWindow[2]), paste0('Non-Bay Watersheds ',panelWindow[2]),
paste0('Bay Watersheds ',panelWindow[3]), paste0('Non-Bay Watersheds ',panelWindow[3]))
# c("Bay Watersheds 2001-2018", "Non-Bay Watersheds 2001-2018", "Bay Watersheds 2001-2008",
# "Bay Watersheds 2009-2018", "Non-Bay Watersheds 2001-2008", "Non-Bay Watersheds 2009-2018")
VSCIyearSubpopulations <- paste0('VSCI Scores ',bioPanelWindow)
#c("VSCI Scores 2001-2004", "VSCI Scores 2005-2008", "VSCI Scores 2009-2013", "VSCI Scores 2014-2018")
biophaseSubpopulations <- c(paste0("Phase One ", panelWindow[2]), paste0("Phase Two ", panelWindow[3]))
#c("Phase One 2001-2008","Phase Two 2009-2018")
######################################################################################################################
source('micromapFunction.R')
statusModulesToReadIn <- c('Status_Superbasin','Status_Subbasin','Status_VAHUSB','Status_Ecoregion',
'Status_Bioregion', 'Status_StreamOrder','Status_WatershedSize', 'Status_StreamSize')
trendModulesToReadIn <- c('Trend_IRWindow','Trend_Year','Trend_BayNonBay','Trend_VSCIyear',
'Trend_Biophase','Trend_BiophaseXStreamSize')
for (i in 1:length(statusModulesToReadIn)){
source(paste('module_',statusModulesToReadIn[i],'Module.R',sep=''))
}
for (i in 1:length(trendModulesToReadIn)){
source(paste('module_',trendModulesToReadIn[i],'Module.R',sep=''))
}
# Loading screen
load_data <- function() {
Sys.sleep(2)
shinyjs::hide("loading_page")
shinyjs::show("main_content")
}
percentileSubpop <- function(indicatorDataset, subpopulationDesired, indicatorLookup){
results <- data.frame(Percentile= NA, MoE = NA)
for(i in 1:length(subpopulationDesired)){
rawDat <- filter(indicatorDataset, Subpopulation == subpopulationDesired[i]) %>%
mutate(MoE.P = StdError.P * 1.96) %>%
select(Value,Estimate.P, MoE.P) %>% as.data.frame()
results[i,] <- data.frame(Percentile= as.numeric(vlookup(indicatorLookup,rawDat, 2, TRUE)),
MoE = as.numeric(vlookup(indicatorLookup,rawDat, 3, TRUE)))
}
results <- mutate(results, Subpopulation = subpopulationDesired) %>% dplyr::select(Subpopulation, everything())
return(results)
}
percentileSubpopN <- function(indicatorDataset, subpopulationDesired, indicatorLookup){
results <- data.frame(Percentile= NA, MoE = NA, n = NA)
for(i in 1:length(subpopulationDesired)){
rawDat <- filter(indicatorDataset, Subpopulation == subpopulationDesired[i]) %>%
mutate(MoE.P = StdError.P * 1.96) %>%
select(Value,Estimate.P, MoE.P, NResp) %>% as.data.frame()
results[i,] <- data.frame(Percentile= as.numeric(vlookup(indicatorLookup,rawDat, 2, TRUE)),
MoE = as.numeric(vlookup(indicatorLookup,rawDat, 3, TRUE)),
n = as.numeric(vlookup(indicatorLookup,rawDat, 4, TRUE)))
}
results <- mutate(results, Subpopulation = subpopulationDesired) %>% dplyr::select(Subpopulation, everything())
return(results)
}
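# Illustrative usage sketch (assumption: the indicator dataset is a CDF results
# table with Subpopulation, Value, Estimate.P, StdError.P (and NResp) columns;
# `DO_cdf` is a hypothetical name):
# percentileSubpop(DO_cdf, c("Virginia", "James Basin"), indicatorLookup = 7)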
# VLOOKUP (Excel function hack) by Julin Maloof
vlookup <- function(ref, #the value or values that you want to look for
table, #the table where you want to look for it; will look in first column
column, #the column that you want the return data to come from,
range=FALSE, #if there is not an exact match, return the closest?
larger=FALSE) #if doing a range lookup, should the smaller or larger key be used?)
{
# 2020 addition, make tibbles dataframes
table <- as.data.frame(table)
if(!is.numeric(column) & !column %in% colnames(table)) {
stop(paste("can't find column",column,"in table"))
}
if(range) {
if(!is.numeric(table[,1])) {
stop(paste("The first column of table must be numeric when using range lookup"))
}
table <- table[order(table[,1]),]
index <- findInterval(ref,table[,1])
if(larger) {
index <- ifelse(ref %in% table[,1],index,index+1)
}
output <- table[index,column]
output[!index <= dim(table)[1]] <- NA
} else {
output <- table[match(ref,table[,1]),column]
output[!ref %in% table[,1]] <- NA #not needed?
}
dim(output) <- dim(ref)
output
}
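# Illustrative usage sketch (not part of the original source):
# tbl <- data.frame(Value = c(10, 20, 30), Estimate.P = c(5, 50, 95))
# vlookup(20, tbl, column = 2)               # exact match -> 50
# vlookup(25, tbl, column = 2, range = TRUE) # nearest smaller key -> 50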
# add margin of error to plotly plots for each subpopulation efficiently
addMoE <- function(p, dataset, subpopulation){
add_ribbons(p, data = filter(dataset, Subpopulation== subpopulation),
x = ~Value, ymin = ~ymin, ymax = ~ymax, line = list(color = 'rgba(7, 164, 181, 0.05)'),
fillcolor = 'rgba(7, 164, 181, 0.2)', name = paste(subpopulation," Margin of Error",sep=""), visible = 'legendonly')
}
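# Illustrative usage sketch (assumption: `dat` holds Subpopulation, Value,
# ymin and ymax columns): plot_ly() %>% addMoE(dat, "Virginia")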
|
/app/global.R
|
no_license
|
EmmaVJones/ProbDash
|
R
| false | false | 7,716 |
r
|
library(shiny)
# Define UI for random distribution application
shinyUI(fluidPage(
# Application title
titlePanel(HTML("<h1>Developing Data Products Course Project</h1>
<h2><em>Fun with probability distributions</em></h2><br>"),
windowTitle="Fun with probability distributions"),
# Sidebar with controls to select the random distribution type
# and number of observations to generate. Note the use of the
# br() element to introduce extra vertical spacing
sidebarLayout(
sidebarPanel(
sliderInput("n",
"Number of observations:",
value = 500,
min = 1,
max = 1000),
br(),
radioButtons("dist", "Distribution type:",
c("Normal" = "norm",
"Uniform" = "unif",
"Log-normal" = "lnorm",
"Exponential" = "exp",
"Logistic" = "logis")
),
br(),
radioButtons("border", "Bar border color:",
c("Black" = "black",
"Red" = "red",
"Blue" = "blue",
"Green" = "green",
"Yellow" = "yellow"),
selected="black"
),
br(),
radioButtons("fill", "Bar fill color:",
c("Black" = "black",
"Red" = "red",
"Blue" = "blue",
"Green" = "green",
"Yellow" = "yellow"),
selected="red"
)
),
# Show a tabset that includes a help, histogram, summary and qq-plot
# of the generated distribution
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Info", HTML("Choose number of observations, distribution type, and bar border
and fill colors using the sidepanel. <br><br>The <strong>Histogram</strong>
and <strong>Descriptives</strong> tabs provide the histogram and summary
of the chosen distribution respectively. The <strong>Normal Q-Q plot</strong>
tab assists in assessing deviation of chosen distribution from a normal
distribution.")),
tabPanel("Histogram", plotOutput("hist")),
tabPanel("Descriptives", verbatimTextOutput("summary")),
tabPanel("Normal Q-Q plot", plotOutput("qqplot"))
)
)
)
))
|
/ui.R
|
no_license
|
aatishk/DDP_Course_Project
|
R
| false | false | 2,659 |
r
|
library(shiny)
# Define UI for random distribution application
shinyUI(fluidPage(
# Application title
titlePanel(HTML("<h1>Developing Data Products Course Project</h1>
<h2><em>Fun with probability distributions</em></h2><br>"),
windowTitle="Fun with probability distributions"),
# Sidebar with controls to select the random distribution type
# and number of observations to generate. Note the use of the
# br() element to introduce extra vertical spacing
sidebarLayout(
sidebarPanel(
sliderInput("n",
"Number of observations:",
value = 500,
min = 1,
max = 1000),
br(),
radioButtons("dist", "Distribution type:",
c("Normal" = "norm",
"Uniform" = "unif",
"Log-normal" = "lnorm",
"Exponential" = "exp",
"Logistic" = "logis")
),
br(),
radioButtons("border", "Bar border color:",
c("Black" = "black",
"Red" = "red",
"Blue" = "blue",
"Green" = "green",
"Yellow" = "yellow"),
selected="black"
),
br(),
radioButtons("fill", "Bar fill color:",
c("Black" = "black",
"Red" = "red",
"Blue" = "blue",
"Green" = "green",
"Yellow" = "yellow"),
selected="red"
)
),
# Show a tabset that includes a help, histogram, summary and qq-plot
# of the generated distribution
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Info", HTML("Choose number of observations, distribution type, and bar border
and fill colors using the sidepanel. <br><br>The <strong>Histogram</strong>
and <strong>Descriptives</strong> tabs provide the histogram and summary
of the chosen distribution respectively. The <strong>Normal Q-Q plot</strong>
tab assists in assessing deviation of chosen distribution from a normal
distribution.")),
tabPanel("Histogram", plotOutput("hist")),
tabPanel("Descriptives", verbatimTextOutput("summary")),
tabPanel("Normal Q-Q plot", plotOutput("qqplot"))
)
)
)
))
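# A matching server.R (assumed counterpart, not part of this file) would define
# the "hist", "summary" and "qqplot" outputs used above; a minimal sketch:
# shinyServer(function(input, output) {
#   data <- reactive({
#     rng <- switch(input$dist, norm = rnorm, unif = runif,
#                   lnorm = rlnorm, exp = rexp, logis = rlogis)
#     rng(input$n)
#   })
#   output$hist <- renderPlot(hist(data(), border = input$border, col = input$fill))
#   output$summary <- renderPrint(summary(data()))
#   output$qqplot <- renderPlot({ qqnorm(data()); qqline(data()) })
# })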
|
library("tseries")
library(ggplot2)
library(forecast)
library(Tcomp)
library(doParallel)
library(dplyr)
library(tidyr)
library(xgboost)
library(M4metalearning)
library(M4metaresults)
library("ggplot2")
library("fpp2")
library("dplyr")
library("Mcomp")
library("xtable")
library("sophisthse")
library("devtools")
sophisthse_tables()
View(series_info)
# YEARLY DATA
# Forecasts for series from sophisthse with frequency 1 (yearly data)
series <- filter(series_info, freq == 1)
series_1 <- series
# Build a list of series names
label <- NA
for (i in c(1:length(series$table))) {
label <- rbind(label, series$table[i])
}
# Remove duplicates
label <- unique(label)[-1]
# ETS method
rus <- NA
rus1 <- NA
accuracy_ets <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 2
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- ets(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_ets <- rbind(accuracy_ets, accuracy_result[5])
}
}
# ARIMA method
rus <- NA
accuracy_arima <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 2
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- auto.arima(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_arima <- rbind(accuracy_arima, accuracy_result[5])
}
}
sum(accuracy_arima)
# META combination
rus <- NA
accuracy_meta <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
if (length(rus) > 4){
h <- 2
l <- length(rus) - h
train <- ts(rus[1:l])
test <- ts(rus[(l + 1):(l + h)])
forecast_result <- forecast_meta_M4(model_M4, train, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_meta <- rbind(accuracy_meta, accuracy_result[5])
}
}
}
c(i, j)
rus
length(rus)
# QUARTERLY
series <- filter(series_info, freq == 4)
s_quart <- series
series$table[1]
length(series$table)
# Build a list of series names
label <- NA
for (i in c(1:length(series$table))) {
label <- rbind(label, series$table[i])
}
# Remove duplicates
label <- unique(label)[-1]
# ETS method
rus <- NA
rus1 <- NA
accuracy_ets_4 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 4
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- ets(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_ets_4 <- rbind(accuracy_ets_4, accuracy_result[5])
}
}
c(i, j)
# ARIMA method
rus <- NA
accuracy_arima_4 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 4
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- auto.arima(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_arima_4 <- rbind(accuracy_arima_4, accuracy_result[5])
}
}
sum(accuracy_arima)
# Combination
rus <- NA
accuracy_meta_4 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
if (length(rus) > 4){
h <- 2
l <- length(rus) - h
train <- ts(rus[1:l])
test <- ts(rus[(l + 1):(l + h)])
forecast_result <- forecast_meta_M4(model_M4, train, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_meta_4 <- rbind(accuracy_meta_4, accuracy_result[5])
}
}
}
c(i, j)
rus1
# MONTHLY
# Monthly data
series <- filter(series_info, freq == 12)
series_12 <- series
series$table[1]
length(series$table)
# Build a list of series names
label <- NA
for (i in c(1:length(series$table))) {
label <- rbind(label, series$table[i])
}
# Remove duplicates
label <- unique(label)[-1]
# ETS method
rus <- NA
rus1 <- NA
accuracy_ets_12 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 12
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- ets(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_ets_12 <- rbind(accuracy_ets_12, accuracy_result[5])
}
}
c(i, j)
# ARIMA method
rus <- NA
accuracy_arima_12 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 12
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- auto.arima(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_arima_12 <- rbind(accuracy_arima_12, accuracy_result[5])
}
}
sum(accuracy_arima)
# Combination
rus <- NA
accuracy_meta_12_1 <- NA
length(label)
for (i in c(50:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
if (length(rus) > 4){
h <- 2
l <- length(rus) - h
train <- ts(rus[1:l])
test <- ts(rus[(l + 1):(l + h)])
forecast_result <- forecast_meta_M4(meta_model, train, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_meta_12_1 <- rbind(accuracy_meta_12_1, accuracy_result[5])
}
}
}
length(label)
c(i, j)
rus1
# Forecast quality: yearly series
accuracy_arima <- na.omit(accuracy_arima)
accuracy_arima <- accuracy_arima[is.finite(accuracy_arima)]
mean(accuracy_arima) # 12.69631
accuracy_ets <- na.omit(accuracy_ets)
accuracy_ets <- accuracy_ets[is.finite(accuracy_ets)]
mean(accuracy_ets) # 15.90514
accuracy_meta <- na.omit(accuracy_meta)
accuracy_meta <- accuracy_meta[is.finite(accuracy_meta)]
mean(accuracy_meta) # 11.85945
# Forecast quality: quarterly series
accuracy_arima_4 <- na.omit(accuracy_arima_4)
accuracy_arima_4 <- accuracy_arima_4[is.finite(accuracy_arima_4)]
mean(accuracy_arima_4) # 11.35624
accuracy_ets_4 <- na.omit(accuracy_ets_4)
accuracy_ets_4 <- accuracy_ets_4[is.finite(accuracy_ets_4)]
mean(accuracy_ets_4) # 13.97002
accuracy_meta_4 <- na.omit(accuracy_meta_4)
accuracy_meta_4 <- accuracy_meta_4[is.finite(accuracy_meta_4)]
mean(accuracy_meta_4) # 10.19009
# Forecast quality: monthly series
accuracy_arima_12 <- na.omit(accuracy_arima_12)
accuracy_arima_12 <- accuracy_arima_12[is.finite(accuracy_arima_12)]
mean(accuracy_arima_12) # 10.85314
accuracy_ets_12 <- na.omit(accuracy_ets_12)
accuracy_ets_12 <- accuracy_ets_12[is.finite(accuracy_ets_12)]
mean(accuracy_ets_12) # 14.21897
accuracy_meta_12 <- na.omit(accuracy_meta_12)
accuracy_meta_12 <- accuracy_meta_12[is.finite(accuracy_meta_12)]
mean(accuracy_meta_12) # 4.149181
# Combine the results into a single table
comparison_soph <- cbind(
Type = c("Monthly series", "Quarterly series", "Yearly series"),
ETS = c(mean(accuracy_ets_12), mean(accuracy_ets_4), mean(accuracy_ets)),
ARIMA = c(mean(accuracy_arima_12), mean(accuracy_arima_4), mean(accuracy_arima)),
META = c(mean(accuracy_meta_12), mean(accuracy_meta_4), mean(accuracy_meta))
)
# SD
comparison_soph_sd <- cbind(
Type = c("Monthly series", "Quarterly series", "Yearly series"),
ETS = c(sd(accuracy_ets_12), sd(accuracy_ets_4), sd(accuracy_ets)),
ARIMA = c(sd(accuracy_arima_12), sd(accuracy_arima_4), sd(accuracy_arima)),
META = c(sd(accuracy_meta_12), sd(accuracy_meta_4), sd(accuracy_meta))
)
# Export the table to LaTeX
xtable(comparison_soph, caption = "MAPE of sophisthse forecasts using the 3 main methods", display = c("s", "f", "e", "E", "g"))
# Export the table to LaTeX
xtable(comparison_soph_sd, caption = "SD of the MAPE of sophisthse forecasts using the 3 main methods", display = c("s", "f", "e", "E", "g"))
|
/sophisthse.R
|
no_license
|
Anastasia4111/Metalearning
|
R
| false | false | 9,137 |
r
|
library("tseries")
library(ggplot2)
library(forecast)
library(Tcomp)
library(doParallel)
library(dplyr)
library(tidyr)
library(xgboost)
library(M4metalearning)
library(M4metaresults)
library("ggplot2")
library("fpp2")
library("dplyr")
library("Mcomp")
library("xtable")
library("sophisthse")
library("devtools")
sophisthse_tables()
View(series_info)
# YEARLY DATA
# Forecasts for series from sophisthse with frequency 1 (yearly data)
series <- filter(series_info, freq == 1)
series_1 <- series
# Build a list of series names
label <- NA
for (i in c(1:length(series$table))) {
label <- rbind(label, series$table[i])
}
# Remove duplicates
label <- unique(label)[-1]
# ETS method
rus <- NA
rus1 <- NA
accuracy_ets <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 2
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- ets(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_ets <- rbind(accuracy_ets, accuracy_result[5])
}
}
# ARIMA method
rus <- NA
accuracy_arima <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 2
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- auto.arima(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_arima <- rbind(accuracy_arima, accuracy_result[5])
}
}
sum(accuracy_arima)
# META combination
rus <- NA
accuracy_meta <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
if (length(rus) > 4){
h <- 2
l <- length(rus) - h
train <- ts(rus[1:l])
test <- ts(rus[(l + 1):(l + h)])
forecast_result <- forecast_meta_M4(model_M4, train, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_meta <- rbind(accuracy_meta, accuracy_result[5])
}
}
}
c(i, j)
rus
length(rus)
# QUARTERLY
series <- filter(series_info, freq == 4)
s_quart <- series
series$table[1]
length(series$table)
# Build a list of series names
label <- NA
for (i in c(1:length(series$table))) {
label <- rbind(label, series$table[i])
}
# Remove duplicates
label <- unique(label)[-1]
# ETS method
rus <- NA
rus1 <- NA
accuracy_ets_4 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 4
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- ets(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_ets_4 <- rbind(accuracy_ets_4, accuracy_result[5])
}
}
c(i, j)
# ARIMA method
rus <- NA
accuracy_arima_4 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 4
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- auto.arima(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_arima_4 <- rbind(accuracy_arima_4, accuracy_result[5])
}
}
sum(accuracy_arima)
# Combination
rus <- NA
accuracy_meta_4 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
if (length(rus) > 4){
h <- 2
l <- length(rus) - h
train <- ts(rus[1:l])
test <- ts(rus[(l + 1):(l + h)])
forecast_result <- forecast_meta_M4(model_M4, train, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_meta_4 <- rbind(accuracy_meta_4, accuracy_result[5])
}
}
}
c(i, j)
rus1
# MONTHLY
# Monthly data
series <- filter(series_info, freq == 12)
series_12 <- series
series$table[1]
length(series$table)
# Build a list of series names
label <- NA
for (i in c(1:length(series$table))) {
label <- rbind(label, series$table[i])
}
# Remove duplicates
label <- unique(label)[-1]
# ETS method
rus <- NA
rus1 <- NA
accuracy_ets_12 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 12
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- ets(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_ets_12 <- rbind(accuracy_ets_12, accuracy_result[5])
}
}
c(i, j)
# ARIMA method
rus <- NA
accuracy_arima_12 <- NA
for (i in c(1:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
h <- 12
l <- length(rus) - h
train <- rus[1:l]
test <- rus[(l + 1):(l + h)]
model <- auto.arima(train)
forecast_result <- forecast(model, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_arima_12 <- rbind(accuracy_arima_12, accuracy_result[5])
}
}
sum(accuracy_arima)
# Combination
rus <- NA
accuracy_meta_12_1 <- NA
length(label)
for (i in c(50:length(label))) {
rus1 <- sophisthse(label[i])
d <- dim(sophisthse(label[i]))
max <- d[2]
for (j in c(1:max)) {
rus <- na.remove(rus1[, j])
if (length(rus) > 4){
h <- 2
l <- length(rus) - h
train <- ts(rus[1:l])
test <- ts(rus[(l + 1):(l + h)])
forecast_result <- forecast_meta_M4(meta_model, train, h = h)
accuracy_result <- accuracy(f = forecast_result$mean, test)
accuracy_meta_12_1 <- rbind(accuracy_meta_12_1, accuracy_result[5])
}
}
}
length(label)
c(i, j)
rus1
# Forecast quality: yearly series
accuracy_arima <- na.omit(accuracy_arima)
accuracy_arima <- accuracy_arima[is.finite(accuracy_arima)]
mean(accuracy_arima) # 12.69631
accuracy_ets <- na.omit(accuracy_ets)
accuracy_ets <- accuracy_ets[is.finite(accuracy_ets)]
mean(accuracy_ets) # 15.90514
accuracy_meta <- na.omit(accuracy_meta)
accuracy_meta <- accuracy_meta[is.finite(accuracy_meta)]
mean(accuracy_meta) # 11.85945
# Forecast quality: quarterly series
accuracy_arima_4 <- na.omit(accuracy_arima_4)
accuracy_arima_4 <- accuracy_arima_4[is.finite(accuracy_arima_4)]
mean(accuracy_arima_4) # 11.35624
accuracy_ets_4 <- na.omit(accuracy_ets_4)
accuracy_ets_4 <- accuracy_ets_4[is.finite(accuracy_ets_4)]
mean(accuracy_ets_4) # 13.97002
accuracy_meta_4 <- na.omit(accuracy_meta_4)
accuracy_meta_4 <- accuracy_meta_4[is.finite(accuracy_meta_4)]
mean(accuracy_meta_4) # 10.19009
# Forecast quality: monthly series
accuracy_arima_12 <- na.omit(accuracy_arima_12)
accuracy_arima_12 <- accuracy_arima_12[is.finite(accuracy_arima_12)]
mean(accuracy_arima_12) # 10.85314
accuracy_ets_12 <- na.omit(accuracy_ets_12)
accuracy_ets_12 <- accuracy_ets_12[is.finite(accuracy_ets_12)]
mean(accuracy_ets_12) # 14.21897
accuracy_meta_12 <- na.omit(accuracy_meta_12)
accuracy_meta_12 <- accuracy_meta_12[is.finite(accuracy_meta_12)]
mean(accuracy_meta_12) # 4.149181
# Combine the results into a single table
comparison_soph <- cbind(
Type = c("Monthly series", "Quarterly series", "Yearly series"),
ETS = c(mean(accuracy_ets_12), mean(accuracy_ets_4), mean(accuracy_ets)),
ARIMA = c(mean(accuracy_arima_12), mean(accuracy_arima_4), mean(accuracy_arima)),
META = c(mean(accuracy_meta_12), mean(accuracy_meta_4), mean(accuracy_meta))
)
# SD
comparison_soph_sd <- cbind(
Type = c("Monthly series", "Quarterly series", "Yearly series"),
ETS = c(sd(accuracy_ets_12), sd(accuracy_ets_4), sd(accuracy_ets)),
ARIMA = c(sd(accuracy_arima_12), sd(accuracy_arima_4), sd(accuracy_arima)),
META = c(sd(accuracy_meta_12), sd(accuracy_meta_4), sd(accuracy_meta))
)
# Export the table to LaTeX
xtable(comparison_soph, caption = "MAPE of sophisthse forecasts using the 3 main methods", display = c("s", "f", "e", "E", "g"))
# Export the table to LaTeX
xtable(comparison_soph_sd, caption = "SD of the MAPE of sophisthse forecasts using the 3 main methods", display = c("s", "f", "e", "E", "g"))
|
##Read the file
householdData <- read.csv("./household_power_consumption.txt", sep = ";",
na.strings="?", stringsAsFactors = FALSE)
##Convert the Date variable from char to Date class
householdData$Date <- as.Date(householdData$Date, format="%d/%m/%Y")
#Subset the Data
subHouseholdData <- subset(householdData,
        Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
#convert into time class
datetime <- paste(subHouseholdData$Date, subHouseholdData$Time)
subHouseholdData$datetime <- strptime(datetime, format = "%Y-%m-%d %H:%M:%S")
##Open the file device
png(file="plot2.png")
## Plot 2
plot(subHouseholdData$datetime, subHouseholdData$Global_active_power,
type = "l", ylab= "Global Active Power (kilowatts)", xlab="")
##Closes the file device
dev.off()
|
/plot2.R
|
no_license
|
poojasah/ExData_Plotting1
|
R
| false | false | 834 |
r
|
##Read the file
householdData <- read.csv("./household_power_consumption.txt", sep = ";",
na.strings="?", stringsAsFactors = FALSE)
##Convert the Date variable from char to Date class
householdData$Date <- as.Date(householdData$Date, format="%d/%m/%Y")
#Subset the Data
subHouseholdData <- subset(householdData,
        Date == as.Date("2007-02-01") | Date == as.Date("2007-02-02"))
#convert into time class
datetime <- paste(subHouseholdData$Date, subHouseholdData$Time)
subHouseholdData$datetime <- strptime(datetime, format = "%Y-%m-%d %H:%M:%S")
##Open the file device
png(file="plot2.png")
## Plot 2
plot(subHouseholdData$datetime, subHouseholdData$Global_active_power,
type = "l", ylab= "Global Active Power (kilowatts)", xlab="")
##Closes the file device
dev.off()
|
#' File reading function for primary accelerometer files
#'
#' @param file A character scalar giving path to primary accelerometer file
#' @param output_window_secs the desired epoch length; defaults to one second
#' @param calibrate logical. Perform autocalibration using \link[GGIR]{g.calibrate}
#' @param return_raw logical. Return raw triaxial data?
#' @param ... Arguments passed to \code{read.csv} in
#' \code{\link{check_columns}}
#' @inheritParams read_AG_counts
#' @param block logical. Should file be read in blocks? Will be automatically
#' invoked if file is larger than 2 GB.
#'
#' @return A dataframe giving processed raw data from the primary accelerometer
#' in the specified epoch length
#'
#' @examples
#' raw_file <- system.file(
#' "extdata",
#' "exampleRAW.csv",
#' package = "AGread"
#' )
#'
#' ## suppress messages that indicate truncation when sampling
#' ## rate and output window don't line up
#' AG_RAW <- suppressMessages(
#' read_AG_raw(raw_file)
#' )
#' head(AG_RAW)
#'
#' @export
read_AG_raw <- function(file, output_window_secs = 1,
calibrate = FALSE, verbose = FALSE, block = FALSE,
return_raw = FALSE, ...) {
timer <- proc.time()
if (verbose) message_update(1, file = file)
meta <- get_raw_file_meta(file)
skip <- find_skip(file)
if (any(block, get_file_size__gb(file) > 2)) {
message("\nReading file in blocks, due to excessive size.")
AG <- read_AG_raw_block(
file, output_window_secs, calibrate,
verbose, skip, meta, timer, ...
)
} else {
raw_data <- check_columns(file, skip = skip, ...)
if (!raw_data) {
message_update(17, is_message = TRUE)
AG <- utils::read.csv(file, stringsAsFactors = FALSE, skip = skip)
} else {
AG <- data.table::fread(file, stringsAsFactors = FALSE,
showProgress = FALSE, skip = skip)
}
if (nrow(AG) == 0) {
message("No data in the file. Returning NULL.")
return(NULL)
}
names(AG) <- gsub("\\.", " ", names(AG))
if (calibrate) {
AG <- calibrate_raw(AG, file)
}
if (return_raw) {
AG$Timestamp <-
nrow(AG) %>%
{. - 1} %>%
rep(1/meta$samp_freq, .) %>%
cumsum(.) %>%
c(0, .) %>%
{meta$start + .}
AG$file_source_PrimaryAccel <- basename(file)
AG$date_processed_PrimaryAccel <- Sys.time()
ordered_names <-
c(
"file_source_PrimaryAccel",
"date_processed_PrimaryAccel",
"Timestamp"
) %>%
c(., setdiff(names(AG), .)) %>%
gsub("[. ]+", "_", .)
AG %<>%
data.frame(
stringsAsFactors = FALSE,
row.names = NULL
) %>%
stats::setNames(., gsub("[. ]+", "_", names(.))) %T>%
{stopifnot(setequal(names(.), ordered_names))} %>%
.[, ordered_names]
if (verbose) message_update(
16, dur = PAutilities::get_duration(timer)
)
return(AG)
}
AG <- AG_collapse(AG, output_window_secs, meta$samp_freq)
}
AG$file_source_PrimaryAccel <- basename(file)
AG <- ag_raw_format(AG, meta$start, output_window_secs)
if (verbose) message_update(
16, dur = PAutilities::get_duration(timer)
)
return(AG)
}
|
/R/Read_Raw.R
|
permissive
|
paulhibbing/AGread
|
R
| false | false | 3,239 |
r
|
#' File reading function for primary accelerometer files
#'
#' @param file A character scalar giving path to primary accelerometer file
#' @param output_window_secs the desired epoch length; defaults to one second
#' @param calibrate logical. Perform autocalibration using \link[GGIR]{g.calibrate}
#' @param return_raw logical. Return raw triaxial data?
#' @param ... Arguments passed to \code{read.csv} in
#' \code{\link{check_columns}}
#' @inheritParams read_AG_counts
#' @param block logical. Should file be read in blocks? Will be automatically
#' invoked if file is larger than 2 GB.
#'
#' @return A dataframe giving processed raw data from the primary accelerometer
#' in the specified epoch length
#'
#' @examples
#' raw_file <- system.file(
#' "extdata",
#' "exampleRAW.csv",
#' package = "AGread"
#' )
#'
#' ## suppress messages that indicate truncation when sampling
#' ## rate and output window don't line up
#' AG_RAW <- suppressMessages(
#' read_AG_raw(raw_file)
#' )
#' head(AG_RAW)
#'
#' @export
read_AG_raw <- function(file, output_window_secs = 1,
calibrate = FALSE, verbose = FALSE, block = FALSE,
return_raw = FALSE, ...) {
timer <- proc.time()
if (verbose) message_update(1, file = file)
meta <- get_raw_file_meta(file)
skip <- find_skip(file)
if (any(block, get_file_size__gb(file) > 2)) {
message("\nReading file in blocks, due to excessive size.")
AG <- read_AG_raw_block(
file, output_window_secs, calibrate,
verbose, skip, meta, timer, ...
)
} else {
raw_data <- check_columns(file, skip = skip, ...)
if (!raw_data) {
message_update(17, is_message = TRUE)
AG <- utils::read.csv(file, stringsAsFactors = FALSE, skip = skip)
} else {
AG <- data.table::fread(file, stringsAsFactors = FALSE,
showProgress = FALSE, skip = skip)
}
if (nrow(AG) == 0) {
message("No data in the file. Returning NULL.")
return(NULL)
}
names(AG) <- gsub("\\.", " ", names(AG))
if (calibrate) {
AG <- calibrate_raw(AG, file)
}
if (return_raw) {
AG$Timestamp <-
nrow(AG) %>%
{. - 1} %>%
rep(1/meta$samp_freq, .) %>%
cumsum(.) %>%
c(0, .) %>%
{meta$start + .}
AG$file_source_PrimaryAccel <- basename(file)
AG$date_processed_PrimaryAccel <- Sys.time()
ordered_names <-
c(
"file_source_PrimaryAccel",
"date_processed_PrimaryAccel",
"Timestamp"
) %>%
c(., setdiff(names(AG), .)) %>%
gsub("[. ]+", "_", .)
AG %<>%
data.frame(
stringsAsFactors = FALSE,
row.names = NULL
) %>%
stats::setNames(., gsub("[. ]+", "_", names(.))) %T>%
{stopifnot(setequal(names(.), ordered_names))} %>%
.[, ordered_names]
if (verbose) message_update(
16, dur = PAutilities::get_duration(timer)
)
return(AG)
}
AG <- AG_collapse(AG, output_window_secs, meta$samp_freq)
}
AG$file_source_PrimaryAccel <- basename(file)
AG <- ag_raw_format(AG, meta$start, output_window_secs)
if (verbose) message_update(
16, dur = PAutilities::get_duration(timer)
)
return(AG)
}
|
consolidateData <- function(spotify_client_id=NULL,
spotify_client_secret=NULL,
spotify_id=NULL,
user_name=NULL,
playlistName=NULL){
# package installation check ----
source("helpers/checkPkg.R", local = T)
pkgs <- c("magrittr", "data.table", "crayon", "devtools")
for(i in seq_along(pkgs)){
checkPkg(pkg=pkgs[[i]])
}
rm(i, pkgs)
if(!nzchar(system.file(package = "spotifyr"))){
ans <- menu(choices = c("Y", "N"),
title = "Package spotifyr not installed in your system.\n\nDo you wish to install it? (The function will thrown an error if 'N')")
if(ans == 2L) stop("Execution aborted.")
devtools::install_github('charlie86/spotifyr')
}
# check args ----
# check for args that can't be null first
if(is.null(user_name) || is.null(playlistName)) stop("You need to supply something to \"user_name\" and/or \"playlistName\".")
# check for the other args, if null, try to find it in the system
if(is.null(spotify_client_id) || is.null(spotify_client_secret)){
if(!nzchar(Sys.getenv("SPOTIFY_CLIENT_ID"))) stop("Supply your spotify client ID to the system: 'Sys.setenv(SPOTIFY_CLIENT_ID='secret_id').")
if(!nzchar(Sys.getenv("SPOTIFY_CLIENT_SECRET"))) stop("Supply your spotify client secret to the system: 'Sys.setenv(SPOTIFY_CLIENT_SECRET='secret_id').")
}
# register id and secret (if id or secret NOT null)
if(!is.null(spotify_client_id) || !is.null(spotify_client_secret)){
Sys.setenv(SPOTIFY_CLIENT_ID = spotify_client_id)
Sys.setenv(SPOTIFY_CLIENT_SECRET = spotify_client_secret)
}
# set id to default id associated with spotify client id and secret
if(is.null(spotify_id)){
if(nzchar(Sys.getenv("SPOTIFY_CLIENT_ID")) && nzchar(Sys.getenv("SPOTIFY_CLIENT_SECRET"))) spotify_id <- spotifyr::get_my_profile()[, "id"] # access profile
}
# checking other args----
  if(missing(user_name)) stop("You need to supply a character vector for \"user_name\".\n\nThis argument creates a column in the final data output with the name of the user (e.g. \"Joe\", \"Cesar\").")
  if(missing(playlistName)) stop("You need to supply a string for \"playlistName\".\n\nThis argument searches the playlist in the user's playlist library.")
playlistName <- tolower(playlistName)
# load helpers ----
source("data/getPlaylistData.R", local=T) # pull all the data
# load libraries ----
if(any(.packages() %in% "magrittr")) suppressPackageStartupMessages(library(magrittr))
if(any(.packages() %in% "data.table")) suppressPackageStartupMessages(library(data.table))
# get consolidated data from spotify
master <- getPlaylistData(spotify_id = spotify_id, playlistName = playlistName)
master[, user_name:=user_name]
return(master)
}
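# Illustrative usage (added; the credentials, user name and playlist below are
# placeholders, not real values):
# Sys.setenv(SPOTIFY_CLIENT_ID = "<id>", SPOTIFY_CLIENT_SECRET = "<secret>")
# master <- consolidateData(user_name = "Joe", playlistName = "Road Trip")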
|
/project3/data/consolidateData.R
|
no_license
|
csarvi/mat4376
|
R
| false | false | 2,849 |
r
|
consolidateData <- function(spotify_client_id=NULL,
spotify_client_secret=NULL,
spotify_id=NULL,
user_name=NULL,
playlistName=NULL){
# package installation check ----
source("helpers/checkPkg.R", local = T)
pkgs <- c("magrittr", "data.table", "crayon", "devtools")
for(i in seq_along(pkgs)){
checkPkg(pkg=pkgs[[i]])
}
rm(i, pkgs)
if(!nzchar(system.file(package = "spotifyr"))){
ans <- menu(choices = c("Y", "N"),
title = "Package spotifyr not installed in your system.\n\nDo you wish to install it? (The function will thrown an error if 'N')")
if(ans == 2L) stop("Execution aborted.")
devtools::install_github('charlie86/spotifyr')
}
# check args ----
# check for args that can't be null first
if(is.null(user_name) || is.null(playlistName)) stop("You need to supply something to \"user_name\" and/or \"playlistName\".")
# check for the other args, if null, try to find it in the system
if(is.null(spotify_client_id) || is.null(spotify_client_secret)){
if(!nzchar(Sys.getenv("SPOTIFY_CLIENT_ID"))) stop("Supply your spotify client ID to the system: 'Sys.setenv(SPOTIFY_CLIENT_ID='secret_id').")
if(!nzchar(Sys.getenv("SPOTIFY_CLIENT_SECRET"))) stop("Supply your spotify client secret to the system: 'Sys.setenv(SPOTIFY_CLIENT_SECRET='secret_id').")
}
# register id and secret (if id or secret NOT null)
if(!is.null(spotify_client_id) || !is.null(spotify_client_secret)){
Sys.setenv(SPOTIFY_CLIENT_ID = spotify_client_id)
Sys.setenv(SPOTIFY_CLIENT_SECRET = spotify_client_secret)
}
# set id to default id associated with spotify client id and secret
if(is.null(spotify_id)){
if(nzchar(Sys.getenv("SPOTIFY_CLIENT_ID")) && nzchar(Sys.getenv("SPOTIFY_CLIENT_SECRET"))) spotify_id <- spotifyr::get_my_profile()[, "id"] # access profile
}
# checking other args----
  if(missing(user_name)) stop("You need to supply a character vector for \"user_name\".\n\nThis argument creates a column in the final data output with the name of the user (e.g. \"Joe\", \"Cesar\").")
  if(missing(playlistName)) stop("You need to supply a string for \"playlistName\".\n\nThis argument searches the playlist in the user's playlist library.")
playlistName <- tolower(playlistName)
# load helpers ----
source("data/getPlaylistData.R", local=T) # pull all the data
# load libraries ----
if(any(.packages() %in% "magrittr")) suppressPackageStartupMessages(library(magrittr))
if(any(.packages() %in% "data.table")) suppressPackageStartupMessages(library(data.table))
# get consolidated data from spotify
master <- getPlaylistData(spotify_id = spotify_id, playlistName = playlistName)
master[, user_name:=user_name]
return(master)
}
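# Illustrative usage (added; the credentials, user name and playlist below are
# placeholders, not real values):
# Sys.setenv(SPOTIFY_CLIENT_ID = "<id>", SPOTIFY_CLIENT_SECRET = "<secret>")
# master <- consolidateData(user_name = "Joe", playlistName = "Road Trip")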
|
# Exercise Sheet 3, Task 3
# Computations for solutions
library(MASS)
# a)
# (reverse) characteristic polynomial: 1 - z + 0.21 z^2 - 0.025 z^3 = 0
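# Derivation of the polynomial above (added for clarity):
# det(I - phi_1 z - phi_2 z^2) = (1 - 0.5 z)^2 - 0.1 z (0.4 z + 0.25 z^2)
#                              = 1 - z + 0.21 z^2 - 0.025 z^3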
roots <- polyroot(c(1, -1, 0.21, -0.025))
roots # there are some imaginary parts attached to it
sum(Re(roots)^2 + Im(roots)^2 > 1) # count how many roots lie outside the unit circle
# Alternative: VAR(1) approach and eigenvalues.
phi_1 <- matrix(data = c(0.5, 0.4, 0.1, 0.5), nrow = 2)
phi_2 <- matrix(data = c(0, 0.25, 0, 0), nrow = 2)
I2x2 <- diag(2)
O2x2 <- matrix(data = rep(0, 4), nrow = 2)
Phi <- rbind( cbind(phi_1, phi_2), cbind(I2x2, O2x2) )
Phi
var1.eigen <- eigen(Phi)
sum(Re(var1.eigen$values)^2 + Im(var1.eigen$values)^2 < 1) # How many eigenvalues lie inside the unit circle?
# b)
phi_0 <- c(2,1)
mu <- solve((I2x2 - phi_1 - phi_2)) %*% phi_0
fractions(mu)
# e)
# Alternative solution to b) using the VAR(1) representation:
mu2 <- solve(diag(4) - Phi) %*% c(phi_0, rep(0,2))
fractions(mu2)
# f)
Sigma_a <- diag(2)
Sigma_b <- rbind( cbind(Sigma_a, O2x2), cbind(O2x2, O2x2) )
Gamma0ast.vec <- solve(diag(16) - Phi %x% Phi) %*% as.vector(Sigma_b)
Gamma0ast.mat <- matrix(data = Gamma0ast.vec, nrow = 4)
Gamma0ast.mat
A <- matrix(c(0.75, 0, -0.25, 0.5 ), ncol = 2, byrow = TRUE)
B <- matrix(c(2.285714, -0.6857143, -0.6857143, 1.752381 ), ncol = 2, byrow = TRUE)
A%*%B
|
/exercise_MTSA/tutorial_03/ex3_task3.R
|
no_license
|
jens-klenke/MTSA
|
R
| false | false | 1,344 |
r
|
# Exercise Sheet 3, Task 3
# Computations for solutions
library(MASS)
# a)
# (reverse) characteristic polynomial: 1 - z + 0.21 z^2 - 0.025 z^3 = 0
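# Derivation of the polynomial above (added for clarity):
# det(I - phi_1 z - phi_2 z^2) = (1 - 0.5 z)^2 - 0.1 z (0.4 z + 0.25 z^2)
#                              = 1 - z + 0.21 z^2 - 0.025 z^3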
roots <- polyroot(c(1, -1, 0.21, -0.025))
roots # there are some imaginary parts attached to it
sum(Re(roots)^2 + Im(roots)^2 > 1) # count how many roots lie outside the unit circle
# Alternative: VAR(1) approach and eigenvalues.
phi_1 <- matrix(data = c(0.5, 0.4, 0.1, 0.5), nrow = 2)
phi_2 <- matrix(data = c(0, 0.25, 0, 0), nrow = 2)
I2x2 <- diag(2)
O2x2 <- matrix(data = rep(0, 4), nrow = 2)
Phi <- rbind( cbind(phi_1, phi_2), cbind(I2x2, O2x2) )
Phi
var1.eigen <- eigen(Phi)
sum(Re(var1.eigen$values)^2 + Im(var1.eigen$values)^2 < 1) # How many eigenvalues lie inside the unit circle?
# b)
phi_0 <- c(2,1)
mu <- solve((I2x2 - phi_1 - phi_2)) %*% phi_0
fractions(mu)
# e)
# Alternative solution to b) using the VAR(1) representation:
mu2 <- solve(diag(4) - Phi) %*% c(phi_0, rep(0,2))
fractions(mu2)
# f)
Sigma_a <- diag(2)
Sigma_b <- rbind( cbind(Sigma_a, O2x2), cbind(O2x2, O2x2) )
Gamma0ast.vec <- solve(diag(16) - Phi %x% Phi) %*% as.vector(Sigma_b)
Gamma0ast.mat <- matrix(data = Gamma0ast.vec, nrow = 4)
Gamma0ast.mat
A <- matrix(c(0.75, 0, -0.25, 0.5 ), ncol = 2, byrow = TRUE)
B <- matrix(c(2.285714, -0.6857143, -0.6857143, 1.752381 ), ncol = 2, byrow = TRUE)
A%*%B
|
# load libraries
library(readr)
library(stringr)
library(dplyr)
library(tidyr)
# path to data
path_data <- "./UCI HAR Dataset/"
path_test <- paste0(path_data, "test/")
path_train <- paste0(path_data, "train/")
# load features names
features_names <- read_delim(paste0(path_data,"features.txt"),
delim = " ",
col_names = c("number", "name"),
col_types = cols(
number = col_skip(),
name = col_character()
))
# selecting column indexes of variables with mean() and std() in the name
extract_cols <- which(str_detect(features_names$name,"mean\\(\\)|std\\(\\)"))
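# e.g. "tBodyAcc-mean()-X" and "tBodyAcc-std()-X" match, "fBodyAcc-meanFreq()-X" does not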
# remove "-,()" characters
features_names <-
features_names %>%
mutate(name = gsub("-", "", name)) %>%
mutate(name = gsub("[\\,\\(\\)]", "", name)) %>%
mutate(name = gsub("mean", "Mean", name)) %>%
mutate(name = gsub("std", "Std", name)) %>%
mutate(name = gsub("^t", "time", name)) %>%
mutate(name = gsub("^f", "freq", name))
# selecting column names to extract
extract_names <- features_names$name[extract_cols]
# load activity labels
activity_labels <- read_delim(paste0(path_data,"activity_labels.txt"),
delim = " ",
col_names = c("number", "activity"),
col_types = cols(
number = col_skip(),
activity = col_character()
))
# change chars to lower
activity_labels <-
activity_labels %>%
mutate(activity = tolower(activity)) %>%
mutate(activity = gsub("upstairs", "Up", activity)) %>%
mutate(activity = gsub("downstairs", "Down", activity)) %>%
mutate(activity = gsub("_", "", activity))
# load test data
test_measuremants <- read_fwf(paste0(path_test, "X_test.txt"),
fwf_widths(rep(16, times = 561)),
#col_names = features_names$name
col_types = cols (.default = col_double())
)
# read test activity data
test_activity <- read_table(paste0(path_test, "y_test.txt"),
col_names = ("activity"),
cols(
activity = col_integer()
))
# load test subject data
test_subject <- read_table(paste0(path_test, "subject_test.txt"),
col_names = ("subject"),
cols(
subject = col_integer()
))
#load train data
train_measuremants <- read_fwf(paste0(path_train, "X_train.txt"),
fwf_widths(rep(16, times = 561)),
col_types = cols (
.default = col_double()
))
# read train activity data
train_activity <- read_table(paste0(path_train, "y_train.txt"),
col_names = ("activity"),
cols(
activity = col_integer()
))
# load train subject data
train_subject <- read_table(paste0(path_train, "subject_train.txt"),
col_names = ("subject"),
cols(
subject = col_integer()
))
# bind test subject, activity and measurement data into one dataset
test_data <- bind_cols(test_subject, test_activity, test_measuremants)
# bind train subject, activity and measurement data into one dataset
train_data <- bind_cols(train_subject, train_activity, train_measuremants)
# bind data into one dataset
activity_data <- bind_rows(test_data, train_data)
# extract columns from the combined data: the first two plus the selected mean/std variables
activity_data_1 <- activity_data[,c(1, 2, extract_cols + 2)]
# label activities with names
activity_data_2 <-
activity_data_1 %>%
mutate(activity = activity_labels$activity[activity])
# set descriptive column names: the first two plus the extracted variable names
colnames(activity_data_2)<-c("subject", "activity", extract_names)
# grouping by subject and activity
activity_data_2 <- group_by(activity_data_2, activity, subject )
# summarizing data to obtain average values
activity_sum <- summarize_all(activity_data_2, mean)
# write data to file
write.table(activity_sum, file = "activity_sum.txt", row.name = FALSE)
|
/run_analysis.R
|
no_license
|
kkurdybacha/course3
|
R
| false | false | 4,512 |
r
|
# load libraries
library(readr)
library(stringr)
library(dplyr)
library(tidyr)
# path to data
path_data <- "./UCI HAR Dataset/"
path_test <- paste0(path_data, "test/")
path_train <- paste0(path_data, "train/")
# load features names
features_names <- read_delim(paste0(path_data,"features.txt"),
delim = " ",
col_names = c("number", "name"),
col_types = cols(
number = col_skip(),
name = col_character()
))
# selecting column indexes of variables with mean() and std() in the name
extract_cols <- which(str_detect(features_names$name,"mean\\(\\)|std\\(\\)"))
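# e.g. "tBodyAcc-mean()-X" and "tBodyAcc-std()-X" match, "fBodyAcc-meanFreq()-X" does not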
# remove "-,()" characters
features_names <-
features_names %>%
mutate(name = gsub("-", "", name)) %>%
mutate(name = gsub("[\\,\\(\\)]", "", name)) %>%
mutate(name = gsub("mean", "Mean", name)) %>%
mutate(name = gsub("std", "Std", name)) %>%
mutate(name = gsub("^t", "time", name)) %>%
mutate(name = gsub("^f", "freq", name))
# selecting column names to extract
extract_names <- features_names$name[extract_cols]
# load activity labels
activity_labels <- read_delim(paste0(path_data,"activity_labels.txt"),
delim = " ",
col_names = c("number", "activity"),
col_types = cols(
number = col_skip(),
activity = col_character()
))
# change chars to lower
activity_labels <-
activity_labels %>%
mutate(activity = tolower(activity)) %>%
mutate(activity = gsub("upstairs", "Up", activity)) %>%
mutate(activity = gsub("downstairs", "Down", activity)) %>%
mutate(activity = gsub("_", "", activity))
# load test data
test_measuremants <- read_fwf(paste0(path_test, "X_test.txt"),
fwf_widths(rep(16, times = 561)),
#col_names = features_names$name
col_types = cols (.default = col_double())
)
# read test activity data
test_activity <- read_table(paste0(path_test, "y_test.txt"),
col_names = ("activity"),
cols(
activity = col_integer()
))
# load test subject data
test_subject <- read_table(paste0(path_test, "subject_test.txt"),
col_names = ("subject"),
cols(
subject = col_integer()
))
#load train data
train_measuremants <- read_fwf(paste0(path_train, "X_train.txt"),
fwf_widths(rep(16, times = 561)),
col_types = cols (
.default = col_double()
))
# read train activity data
train_activity <- read_table(paste0(path_train, "y_train.txt"),
col_names = ("activity"),
cols(
activity = col_integer()
))
# load train subject data
train_subject <- read_table(paste0(path_train, "subject_train.txt"),
col_names = ("subject"),
cols(
subject = col_integer()
))
# bind test subject, activity and measurement data into one dataset
test_data <- bind_cols(test_subject, test_activity, test_measuremants)
# bind train subject, activity and measurement data into one dataset
train_data <- bind_cols(train_subject, train_activity, train_measuremants)
# bind data into one dataset
activity_data <- bind_rows(test_data, train_data)
# extract columns from the combined data: the first two plus the selected mean/std variables
activity_data_1 <- activity_data[,c(1, 2, extract_cols + 2)]
# label activities with names
activity_data_2 <-
activity_data_1 %>%
mutate(activity = activity_labels$activity[activity])
# set descriptive column names, first two and numeric variables to extract
colnames(activity_data_2)<-c("subject", "activity", extract_names)
# grouping by subject and activity
activity_data_2 <- group_by(activity_data_2, activity, subject )
# summarizing data to obtain avarage values
activity_sum <- summarize_all(activity_data_2, mean)
# write data do file
write.table(activity_sum, file = "activity_sum.txt", row.name = FALSE)
|
# Segment 2: time series analysis of 'Total paid customers'.
library(tseries)  # for adf.test()
seg <- read.csv('segment-2.csv')
seg$month <- substr(seg$Date, 1, 2)
tpc <- seg$Total.paid.customers
mrr <- seg$Segment.2.MRR
adf.test(tpc)
# The series is not stationary.
adf.test(diff(tpc))
# The first differences are stationary.
acf(diff(tpc), main = '1st difference of TPC', lag.max = 100)
#
pacf(diff(tpc), lag.max = 100)
m1 <-
arima(
tpc,
order = c(12, 0, 0),
method = "ML",
optim.control = list(maxit = 1000)
)
future.tpc <- predict(m1, n.ahead = 90)
all.tpc <- c(tpc, future.tpc$pred)
plot(
tpc,
xlim = c(0, length(all.tpc)),
ylim = c(0, max(all.tpc)),
col = 'blue',
cex = 0.1,
xlab = 'time',
ylab = 'TPC',
main = 'Current and future TPC'
)
points(seq(from = 366, to = length(all.tpc)),
future.tpc$pred,
col = 'red',
cex = 0.1)
adf.test(mrr)
acf(mrr, lag.max = 120, main = 'MRR') # ACF persists for a very long time, 116 lags
pacf(mrr, main = 'MRR') # Does not look like MA process.
m2 <- arima(mrr, order = c(100, 0, 0), method = "ML", optim.control = list(maxit=1000))
|
/revenue-forecast/analysis-5.R
|
no_license
|
amey-joshi/am
|
R
| false | false | 1,099 |
r
|
# Segment 2: time series analysis of 'Total paid customers'.
library(tseries)  # for adf.test()
seg <- read.csv('segment-2.csv')
seg$month <- substr(seg$Date, 1, 2)
tpc <- seg$Total.paid.customers
mrr <- seg$Segment.2.MRR
adf.test(tpc)
# The series is not stationary.
adf.test(diff(tpc))
# The first differences are stationary.
acf(diff(tpc), main = '1st difference of TPC', lag.max = 100)
#
pacf(diff(tpc), lag.max = 100)
m1 <-
arima(
tpc,
order = c(12, 0, 0),
method = "ML",
optim.control = list(maxit = 1000)
)
future.tpc <- predict(m1, n.ahead = 90)
all.tpc <- c(tpc, future.tpc$pred)
plot(
tpc,
xlim = c(0, length(all.tpc)),
ylim = c(0, max(all.tpc)),
col = 'blue',
cex = 0.1,
xlab = 'time',
ylab = 'TPC',
main = 'Current and future TPC'
)
points(seq(from = 366, to = length(all.tpc)),
future.tpc$pred,
col = 'red',
cex = 0.1)
adf.test(mrr)
acf(mrr, lag.max = 120, main = 'MRR') # ACF persists for a very long time, 116 lags
pacf(mrr, main = 'MRR') # Does not look like MA process.
m2 <- arima(mrr, order = c(100, 0, 0), method = "ML", optim.control = list(maxit=1000))
|
library(methods)
library(dplyr)
library(demest)
library(docopt)
first_year <- 1960
deaths <- readRDS("out/deaths.rds") %>%
subarray(year > first_year)
exposure <- readRDS("out/exposure.rds") %>%
subarray(year > first_year)
deaths <- subarray(deaths, series == "USA" & age == "60-64")
exposure <- subarray(exposure, series == "USA" & age == "60-64")
model <- Model(y ~ Poisson(mean ~ year + sex),
year ~ DLM(level = NULL, damp = NULL),
jump = 0.01)
filename1 <- tempfile()
filename1.pred <- tempfile()
estimateModel(model,
y = deaths,
exposure = exposure,
filename = filename1,
nBurnin = 5000,
nSim = 5000,
nChain = 4,
nThin = 5)
fetchSummary(filename1)
predictModel(filenameEst = filename1, filenamePred = filename1.pred, n = 25)
scale.lev <- fetchMCMC(filename1, c("mod", "hy", "ye", "scaleLev"))
rate <- fetchBoth(filename1, filename1.pred, c("mod", "lik", "rate"))
dplot( ~ year | sex, data = rate)
rate <- fetch(filename1, c("mod", "lik", "rate"))
dplot( ~ year | sex, data = rate)
direct <- deaths/exposure
diff.fem <- diff(log(direct[1,]), diff = 2)
## model <- Model(y ~ Poisson(mean ~ age * sex * year + age * sex * series + year * series),
## age ~ DLM(damp = NULL,
## covariates = Covariates(infant = TRUE)),
## year ~ DLM(damp = NULL),
## age:sex ~ Zero(),
## age:year ~ Zero(),
## sex:year ~ Zero(),
## age:sex:year ~ Mix(),
## age:series ~ Zero(),
## sex:series ~ Zero(),
## age:sex:series ~ Mix(),
## year:series ~ DLM(trend = NULL,
## damp = NULL))
## model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^2 + age:sex:series + age:sex:year),
## age ~ DLM(level = NULL,
## damp = NULL,
## covariates = Covariates(infant = TRUE)),
## year ~ DLM(damp = NULL),
## age:sex ~ Zero(),
## age:series ~ Zero(),
## age:year ~ Zero(),
## sex:series ~ Zero(),
## sex:year ~ Zero(),
## series:year ~ Mix(),
## age:sex:series ~ Mix(),
## age:sex:year ~ Mix(),
## jump = 0.07)
## model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^2 + age:sex:series + age:sex:year),
## age ~ DLM(level = Level(scale = HalfT(scale = 5)),
## trend = NULL,
## damp = NULL,
## error = Error(scale = HalfT(scale = 0.000001, max = 0.000002))),
## year ~ Exch(),
## series ~ Exch(),
## age:sex ~ Zero(),
## age:series ~ Zero(),
## age:year ~ Zero(),
## sex:series ~ Zero(),
## sex:year ~ Zero(),
## series:year ~ Mix(),
## age:sex:series ~ Exch(),
## age:sex:year ~ Exch(),
## jump = 0.07)
## model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^2 + age:sex:series + age:sex:year),
## age ~ DLM(level = Level(scale = HalfT(scale = 5)),
## trend = NULL,
## damp = NULL,
## error = Error(scale = HalfT(scale = 0.000001, max = 0.000002))),
## year ~ DLM(level = NULL,
## damp = NULL),
## series ~ Exch(),
## age:sex ~ Zero(),
## age:series ~ Zero(),
## age:year ~ Zero(),
## sex:series ~ Zero(),
## sex:year ~ Zero(),
## series:year ~ Mix(),
## age:sex:series ~ Exch(),
## age:sex:year ~ Mix(),
## jump = 0.07)
model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^3),
age ~ DLM(level = NULL, damp = NULL),
year ~ DLM(level = NULL, damp = NULL),
age:sex ~ DLM(level = NULL, damp = NULL),
age:series ~ Zero(),
age:year ~ DLM(),
sex:series ~ Zero(),
sex:year ~ DLM(),
series:year ~ DLM(),
age:sex:series ~ Mix(),
age:sex:year ~ Mix(),
age:series:year ~ Mix(),
sex:series:year ~ Mix(),
jump = 0.07)
filename <- "out/model_base.est"
Sys.time()
estimateModel(model,
y = deaths,
exposure = exposure,
filename = filename,
nBurnin = n_burnin,
nSim = n_sim,
nChain = n_chain,
nThin = n_thin)
Sys.time()
s <- fetchSummary(filename)
print(s)
for (i in 1:20) {
print(Sys.time())
continueEstimation(filename,
nBurnin = 200,
nSim = 200)
print(Sys.time())
s <- fetchSummary(filename)
print(s)
}
for (i in 1:4) {
print(Sys.time())
continueEstimation(filename,
nBurnin = 0,
nSim = 200)
print(Sys.time())
s <- fetchSummary(filename)
print(s)
}
|
/old/single_life_exp.R
|
no_license
|
johnrbryant/hmd
|
R
| false | false | 5,381 |
r
|
library(methods)
library(dplyr)
library(demest)
library(docopt)
first_year <- 1960
deaths <- readRDS("out/deaths.rds") %>%
subarray(year > first_year)
exposure <- readRDS("out/exposure.rds") %>%
subarray(year > first_year)
deaths <- subarray(deaths, series == "USA" & age == "60-64")
exposure <- subarray(exposure, series == "USA" & age == "60-64")
model <- Model(y ~ Poisson(mean ~ year + sex),
year ~ DLM(level = NULL, damp = NULL),
jump = 0.01)
filename1 <- tempfile()
filename1.pred <- tempfile()
estimateModel(model,
y = deaths,
exposure = exposure,
filename = filename1,
nBurnin = 5000,
nSim = 5000,
nChain = 4,
nThin = 5)
fetchSummary(filename1)
predictModel(filenameEst = filename1, filenamePred = filename1.pred, n = 25)
scale.lev <- fetchMCMC(filename1, c("mod", "hy", "ye", "scaleLev"))
rate <- fetchBoth(filename1, filename1.pred, c("mod", "lik", "rate"))
dplot( ~ year | sex, data = rate)
rate <- fetch(filename1, c("mod", "lik", "rate"))
dplot( ~ year | sex, data = rate)
direct <- deaths/exposure
diff.fem <- diff(log(direct[1,]), diff = 2)
## model <- Model(y ~ Poisson(mean ~ age * sex * year + age * sex * series + year * series),
## age ~ DLM(damp = NULL,
## covariates = Covariates(infant = TRUE)),
## year ~ DLM(damp = NULL),
## age:sex ~ Zero(),
## age:year ~ Zero(),
## sex:year ~ Zero(),
## age:sex:year ~ Mix(),
## age:series ~ Zero(),
## sex:series ~ Zero(),
## age:sex:series ~ Mix(),
## year:series ~ DLM(trend = NULL,
## damp = NULL))
## model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^2 + age:sex:series + age:sex:year),
## age ~ DLM(level = NULL,
## damp = NULL,
## covariates = Covariates(infant = TRUE)),
## year ~ DLM(damp = NULL),
## age:sex ~ Zero(),
## age:series ~ Zero(),
## age:year ~ Zero(),
## sex:series ~ Zero(),
## sex:year ~ Zero(),
## series:year ~ Mix(),
## age:sex:series ~ Mix(),
## age:sex:year ~ Mix(),
## jump = 0.07)
## model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^2 + age:sex:series + age:sex:year),
## age ~ DLM(level = Level(scale = HalfT(scale = 5)),
## trend = NULL,
## damp = NULL,
## error = Error(scale = HalfT(scale = 0.000001, max = 0.000002))),
## year ~ Exch(),
## series ~ Exch(),
## age:sex ~ Zero(),
## age:series ~ Zero(),
## age:year ~ Zero(),
## sex:series ~ Zero(),
## sex:year ~ Zero(),
## series:year ~ Mix(),
## age:sex:series ~ Exch(),
## age:sex:year ~ Exch(),
## jump = 0.07)
## model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^2 + age:sex:series + age:sex:year),
## age ~ DLM(level = Level(scale = HalfT(scale = 5)),
## trend = NULL,
## damp = NULL,
## error = Error(scale = HalfT(scale = 0.000001, max = 0.000002))),
## year ~ DLM(level = NULL,
## damp = NULL),
## series ~ Exch(),
## age:sex ~ Zero(),
## age:series ~ Zero(),
## age:year ~ Zero(),
## sex:series ~ Zero(),
## sex:year ~ Zero(),
## series:year ~ Mix(),
## age:sex:series ~ Exch(),
## age:sex:year ~ Mix(),
## jump = 0.07)
model <- Model(y ~ Poisson(mean ~ (age + sex + series + year)^3),
age ~ DLM(level = NULL, damp = NULL),
year ~ DLM(level = NULL, damp = NULL),
age:sex ~ DLM(level = NULL, damp = NULL),
age:series ~ Zero(),
age:year ~ DLM(),
sex:series ~ Zero(),
sex:year ~ DLM(),
series:year ~ DLM(),
age:sex:series ~ Mix(),
age:sex:year ~ Mix(),
age:series:year ~ Mix(),
sex:series:year ~ Mix(),
jump = 0.07)
filename <- "out/model_base.est"
Sys.time()
estimateModel(model,
y = deaths,
exposure = exposure,
filename = filename,
nBurnin = n_burnin,
nSim = n_sim,
nChain = n_chain,
nThin = n_thin)
Sys.time()
s <- fetchSummary(filename)
print(s)
for (i in 1:20) {
print(Sys.time())
continueEstimation(filename,
nBurnin = 200,
nSim = 200)
print(Sys.time())
s <- fetchSummary(filename)
print(s)
}
for (i in 1:4) {
print(Sys.time())
continueEstimation(filename,
nBurnin = 0,
nSim = 200)
print(Sys.time())
s <- fetchSummary(filename)
print(s)
}
|
## Link to dataset used
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
## Only using data from Feb 1st and 2nd of 2007
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
wychum/Exporatory_Data_Analysis
|
R
| false | false | 1,457 |
r
|
## Link to dataset used
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
dataFile <- "./data/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
## Only using data from Feb 1st and 2nd of 2007
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
globalReactivePower <- as.numeric(subSetData$Global_reactive_power)
voltage <- as.numeric(subSetData$Voltage)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
# 0. dependencies (added; these packages are used by the code below)
library(optimx)    # optimx()
library(numDeriv)  # grad()
library(ggplot2)   # plots in section 2 and the profile visualization
# 1. fitting
fit <- function(cm) {
# cm1 <- ped_creator(cm$fit$par)
fn1 <- function(th) m2lL(th, cm = cm)
# kl <- c(rep(0.8, 12), rep(0.5, 4))
# names(kl) <- names(cm$fit$par)
# ku <- c(rep(1.2, 12), rep(5, 4))
# names(ku) <- names(cm$fit$par)
# old <- cm$fit
# succu <- FALSE
# succl <- FALSE
# while (!(succu & succl)) {
# succl <- is.numeric(try(fn1(cm$fit$par * kl)))
# succu <- is.numeric(try(fn1(cm$fit$par * ku)))
# if (!succu) ku[1:12] <- ku[1:12] - 0.1
# if (!succl) kl[1:12] <- kl[1:12] + 0.1
# }
optimx(par = cm$fit$par, fn = fn1,
# gr = function(x) grad(fn1, x, method = 'simple', method.args = list(eps = 1e-8)),
method = 'L-BFGS-B',
lower = cm_true$fit$par * 0.2, upper = cm_true$fit$par * 5,
control = list(maxit = 1e4, factr = 10, trace = 0,
parscale = sc1)
)
}
fit2 <- function(cm) {
fn1 <- function(th) m2lL(th, cm = cm)
exc_l <- cm$fit$par - cm_true$fit$par * 0.2 < 1e-2
exc_u <- -cm$fit$par + cm_true$fit$par * 5 < 1e-2
if (all(exc_l == FALSE) & all(exc_u == FALSE)) return(cm$fit)
left_m <- cm_true$fit$par * 0.2
right_m <- cm_true$fit$par * 5
left_m[exc_l] <- cm$parbox[exc_l, 'l']
right_m[exc_u] <- cm$parbox[exc_u, 'r']
optimx(par = cm$fit$par, fn = fn1,
# gr = function(x) grad(fn1, x, method = 'simple', method.args = list(eps = 1e-8)),
method = 'L-BFGS-B',
lower = left_m, upper = right_m,
control = list(maxit = 1e4, factr = 10, trace = 0,
parscale = sc1)
)
}
fit_bs <- function(cm_bs, start, cm) {
cm1 <- cm
cm1$data_ode <- cm_bs$data_ode
cm1$data_expl <- cm_bs$data_expl
fn1 <- function(th) m2lL(th, cm = cm1)
res <- optimx(par = start, fn = fn1,
# gr = function(x) grad(fn1, x, method = 'simple', method.args = list(eps = 1e-8)),
method = 'L-BFGS-B',
lower = start * 0.1, upper = start * 10,
control = list(maxit = 1e4, factr = 10, trace = 0,
parscale = sc1)
)
gr <- grad(fn1, res[1:16], method = 'simple', method.args = list(eps = 1e-8))
list(res, gr)
}
# 2. visualization
#visualize an ode solution - returns a ggplot object
ode_gg <- function(sol) {
l <- list()
for (i in 1:(ncol(sol) - 1))
l[[i]] <- ggplot(data = as.data.frame(sol), aes_string(x = 'time', y = colnames(sol)[i + 1])) +
geom_line()
l
}
#takes an ode solution matrix and returns a data frame of the explicit (derived) functions
ode_explicit <- function(sol) {
# browser()
explicit <- data.frame(time = sol[, 'time'],
Epo_ext = sol[, 'Epo'] + sol[, 'dEpo_e'],
Epo_int = sol[, 'Epo_EpoR_i'] + sol[, 'dEpo_i']
)
explicit$Epo_ext_cpm <- ch2n(attr(sol, 'parms')['offset']) + ch2n(attr(sol, 'parms')['scale']) * explicit$Epo_ext
explicit$Epo_mem_cpm <- ch2n(attr(sol, 'parms')['offset']) + ch2n(attr(sol, 'parms')['scale']) * sol[, 'Epo_EpoR']
explicit$Epo_int_cpm <- ch2n(attr(sol, 'parms')['offset']) + ch2n(attr(sol, 'parms')['scale']) * explicit$Epo_int
attr(explicit, 'parms') <- attr(sol, 'parms')
attr(explicit, 'state') <- attr(sol, 'state')
explicit
}
# 3. profiles
# profiles should be computed with an adaptive step, and not above the 99% interval
profile <- function(parname, cm, seq_length = 20) {
fn1 <- function(th) m2lL(th, cm)
parfit <- as.numeric(cm$fit$par[parname])
crit <- c(cm$fit$value + qchisq(0.99, df = 1))
# MLE is a center of the profiling interval
# ifelse(parfit < (cm$parbox[parname, 1] + cm$parbox[parname, 2]) / 2,
# seq <- seq(cm$parbox[parname, 1], 2 * parfit - cm$parbox[parname, 1], length.out = seq_length),
# seq <- seq(2 * parfit - cm$parbox[parname, 2], cm$parbox[parname, 2], length.out = seq_length))
#determine if log-scale should be used
cent <- (cm$parbox[parname, 1] + cm$parbox[parname, 2]) / 2
logcent <- 10 ^ ((log(cm$parbox[parname, 1], base = 10) + log(cm$parbox[parname, 2], base = 10)) / 2)
# logflag <- ifelse(abs(logcent - as.numeric(cm$fit$par[parname])) < abs(cent - as.numeric(cm$fit$par[parname])), 1, 0)
# if (is.na(logflag)) logflag <- 0
  if (logflag[parname] == 1) {
    seq <- 10 ^ seq(log(cm$parbox[parname, 1], base = 10), log(cm$parbox[parname, 2], base = 10), length.out = seq_length)
  } else {
    seq <- seq(cm$parbox[parname, 1], cm$parbox[parname, 2], length.out = seq_length)
  }
# browser()
seq <- sort(c(seq, as.numeric(cm$fit$par[parname])))
par <- cm$fit$par
midind <- which(seq %in% par[parname])
parind <- which(names(par) == parname)
profileseq <- rep(NA, length(seq))
profileseq[midind] <- cm$fit$value
pl_fit_list <- as.list(rep(NA, length(seq)))
#right
for (i in (midind + 1):length(seq)) {
par[parind] <- seq[i]
res <- try(optim.fix(par, fn1, parind))
pl_fit_list[[i]] <- res
if (class(res) == 'list') {
par[-parind] <- res$par
profileseq[i] <- res$value
if (res$value > crit) break
}
}
#left
par <- cm$fit$par
for (i in 1:(midind - 1)) {
par[parind] <- seq[midind - i]
res <- try(optim.fix(par, fn1, parind))
pl_fit_list[[midind - i]] <- res
if (class(res) == 'list') {
par[-parind] <- res$par
profileseq[midind - i] <- res$value
if (res$value > crit) break
}
}
lseq <- as.numeric(unlist(sapply(seq, function(x) {
par1 <- cm$fit$par
par1[parname] <- x
try(fn1(par1))})))
list(parseq = seq, profileseq = profileseq, projectseq = lseq, pl_fit_list = pl_fit_list)
}
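# Usage sketch: profile every fitted parameter (assumes `cm` carries $fit and
# $parbox, and that `logflag` is defined globally, as the code above expects).
if (FALSE) {
  cm$profiles <- lapply(names(cm$fit$par), profile, cm = cm)
  names(cm$profiles) <- names(cm$fit$par)
}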
# profile visualization
profile_vis <- function(parname, cm) {
fn1 <- function(th) m2lL(th, cm)
parfit <- as.numeric(cm$fit$par[parname])
df <- data.frame(x = cm$profiles[[parname]]$parseq, y = cm$profiles[[parname]]$profileseq)
df$y <- try(as.numeric(as.character(df$y)))
keepind <- which(!is.na(df$y))
df <- df[keepind,]
profile <- approxfun(x = df$x, y = df$y)
df$l <- cm$profiles[[parname]]$projectseq[keepind]
conf <- c(0.5, 0.8, 0.9, 0.95, 0.99)
crit <- cm$fit$value + qchisq(conf, df = 1)
# browser()
plot <- ggplot(data = df) +
geom_line(aes(x = x, y = y)) +
# geom_line(aes(x = x, y = l), color = 'blue', linetype = "dotted") +
geom_hline(data = data.frame(y = crit), aes(yintercept = y), color = 'red') +
geom_vline(data = data.frame(x = cm_true$fit$par[parname]), aes(xintercept = x), color = 'green') +
geom_point(data = data.frame(x = parfit, y = cm$fit$value), aes(x = x, y = y), col = 'red') +
xlab(parname) +
ylab('-2lnL') +
coord_cartesian(xlim = c(min(df$x), max(df$x)))
plot
}
# 4. confidence intervals
CI_cm <- function(parname, cm, conf = c(0.5, 0.8, 0.9, 0.95, 0.99), seq_length = 100) {
range <- range(cm$profiles[[parname]]$parseq[!is.na(cm$profiles[[parname]]$profileseq)])
prof <- approxfun(x = cm$profiles[[parname]]$parseq, y = cm$profiles[[parname]]$profileseq)
# crit <- cm$fit$value + qchisq(conf, df = 1)
# t2<-u.crit(theta_MLE[j],t1,crit)
# l.LRT[j,]<-t2[,1]
# r.LRT[j,]<-t2[,2]
conf <- c(conf)
crit <- c(cm$fit$value + qchisq(conf, df = 1))
  ci_df <- data.frame(emp = c(l = 1, r = 1))[, -1] # zero-column frame with rownames 'l','r'; CI columns appended below
for (i in 1:length(conf)) {
l <- try(uniroot(function(x) prof(x) - crit[i], lower = range[1], upper = as.numeric(cm$fit$par[parname]), tol = 1e-64)$root)
r <- try(uniroot(function(x) prof(x) - crit[i], lower = as.numeric(cm$fit$par[parname]), upper = range[2], tol = 1e-64)$root)
ci_df <- data.frame(ci_df, v = c(as.numeric(l), as.numeric(r)))
names(ci_df)[ncol(ci_df)] <- as.character(conf[i])
}
ci_df
}
CI_cm_mod <- function(parname, cm, conf = c(0.5, 0.8, 0.9, 0.95, 0.99), seq_length = 100) {
range <- range(cm$profiles[[parname]]$parseq[!is.na(cm$profiles[[parname]]$profileseq)])
prof <- approxfun(x = cm$profiles[[parname]]$parseq, y = cm$profiles[[parname]]$profileseq)
# crit <- cm$fit$value + qchisq(conf, df = 1)
# t2<-u.crit(theta_MLE[j],t1,crit)
# l.LRT[j,]<-t2[,1]
# r.LRT[j,]<-t2[,2]
conf <- c(conf)
n <- nrow(cm$data_expl) + nrow(cm$data_ode)
crit <- cm$fit$value + n / (n - length(cm$fit$par)) * qchisq(conf, df = 1)
  ci_df <- data.frame(emp = c(l = 1, r = 1))[, -1] # zero-column frame with rownames 'l','r'; CI columns appended below
for (i in 1:length(conf)) {
l <- try(uniroot(function(x) prof(x) - crit[i], lower = range[1], upper = as.numeric(cm$fit$par[parname]), tol = 1e-64)$root)
r <- try(uniroot(function(x) prof(x) - crit[i], lower = as.numeric(cm$fit$par[parname]), upper = range[2], tol = 1e-64)$root)
ci_df <- data.frame(ci_df, v = c(as.numeric(l), as.numeric(r)))
names(ci_df)[ncol(ci_df)] <- as.character(conf[i])
}
ci_df
}
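# Usage sketch: per-parameter intervals once cm$profiles has been filled in;
# CI_cm_mod applies the small-sample n / (n - p) correction to the cutoff.
if (FALSE) {
  ci     <- lapply(names(cm$fit$par), CI_cm, cm = cm)
  ci_mod <- lapply(names(cm$fit$par), CI_cm_mod, cm = cm)
}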
#determines parbox
parbox <- function(cm) {
fn1 <- function(th) m2lL(th, cm)
df <- data.frame(l = cm$fit$par, r = cm$fit$par)
for (i in 1:length(cm$fit$par)) {
paru <- cm$fit$par
parl <- cm$fit$par
    if (logflag[i]) {
      paru[i] <- cm$fit$par[i] * 10
      parl[i] <- cm$fit$par[i] * 0.1
    } else {
      paru[i] <- cm$fit$par[i] * 5
      parl[i] <- -cm$fit$par[i] * 4
    }
succu <- FALSE
succl <- FALSE
#browser()
while (!(succu & succl)) {
succl <- is.finite(try(fn1(parl)))
succu <- is.finite(try(fn1(paru)))
      if (!succu) {
        if (logflag[i]) paru[i] <- paru[i] * 0.9 else paru[i] <- paru[i] - cm$fit$par[i] * 0.1
      }
      if (!succl) {
        if (logflag[i]) parl[i] <- parl[i] * 1.1 else parl[i] <- parl[i] + cm$fit$par[i] * 0.1
      }
}
df[i,] <- c(parl[i], paru[i])
}
df
}
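# Usage sketch: parbox() widens/narrows the per-parameter bounds until the
# likelihood is finite at both ends; fit2() above reuses the result via cm$parbox.
if (FALSE) {
  cm$parbox <- parbox(cm)
}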
#############################
### 5. Confidence regions ###
#############################
CR_isin <- function(cm, conf = c(0.5, 0.8, 0.9, 0.95, 0.99)) {
m2lL(cm_true$fit$par, cm) - cm$fit$value < qchisq(conf, df = length(cm$fit$par))
}
CR_mod_isin <- function(cm, conf = c(0.5, 0.8, 0.9, 0.95, 0.99)) {
n <- nrow(cm$data_ode) + nrow(cm$data_expl)
m2lL(cm_true$fit$par, cm) - cm$fit$value < n / (n - length(cm$fit$par)) * qchisq(conf, df = length(cm$fit$par))
}
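# Usage sketch: TRUE entries mean the true parameter vector (cm_true$fit$par)
# falls inside the likelihood-ratio confidence region at that level.
if (FALSE) {
  CR_isin(cm)      # plain chi-square calibration
  CR_mod_isin(cm)  # small-sample corrected calibration
}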
##########################
### Visualize accuracy ###
##########################
# takes a data.frame returns a ggplot
ci_acc_vis <- function(df) {
df1 <- data.frame(p = as.numeric(as.character(colnames(df))), pi = df['T',] / (df['T',] + df['F',]))
ggplot(data = df1, mapping = aes(x = p, y = pi, group = 1)) +
geom_point() +
geom_segment(aes(x = 0.5, y = 0.5, xend = 0.99, yend = 0.99), col = 'green')
}
###########################
### PL confidence bands ###
###########################
CB <- function(xseq, fname, cm) {
lapply(xseq, CB_point, fname, cm)
}
CB_point <- function(x, fname, cm) {
q1 <- explicit_fun(x = x, par = cm$fit$par, fname = fname, cm = cm)
seq <- seq(q1 * 0.95, q1 * 1.05, length.out = 9)
crit <- c(cm$fit$value + qchisq(0.99, df = 1))
fn1 <- function(th) m2lL(th, cm)
midind <- ceiling(length(seq) / 2)
plvalues <- rep(NA, length(seq))
heqvec <- rep(NA, length(seq))
p <- length(cm$fit$par)
plvalues[midind] <- fn1(cm$fit$par)
heqvec[midind] <- 0
res <- list(pars = cm$fit$par)
resmid <- res #save the middle
#res <- resmid
# right half
## start from previously found values
# browser()
for (i in (midind + 1):length(seq)) {
# browser()
heq <- function(theta) explicit_fun(x, theta, fname, cm = cm) - seq[i]
system.time(res <- solnp(pars = res$pars,
fun = fn1,
eqfun = heq,
eqB = 0,
LB = res$pars - abs(cm$fit$par) * 0.2,
UB = res$pars + abs(cm$fit$par) * 0.2
))
heqvec[i] <- heq(res$pars)
plvalues[i] <- fn1(res$pars)
if (class(plvalues[i]) != 'numeric') browser()
# temp <<- i
if (plvalues[i] > crit) break
}
# left half
res <- resmid
## start from previously found values
for (i in 1:(midind - 1)) {
# browser()
heq <- function(theta) explicit_fun(x, theta, fname, cm = cm) - seq[midind - i]
system.time(res <- solnp(pars = res$pars,
fun = fn1,
eqfun = heq,
eqB = 0,
LB = res$pars - abs(cm$fit$par) * 0.2,
UB = res$pars + abs(cm$fit$par) * 0.2
))
heqvec[midind - i] <- heq(res$pars)
plvalues[midind - i] <- fn1(res$pars)
if (class(plvalues[midind - i]) != 'numeric') browser()
if (plvalues[midind - i] > crit) break
}
return(list(plvalues = plvalues, heqvalues = heqvec, xseq = seq))
}
CB_point_naive <- function(x, fname, cm, conf = 0.95) {
fn1 <- function(th) m2lL(th, cm)
hobj <- function(theta) explicit_fun(x, theta, fname, cm = cm)
mhobj <- function(theta) (-1) * explicit_fun(x, theta, fname, cm = cm)
resu <- list(pars = cm$fit$par)
system.time(resu <- solnp(pars = cm$fit$par,
fun = hobj,
ineqfun = fn1,
ineqUB = cm$fit$value + qchisq(conf, df = length(cm$fit$par)),
ineqLB = cm$fit$value,
LB = cm$fit$par - abs(cm$fit$par) * 0.2,
UB = cm$fit$par + abs(cm$fit$par) * 0.2
))
resl <- list(pars = cm$fit$par)
system.time(resl <- solnp(pars = cm$fit$par,
fun = mhobj,
ineqfun = fn1,
ineqUB = cm$fit$value + qchisq(conf, df = length(cm$fit$par)),
ineqLB = cm$fit$value,
LB = cm$fit$par - abs(cm$fit$par) * 0.2,
UB = cm$fit$par + abs(cm$fit$par) * 0.2
))
list(l = resl, u = resu)
}
band_point <- function(prof_res, conf) {
prof <- approxfun(x = prof_res$xseq[!is.na(prof_res$plvalues)], y = prof_res$plvalues[!is.na(prof_res$plvalues)])
crit <- min(prof_res$plvalues, na.rm = TRUE) + qchisq(conf, df = 1)
l <- try(uniroot(function(x) prof(x) - crit, lower = min(prof_res$xseq[!is.na(prof_res$plvalues)]),
upper = prof_res$xseq[which(prof_res$plvalues == min(prof_res$plvalues, na.rm = TRUE))], tol = 1e-64)$root)
r <- try(uniroot(function(x) prof(x) - crit, upper = max(prof_res$xseq[!is.na(prof_res$plvalues)]),
lower = prof_res$xseq[which(prof_res$plvalues == min(prof_res$plvalues, na.rm = TRUE))], tol = 1e-64)$root)
c(l = l, r = r)
}
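# Usage sketch (hypothetical grid): pointwise profile-likelihood band for one
# observable, extracted at the 95% level; "Epo_ext_cpm" matches a name used below.
if (FALSE) {
  xseq <- c(50, 150, 250)
  cb   <- CB(xseq, fname = "Epo_ext_cpm", cm = cm)
  band <- sapply(cb, band_point, conf = 0.95)
}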
#check band accuracy
band_acc <- function(num = 50, xseq = c(50, 150, 250), expl_name) {
  if (expl_name %in% names(cm_true$expl_fun$ode)) {
    df1 <- cm_true$expl_fun$ode[, c('time', expl_name)]
  } else {
    df1 <- cm_true$expl_fun$extra
  }
truefun <- approxfun(x = df1[, 1], y = df1[, 2])
checklist <- list()
for (k in c('0.5', '0.8', '0.9', '0.95', '0.99')) {
checklist[[k]] <- matrix(NA, nrow = num, ncol = length(xseq))
for (i in 1:num) {
for (j in 1:length(xseq)) {
tr <- explicit_fun(x = xseq[j], par = cm_true$fit$par, fname = expl_name, cm = cm_true)
b <- band_constructor(cm_ped_list[[i]]$band[[expl_name]], as.numeric(k))
checklist[[k]][i, j] <- tr > b['l', as.character(xseq[j])] &
tr < b['r', as.character(xseq[j])]
}
}
}
checklist
}
#####################
### bootstrapping ###
#####################
#take a model and create a single non-parametric bootstrap resample of its data
bootstrap_model <- function(cm) {
bs <- list()
y1_ind <- which(cm$data_ode$yname == 'Epo_ext_cpm')
y2_ind <- which(cm$data_ode$yname == 'Epo_mem_cpm')
y3_ind <- which(cm$data_ode$yname == 'Epo_int_cpm')
new_i <- c(sample(y1_ind, length(y1_ind), replace = TRUE),
sample(y2_ind, length(y2_ind), replace = TRUE),
sample(y3_ind, length(y3_ind), replace = TRUE))
bs$data_ode <- cm$data_ode[new_i,]
bs$data_expl <- cm$data_expl[sample(1:nrow(cm$data_expl), nrow(cm$data_expl), replace = TRUE),]
bs
}
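# Usage sketch: B non-parametric bootstrap refits, each restarted from the
# original MLE (fit_bs also returns a simple-difference gradient at the optimum).
if (FALSE) {
  bs_fits <- lapply(seq_len(100), function(b)
    fit_bs(bootstrap_model(cm), start = cm$fit$par, cm = cm))
}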
|
/main_pipeline.R
|
no_license
|
insysbio/confest
|
R
| false | false | 16,079 |
r
|
# BUSINESS SCIENCE ----
# DS4B 202-R ----
# STOCK ANALYZER APP - PERSISTENT DATA -----
# Version 1
# APPLICATION DESCRIPTION ----
# - Perform CRUD Operations
# - Use local data storage via RDS File
# LIBRARIES ----
library(shiny)
library(shinyWidgets)
library(shinythemes)
library(shinyjs)
library(shinyauthr) # devtools::install_github("business-science/shinyauthr")
library(plotly)
library(tidyquant)
library(tidyverse)
source(file = "00_scripts/stock_analysis_functions.R")
source(file = "00_scripts/info_card.R")
source(file = "00_scripts/panel_card.R")
source(file = "00_scripts/generate_favorite_cards.R")
source(file = "00_scripts/crud_operations_local.R")
stock_list_tbl <- get_stock_list("SP500")
# UI ----
ui <- tagList(
# CSS ----
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = shinytheme("cyborg")),
tags$link(rel = "stylesheet", type = "text/css", href = "styles.css")
),
# JS ----
shinyjs::useShinyjs(),
# User Login ----
# verbatimTextOutput(outputId = "creds"),
shinyauthr::loginUI(
id = "login",
title = tagList(h2(class = "text-center", "Stock Analyzer"),
p(class = "text-center", "Please Log In")),
login_title = "Enter"
),
# Website ----
uiOutput(outputId = "website")
)
# SERVER ----
server <- function(input, output, session) {
# 0.0 READ USER BASE & AUTHENTICATE USER LOGIN ----
# 0.1 Return user_base_tbl - To Global Environment -----
read_user_base()
# 0.2 Credentials ----
credentials <- callModule(
module = shinyauthr::login,
id = "login",
data = user_base_tbl,
user_col = user,
pwd_col = password,
log_out = reactive(logout_init())
)
logout_init <- callModule(
module = shinyauthr::logout,
id = "logout",
active = reactive(credentials()$user_auth)
)
# 0.3 Instantiating User Information ----
reactive_values <- reactiveValues()
observe({
if (credentials()$user_auth) {
user_data_tbl <- credentials()$info
reactive_values$permissions <- user_data_tbl$permissions
reactive_values$user_name <- user_data_tbl$name
reactive_values$favorites_list <- user_data_tbl %>% pull(favorites) %>% pluck(1)
reactive_values$last_symbol <- user_data_tbl$last_symbol
reactive_values$user_settings <- user_data_tbl$user_settings
}
})
output$creds <- renderPrint({
list(
credentials(),
reactive_values$permissions,
reactive_values$user_name,
reactive_values$favorites_list,
reactive_values$last_symbol,
reactive_values$user_settings
)
})
# 1.0 SETTINGS ----
# 1.1 Toggle Input Settings ----
observeEvent(input$settings_toggle, {
toggle(id = "input_settings", anim = TRUE)
})
# 1.2 Stock Symbol ----
observeEvent(input$analyze, {
update_and_write_user_base(
user_name = credentials()$info$user,
column_name = "last_symbol",
assign_input = get_symbol_from_user_input(input$stock_selection)
)
})
stock_symbol <- eventReactive(input$analyze, {
get_symbol_from_user_input(input$stock_selection)
}, ignoreNULL = FALSE)
# 1.3 User Input ----
stock_selection_triggered <- eventReactive(input$analyze, {
input$stock_selection
}, ignoreNULL = FALSE)
# 1.4 Apply & Save Settings ----
observeEvent(input$apply_and_save, {
user_settings_tbl <- tibble(
mavg_short = input$mavg_short,
mavg_long = input$mavg_long,
time_window = input$time_window
)
update_and_write_user_base(
user_name = credentials()$info$user,
column_name = "user_settings",
assign_input = list(user_settings_tbl)
)
})
mavg_short <- eventReactive(input$apply_and_save, {
input$mavg_short
}, ignoreNULL = FALSE)
mavg_long <- eventReactive(input$apply_and_save, {
input$mavg_long
}, ignoreNULL = FALSE)
time_window <- eventReactive(input$apply_and_save, {
input$time_window
}, ignoreNULL = FALSE)
selected_tab <- eventReactive(input$apply_and_save, {
if (is.character(input$tab_panel_stock_chart)) {
# Tab already selected
selected_tab <- input$tab_panel_stock_chart
} else {
# Tab panel not built yet
selected_tab <- NULL
}
selected_tab
}, ignoreNULL = FALSE)
# 1.5 Get Stock Data ----
stock_data_tbl <- reactive({
stock_symbol() %>%
get_stock_data(
from = today() - time_window(),
to = today(),
mavg_short = mavg_short(),
mavg_long = mavg_long())
})
# 2.0 FAVORITE CARDS ----
# 2.1 Reactive Values - User Favorites ----
# 2.2 Add Favorites ----
observeEvent(input$favorites_add, {
new_symbol <- get_symbol_from_user_input(input$stock_selection)
new_symbol_already_in_favorites <- new_symbol %in% reactive_values$favorites_list
if (!new_symbol_already_in_favorites) {
reactive_values$favorites_list <- c(reactive_values$favorites_list, new_symbol) %>% unique()
updateTabsetPanel(session = session, inputId = "tab_panel_stock_chart", selected = new_symbol)
update_and_write_user_base(
user_name = credentials()$info$user,
column_name = "favorites",
assign_input = list(reactive_values$favorites_list)
)
}
})
# 2.3 Render Favorite Cards ----
output$favorite_cards <- renderUI({
if (length(reactive_values$favorites_list) > 0) {
generate_favorite_cards(
favorites = reactive_values$favorites_list,
from = today() - time_window(),
to = today(),
mavg_short = mavg_short(),
mavg_long = mavg_long()
)
}
})
# 2.4 Delete Favorites ----
observeEvent(input$favorites_clear, {
modalDialog(
title = "Clear Favorites",
size = "m",
easyClose = TRUE,
p("Are you sure you want to remove favorites?"),
br(),
div(
selectInput(inputId = "drop_list",
label = "Remove Single Favorite",
choices = reactive_values$favorites_list %>% sort()),
actionButton(inputId = "remove_single_favorite",
label = "Clear Single",
class = "btn-warning"),
actionButton(inputId = "remove_all_favorites",
label = "Clear ALL Favorites",
class = "btn-danger")
),
footer = modalButton("Exit")
) %>% showModal()
})
# 2.4.1 Clear Single ----
observeEvent(input$remove_single_favorite, {
reactive_values$favorites_list <- reactive_values$favorites_list %>%
.[reactive_values$favorites_list != input$drop_list]
updateSelectInput(session = session,
inputId = "drop_list",
choices = reactive_values$favorites_list %>% sort())
update_and_write_user_base(
user_name = credentials()$info$user,
column_name = "favorites",
assign_input = list(reactive_values$favorites_list)
)
})
# 2.4.2 Clear All ----
observeEvent(input$remove_all_favorites, {
reactive_values$favorites_list <- NULL
updateSelectInput(session = session,
inputId = "drop_list",
choices = reactive_values$favorites_list %>% sort())
update_and_write_user_base(
user_name = credentials()$info$user,
column_name = "favorites",
assign_input = list(reactive_values$favorites_list)
)
})
# 2.5 Show/Hide Favorites ----
observeEvent(input$favorites_toggle, {
shinyjs::toggle(id = "favorite_card_section", anim = TRUE, animType = "slide")
})
# 3.0 FAVORITE PLOT ----
# 3.1 Plot Header ----
output$plot_header <- renderText({
stock_selection_triggered()
})
# 3.2 Plotly Plot ----
output$plotly_plot <- renderPlotly({
stock_data_tbl() %>% plot_stock_data()
})
# 3.3 Favorite Plots ----
output$stock_charts <- renderUI({
# First Tab Panel
tab_panel_1 <- tabPanel(
title = "Last Analysis",
panel_card(
title = stock_symbol(),
plotlyOutput(outputId = "plotly_plot")
)
)
# Favorite Panels
favorite_tab_panels <- NULL
if (length(reactive_values$favorites_list) > 0) {
favorite_tab_panels <- reactive_values$favorites_list %>%
map(.f = function(x) {
tabPanel(
title = x,
panel_card(
title = x,
x %>%
get_stock_data(
from = today() - time_window(),
to = today(),
mavg_short = mavg_short(),
mavg_long = mavg_long()
) %>%
plot_stock_data()
)
)
})
}
# Building the Tabset Panel
do.call(
what = tabsetPanel,
args = list(tab_panel_1) %>%
append(favorite_tab_panels) %>%
append(list(id = "tab_panel_stock_chart", type = "pills", selected = selected_tab() ))
)
})
# 4.0 COMMENTARY ----
# 4.1 Generate Commentary ----
output$analyst_commentary <- renderText({
generate_commentary(data = stock_data_tbl(), user_input = stock_selection_triggered())
})
# 5.0 RENDER WEBSITE ----
output$website <- renderUI({
req(credentials()$user_auth, reactive_values$last_symbol)
navbarPage(
title = "Stock Analyzer",
inverse = FALSE,
collapsible = TRUE,
theme = shinytheme("cyborg"),
header = div(
class = "pull-right",
style = "padding-right: 20px;",
p("Welcome, ", reactive_values$user_name)
),
tabPanel(
title = "Analysis",
# 5.1.0 HEADER ----
div(
class = "container",
id = "header",
h1(class = "page-header", "Stock Analyzer", tags$small("by Business Science")),
p(class = "lead", "This is the first mini-project completed in our",
a(href = "https://www.business-science.io/", target = "_blank", "Expert Shiny Applications Course (DS4B 202-R)"))
),
# 5.2.0 FAVORITES ----
div(
class = "container hidden-sm hidden-xs",
id = "favorite_container",
# 5.2.1 USER INPUTS ----
div(
class = "",
column(
width = 12,
h5(class = "pull-left", "Favorites"),
actionButton(inputId = "favorites_clear", "Clear Favorites", class = "pull-right"),
actionButton(inputId = "favorites_toggle", "Show/Hide", class = "pull-right")
)
),
# 5.2.2 FAVORITE CARDS ----
div(
class = "row",
id = "favorite_card_section",
uiOutput(outputId = "favorite_cards", class = "container")
)
),
# 5.3.0 APPLICATION UI -----
div(
class = "container",
id = "application_ui",
# 5.3.1 USER INPUTS ----
column(
width = 4,
wellPanel(
div(
id = "input_main",
pickerInput(
inputId = "stock_selection",
label = "Stock List (Pick One to Analyze)",
choices = stock_list_tbl$label,
multiple = FALSE,
selected = stock_list_tbl %>%
filter(label %>% str_detect(pattern = paste0(reactive_values$last_symbol, ","))) %>%
pull(label),
options = pickerOptions(
actionsBox = FALSE,
liveSearch = TRUE,
size = 10
)
)
),
div(
id = "input_buttons",
actionButton(inputId = "analyze", label = "Analyze", icon = icon("download")),
div(
class = "pull-right",
actionButton(inputId = "favorites_add", label = NULL, icon = icon("heart")),
actionButton(inputId = "settings_toggle", label = NULL, icon = icon("cog"))
)
),
div(
id = "input_settings",
hr(),
sliderInput(inputId = "mavg_short",
label = "Short Moving Average (Days)",
value = reactive_values$user_settings %>% pluck(1) %>% pull(mavg_short),
min = 5,
max = 40),
sliderInput(inputId = "mavg_long",
label = "Long Moving Average (Days)",
value = reactive_values$user_settings %>% pluck(1) %>% pull(mavg_long),
min = 50,
max = 120),
sliderInput(inputId = "time_window",
label = "Time Window (Days)",
value = reactive_values$user_settings %>% pluck(1) %>% pull(time_window),
min = 180,
max = 730),
actionButton(inputId = "apply_and_save", label = "Apply & Save", icon = icon("save"))
) %>% hidden()
)
),
# 5.3.2 PLOT PANEL ----
column(
width = 8,
uiOutput(outputId = "stock_charts")
)
),
# 5.4.0 ANALYST COMMENTARY ----
div(
class = "container",
id = "commentary",
column(
width = 12,
div(
class = "panel",
div(class = "panel-header", h4("Analyst Commentary")),
div(
class = "panel-body",
textOutput(outputId = "analyst_commentary")
)
)
)
)
)
)
})
}
# RUN APP ----
shinyApp(ui = ui, server = server)
|
/src/ds4b_202a_part4_apps_start/stock_analyzer_local_data/app.R
|
no_license
|
nayefahmad/shiny-developer-with-aws-course
|
R
| false | false | 17,326 |
r
|
testlist <- list(type = 12L, z = 2.12199579689406e-314)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609890405-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 109 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eq_map.R
\name{eq_map}
\alias{eq_map}
\title{Mapping of earthquake data}
\usage{
eq_map(data, annot_col)
}
\arguments{
\item{data}{a cleaned earthquake data frame, e.g. the output of \code{eq_clean_data}}
\item{annot_col}{name of column used for annotations}
}
\value{
map of the earthquakes including annotations
}
\description{
This function plots the earthquake epicenters on a map and adds annotations to them
}
\examples{
\dontrun{
readr::read_delim("earthquakes_data.txt.zip", delim = "\\t") \%>\%
eq_clean_data() \%>\%
dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(datetime) >= 2000)\%>\%
eq_map(annot_col = "datetime")
}
}
|
/man/eq_map.Rd
|
no_license
|
AGrosserHH/earthquake
|
R
| false | true | 652 |
rd
|
shinyPlot_TL.PH_TL <- function(
TL.signal,
TL.temperatures
){
old.par <- par( no.readonly = TRUE )
par( oma = c(0.5, 0, 3, 0 ) )
#Plot TL
if(length(TL.signal) > 0)
{
#Boundary
plot.TL.Tmax <- max(TL.temperatures)
plot.TL.Lmax <- max(TL.signal)
#color
colors <- 1:ncol(TL.signal)
for(i in 1 : ncol(TL.signal)){
temp.temperatures <- TL.temperatures[,i]
temp.TL <- TL.signal[,i]
temp.color <- colors[i]
if(i == 1) {
plot(main= "Thermoluminescence signal",
x=temp.temperatures,
y=temp.TL,
xlim=c(0,plot.TL.Tmax),
ylim=c(0,plot.TL.Lmax),
xlab="Temperature (\u00b0C)",
ylab = "Luminescence signal (TL)",
type="l",
col=temp.color
)
par(new = TRUE)
}else{
lines(x=temp.temperatures,
y=temp.TL,
xlim=c(0,plot.TL.Tmax),
ylim=c(0,plot.TL.Lmax),
col=temp.color
)
}
}
par(new = FALSE)
}
#clean layout...
layout(matrix(c(1), 1, 1, byrow = TRUE))
par(old.par)
}
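# Usage sketch (synthetic data): one column per aliquot, rows = temperature channels.
if (FALSE) {
  temps  <- matrix(rep(seq(20, 400, length.out = 100), 3), ncol = 3)
  signal <- matrix(rexp(300), ncol = 3)
  shinyPlot_TL.PH_TL(TL.signal = signal, TL.temperatures = temps)
}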
|
/inst/shinyTLdating/shinyPlot/shinyPlot_TL.PH_TL.R
|
no_license
|
dstreble/TLdating
|
R
| false | false | 1,176 |
r
|
#' Perform Gibbs sampling algorithm for Poisson Mixture Models (PMMs) used for
#' modelling NGS RNA-Seq data.
#'
#' The parameter values are initialized using 'kmeans' algorithm.
#'
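#' Assumed external dependencies (not defined in this file): normFactors()
#' and kmeansInit() from this project, matrixStats::logSumExp(), and an
#' rdirichlet() sampler such as gtools::rdirichlet().
#'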
pmm.LL.gibbs <- function(X, K=2, N.Sims=10000, burnin=5000, theta, params, eqProp=FALSE, stephens=FALSE){
# Unwrap parameters from the 'params' list object
conds <- params$conds
libSize <- params$libSize
libType <- params$libType
rm(params)
N <- NROW(X) # Length of the dataset
q <- NCOL(X) # Number of variables
w <- rowSums(X) # Overall expression for each object
D <- length(unique(conds)) # Total number of conditions
r <- as.vector(table(conds)) # Number of replicates in each condition
post.resp <- matrix(0, nrow=N, ncol=K) # Posterior responsibilities
pdf.w <- matrix(0, nrow=N, ncol=K) # PDF of each point on each cluster k
lambdas <- matrix(0, nrow=D, ncol=K) # Matrix for holding estimated lambdas
total.l <- matrix(0, nrow=D, ncol=K) # Store the sum of posterior means
mean.mat <- vector("list", K) # List for holding the mean matrices l
C.n <- matrix(0, nrow=N, ncol=K) # Mixture components
C.matrix <- matrix(0, nrow=N, ncol=K) # Total Mixture components
NLL <- vector(mode="numeric") # Hold NLL for all MCMC iterations
lambda.draws <- list() # Mean vector of each Poisson
pi.draws <- matrix(0, nrow=N.Sims-burnin, ncol=K) # Mixing Proportions
if (stephens) # Use Stephens algorithm for relabelling MCMC outputs
postRespArr <- array(0, dim=c(N.Sims-burnin, N, K)) # Post resp for each MCMC run
# Grouping columns of X in order of condition (all replicates put together)
o.ycols <- order(conds) # Order of conditions
X <- X[,o.ycols] # Order the observations X accordingly
conds <- conds[o.ycols] # Order the conds vector accordingly
rm(o.ycols)
# Make sure X is an N x q matrix and assign unique names to X and conds
X <- as.matrix(X, nrow=N, ncol=q)
if(length(rownames(X)) == 0){ # If matrix X has no row names
rn <- 1:nrow(X)
}else if(length(rownames(X)) > 0){
rn <- rownames(X)
}
rownames(X) <- rn # Assign names to each row of X
conds.names <- unique(conds) # Get unique condition names
# Compute the library size normalization factors for each variable
s <- normFactors(X, libSize=libSize, libType=libType)
# Sum of s for all replicates l on each condition d
s.dot <- rep(NA, D)
for (j in 1:D){
s.dot[j] <- sum( s[which(conds == unique(conds)[j])] )
}
# Create matrices of dimension N x q, for faster computations
w.mat <- matrix(rep(w, times=q), nrow=N, ncol=q)
s.mat <- matrix(rep(s, each=N) , nrow=N, ncol=q)
##=======================================
# Initialize parameters using 'kmeans', #
# if 'theta' argument is missing #
##=======================================
if (missing(theta)){
initParams <- kmeansInit(X=X,
K=K,
w=w,
s.dot=s.dot,
conds=conds,
lambdas=lambdas,
eqProp=eqProp)
Poisson <- list()
Poisson$l <- initParams$lambdas # Poisson mean vector for each cluster
Poisson$Gamma <- list(shape.0=1, rate.0=1) # Initialize Gamma hyperparameters
pi.cur <- initParams$pi.c # Mixing proportions
dir.a <- rep(1/K, K) # Dirichlet concentration parameter
}else{
Poisson <- theta$Poisson
pi.cur <- theta$pi.cur
dir.a <- theta$dir.a
}
for (t in 1:N.Sims){ # Start Gibbs sampling
# Compute mean matrix using the estimated lambdas, normalization factors s and
# the overall expression levels for each object w.
for (k in 1:K){
lambda.mat <- matrix(rep(rep(Poisson$l[,k], times=r), each=N), nrow=N, ncol=q)
mean.mat[[k]] <- w.mat * s.mat * lambda.mat
}
# Compute responsibilities
res <- compute.resp(X, pdf.w, K, Poisson, mean.mat, pi.cur)
post.resp <- res$post.resp
# Draw mixture components for ith simulation
C.n <- c.n.update(N, K, post.resp)
# Calculate component counts of each cluster
N.k <- colSums(C.n)
# Update mixing proportions using new cluster component counts
if (eqProp){
pi.cur <- rep(1/K, K)
}else{
pi.cur <- pi.update(dir.a, N.k)
}
# Update posterior mean
    Poisson$l <- lambda.update(X, K, C.n, Poisson, w, s.dot, conds)
    # Keep only the simulations after the burn-in period has passed
if (t > burnin){
total.l <- total.l + Poisson$l
NLL <- c(NLL, res$NLL)
C.matrix <- C.matrix + C.n
pi.draws[t - burnin,] <- pi.cur
lambda.draws[[t - burnin]] <- Poisson$l
if (stephens) # Use Stephens algorithm for relabelling MCMC outputs
postRespArr[t-burnin, , ] <- post.resp
}
}
# Object to keep input data
dat <- NULL
dat$X <- X
dat$K <- K
dat$N <- N
dat$D <- D
dat$N.Sims <- N.Sims
dat$burnin <- burnin
# Object to hold all the MCMC draws
draws <- NULL
draws$pi <- pi.draws
  draws$l <- lambda.draws
  if (stephens) draws$postResp <- postRespArr # return relabelling input rather than silently discarding it
# Object to hold the summaries for the parameters
summary <- NULL
summary$pi <- apply(pi.draws, 2, mean) # Expected value of mix. prop.
summary$l <- total.l / ((N.Sims-burnin)) # Expected value of each mean vector
# Add names to the estimated variables for clarity
names(summary$pi) <- paste("Clust", 1:K)
colnames(summary$l) <- paste("Clust", 1:K)
rownames(summary$l) <- conds.names
summary$C <- C.matrix / (N.Sims-burnin) # Convert C.matrix to probs
summary$NLL <- NLL
# Object to hold the credible intervals for the parameters
cred.interv <- NULL
cred.interv$pi <- apply(pi.draws, 2, quantile, prob=c(0.025, 0.5, 0.975))
#cred.interv$l <- apply(lambda.draws, 2, quantile, prob=c(0.025, 0.5, 0.975))
return(list(dat=dat, draws=draws, summary=summary, cred.interv=cred.interv))
}
# Compute the responsibilities
compute.resp <- function(X, pdf.w, K, Poisson, mean.mat, pi.cur){
for (k in 1:K){
pdf.w[,k] <- log(pi.cur[k]) + rowSums(dpois(X, lambda=mean.mat[[k]], log=TRUE))
}
# Calculate probabilities using the logSumExp trick for numerical stability
Z <- apply(pdf.w, 1, logSumExp)
post.resp <- pdf.w - Z
post.resp <- apply(post.resp, 2, exp) # Exponentiate to get actual probabilities
NLL <- -sum(Z) # Evaluate the NLL
return(list(post.resp=post.resp, NLL=NLL))
}
# Update the mixture components
c.n.update <- function(N, K, post.resp){
c.i.draw <- matrix(0, nrow=N, ncol=K)
for (i in 1:N){ # Sample one point from a multinomial i.e. ~ Discrete
c.i.draw[i,] = rmultinom(1, 1, post.resp[i,])
#c.i.draw[i] <- sample(1:clusters, size=1, prob=post.resp[i,],replace=TRUE)
}
return(c.i.draw)
}
# Update the mixing proportions
pi.update <- function(dir.a, N.k){
a.n <- dir.a + N.k
return(as.vector(rdirichlet(n=1, alpha=a.n))) # Sample from Dirichlet
}
# Update the posterior mean; 'conds' must be passed explicitly because this
# top-level function cannot see the sampler's local variables
lambda.update <- function(X, K, C.n, Poisson, w, s.dot, conds){
D <- NROW(Poisson$l) # Number of conditions
lambda.post <- matrix(0, nrow=D, ncol=K) # Matrix for holding estimated lambdas
X.k.sum <- colSums(C.n * w) # Calculate sum of data points for each k
for (j in 1:D){
beta.n <- Poisson$Gamma$rate.0 + s.dot[j]*X.k.sum
X.j. <- rowSums(as.matrix(X[,which(conds == (unique(conds))[j])]))
alpha.n <- Poisson$Gamma$shape.0 + colSums(C.n * matrix(rep(X.j., K), ncol=K))
lambda.post[j,] <- rgamma(K, shape=alpha.n, rate=beta.n) # Sample from Gamma
}
return(lambda.post)
}
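# Usage sketch (hypothetical count matrix X with two conditions, two replicates
# each; the libSize/libType values are placeholders passed through to normFactors()):
if (FALSE) {
  params <- list(conds = c("A", "A", "B", "B"), libSize = TRUE, libType = "TC")
  res <- pmm.LL.gibbs(X, K = 3, N.Sims = 10000, burnin = 5000, params = params)
  res$summary$pi  # posterior mean mixing proportions
  res$summary$l   # posterior mean lambda per condition x cluster
}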
|
/Gibbs-Sampling/pmm-logLinear-gibbs.R
|
no_license
|
andreaskapou/Mixture-Models
|
R
| false | false | 8,176 |
r
|
#' Perform Gibbs sampling algorithm for Poisson Mixture Models (PMMs) used for
#' modelling NGS RNA-Seq data.
#'
#' The parameter values are initialized using 'kmeans' algorithm.
#'
pmm.LL.gibbs <- function(X, K=2, N.Sims=10000, burnin=5000, theta, params, eqProp=FALSE, stephens=FALSE){
# Unwrap parameters from the 'params' list object
conds <- params$conds
libSize <- params$libSize
libType <- params$libType
rm(params)
N <- NROW(X) # Length of the dataset
q <- NCOL(X) # Number of variables
w <- rowSums(X) # Overall expression for each object
D <- length(unique(conds)) # Total number of conditions
r <- as.vector(table(conds)) # Number of replicates in each condition
post.resp <- matrix(0, nrow=N, ncol=K) # Posterior responsibilities
pdf.w <- matrix(0, nrow=N, ncol=K) # PDF of each point on each cluster k
lambdas <- matrix(0, nrow=D, ncol=K) # Matrix for holding estimated lambdas
total.l <- matrix(0, nrow=D, ncol=K) # Store the sum of posterior means
mean.mat <- vector("list", K) # List for holding the mean matrices l
C.n <- matrix(0, nrow=N, ncol=K) # Mixture components
C.matrix <- matrix(0, nrow=N, ncol=K) # Total Mixture components
NLL <- vector(mode="numeric") # Hold NLL for all MCMC iterations
lambda.draws <- list() # Mean vector of each Poisson
pi.draws <- matrix(0, nrow=N.Sims-burnin, ncol=K) # Mixing Proportions
if (stephens) # Use Stephens algorithm for relabelling MCMC outputs
postRespArr <- array(0, dim=c(N.Sims-burnin, N, K)) # Post resp for each MCMC run
# Grouping columns of X in order of condition (all replicates put together)
o.ycols <- order(conds) # Order of conditions
X <- X[,o.ycols] # Order the observations X accordingly
conds <- conds[o.ycols] # Order the conds vector accordingly
rm(o.ycols)
# Make sure X is an N x q matrix and assign unique names to X and conds
X <- as.matrix(X, nrow=N, ncol=q)
if(length(rownames(X)) == 0){ # If matrix X has no row names
rn <- 1:nrow(X)
}else if(length(rownames(X)) > 0){
rn <- rownames(X)
}
rownames(X) <- rn # Assign names to each row of X
conds.names <- unique(conds) # Get unique condition names
# Compute the library size normalization factors for each variable
s <- normFactors(X, libSize=libSize, libType=libType)
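  # 'normFactors' is assumed to be a helper defined elsewhere in this
  # repository; it returns one library-size normalization factor per column of X.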
# Sum of s for all replicates l on each condition d
s.dot <- rep(NA, D)
for (j in 1:D){
s.dot[j] <- sum( s[which(conds == unique(conds)[j])] )
}
# Create matrices of dimension N x q, for faster computations
w.mat <- matrix(rep(w, times=q), nrow=N, ncol=q)
s.mat <- matrix(rep(s, each=N) , nrow=N, ncol=q)
##=======================================
# Initialize parameters using 'kmeans', #
# if 'theta' argument is missing #
##=======================================
if (missing(theta)){
initParams <- kmeansInit(X=X,
K=K,
w=w,
s.dot=s.dot,
conds=conds,
lambdas=lambdas,
eqProp=eqProp)
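    # 'kmeansInit' is assumed to be a helper defined elsewhere in this
    # repository; it seeds the lambdas and mixing proportions from k-means.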
Poisson <- list()
Poisson$l <- initParams$lambdas # Poisson mean vector for each cluster
Poisson$Gamma <- list(shape.0=1, rate.0=1) # Initialize Gamma hyperparameters
pi.cur <- initParams$pi.c # Mixing proportions
dir.a <- rep(1/K, K) # Dirichlet concentration parameter
}else{
Poisson <- theta$Poisson
pi.cur <- theta$pi.cur
dir.a <- theta$dir.a
}
for (t in 1:N.Sims){ # Start Gibbs sampling
# Compute mean matrix using the estimated lambdas, normalization factors s and
# the overall expression levels for each object w.
for (k in 1:K){
lambda.mat <- matrix(rep(rep(Poisson$l[,k], times=r), each=N), nrow=N, ncol=q)
mean.mat[[k]] <- w.mat * s.mat * lambda.mat
}
# Compute responsibilities
res <- compute.resp(X, pdf.w, K, Poisson, mean.mat, pi.cur)
post.resp <- res$post.resp
# Draw mixture components for ith simulation
C.n <- c.n.update(N, K, post.resp)
# Calculate component counts of each cluster
N.k <- colSums(C.n)
# Update mixing proportions using new cluster component counts
if (eqProp){
pi.cur <- rep(1/K, K)
}else{
pi.cur <- pi.update(dir.a, N.k)
}
# Update posterior mean
Poisson$l <- lambda.update(X, K, C.n, Poisson, w, s.dot)
  # Keep only the simulations after the burn-in period has passed
if (t > burnin){
total.l <- total.l + Poisson$l
NLL <- c(NLL, res$NLL)
C.matrix <- C.matrix + C.n
pi.draws[t - burnin,] <- pi.cur
lambda.draws[[t - burnin]] <- Poisson$l
if (stephens) # Use Stephens algorithm for relabelling MCMC outputs
postRespArr[t-burnin, , ] <- post.resp
}
}
# Object to keep input data
dat <- NULL
dat$X <- X
dat$K <- K
dat$N <- N
dat$D <- D
dat$N.Sims <- N.Sims
dat$burnin <- burnin
# Object to hold all the MCMC draws
draws <- NULL
draws$pi <- pi.draws
draws$l <- lambda.draws
# Object to hold the summaries for the parameters
summary <- NULL
summary$pi <- apply(pi.draws, 2, mean) # Expected value of mix. prop.
  summary$l <- total.l / (N.Sims - burnin)  # Expected value of each mean vector
# Add names to the estimated variables for clarity
names(summary$pi) <- paste("Clust", 1:K)
colnames(summary$l) <- paste("Clust", 1:K)
rownames(summary$l) <- conds.names
summary$C <- C.matrix / (N.Sims-burnin) # Convert C.matrix to probs
summary$NLL <- NLL
# Object to hold the credible intervals for the parameters
cred.interv <- NULL
cred.interv$pi <- apply(pi.draws, 2, quantile, prob=c(0.025, 0.5, 0.975))
#cred.interv$l <- apply(lambda.draws, 2, quantile, prob=c(0.025, 0.5, 0.975))
return(list(dat=dat, draws=draws, summary=summary, cred.interv=cred.interv))
}
# Compute the responsibilities
compute.resp <- function(X, pdf.w, K, Poisson, mean.mat, pi.cur){
for (k in 1:K){
pdf.w[,k] <- log(pi.cur[k]) + rowSums(dpois(X, lambda=mean.mat[[k]], log=TRUE))
}
# Calculate probabilities using the logSumExp trick for numerical stability
Z <- apply(pdf.w, 1, logSumExp)
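  # For reference, logSumExp(v) returns log(sum(exp(v))) computed stably by
  # factoring out the maximum; a minimal sketch of the assumed helper (the
  # real one likely comes from a package such as 'matrixStats'):
  #   logSumExp <- function(v) { m <- max(v); m + log(sum(exp(v - m))) }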
post.resp <- pdf.w - Z
post.resp <- apply(post.resp, 2, exp) # Exponentiate to get actual probabilities
NLL <- -sum(Z) # Evaluate the NLL
return(list(post.resp=post.resp, NLL=NLL))
}
# Update the mixture components
c.n.update <- function(N, K, post.resp){
c.i.draw <- matrix(0, nrow=N, ncol=K)
for (i in 1:N){ # Sample one point from a multinomial i.e. ~ Discrete
c.i.draw[i,] = rmultinom(1, 1, post.resp[i,])
#c.i.draw[i] <- sample(1:clusters, size=1, prob=post.resp[i,],replace=TRUE)
}
return(c.i.draw)
}
# Update the mixing proportions
pi.update <- function(dir.a, N.k){
a.n <- dir.a + N.k
return(as.vector(rdirichlet(n=1, alpha=a.n))) # Sample from Dirichlet
}
# Update the posterior mean
lambda.update <- function(X, K, C.n, Poisson, w, s.dot){
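  # Note: 'conds' is not a formal argument; it is picked up from the calling
  # environment (a global in this script), so the function is not self-contained.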
D <- NROW(Poisson$l) # Number of conditions
lambda.post <- matrix(0, nrow=D, ncol=K) # Matrix for holding estimated lambdas
X.k.sum <- colSums(C.n * w) # Calculate sum of data points for each k
for (j in 1:D){
beta.n <- Poisson$Gamma$rate.0 + s.dot[j]*X.k.sum
X.j. <- rowSums(as.matrix(X[,which(conds == (unique(conds))[j])]))
alpha.n <- Poisson$Gamma$shape.0 + colSums(C.n * matrix(rep(X.j., K), ncol=K))
lambda.post[j,] <- rgamma(K, shape=alpha.n, rate=beta.n) # Sample from Gamma
}
return(lambda.post)
}
|
# Manage files in the assignment 1 repositories
library(tidyverse)
library(ghclass)
Roster <- read.csv("roster_seed707.csv")
github_set_token("8030afbb3ac5faee560484a9607e6823c3b00d5e")
#repo_delete(paste0("Tarea_1_review-", Roster$user))
# Detect the assignment authors' files
repo_ls(repo= "BIO4022/Tarea_1_GiorgiaGraells", full_path = TRUE)
# Fetch the author's assignment
A <- repo_get_file(
repo = "BIO4022/Tarea_1_GiorgiaGraells",
path = "tarea1.Rmd",
branch = "master",
quiet = FALSE,
include_details = TRUE
)
# Put the assignment into the reviewer's repository
repo_put_file(
repo="BIO4022/Tarea_1review-derek-corcoran-barrios",
path= "aut1/tarea1.Rmd",
content=A,
message = "Subiendo tarea 1",
branch = "master",
verbose = TRUE
)
####NicolasGatica
Usuario <- "NicolasGatica"
# Detect the assignment authors' files
Archivos <- repo_ls(repo= paste0("Curso-programacion/Tarea_1_",Usuario), full_path = TRUE)
Archivos <- Archivos[!str_detect(Archivos,".rds")]
#repo_ls(repo="BIO4022/Tarea_1review-derek-corcoran-barrios", path="aut1", full_path = TRUE)
# Fetch the author's assignment
Archivos_listos <- list()
for(i in 1:length(Archivos)){
try({
Archivos_listos[[i]] <- repo_get_file(
repo = paste0("Curso-programacion/Tarea_1_",Usuario),
path = Archivos[i],
branch = "master",
quiet = FALSE,
include_details = TRUE
)})
}
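# Note: try() swallows fetch errors, which can leave NULL entries in
# Archivos_listos; the upload loop below assumes every slot was filled.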
Autor <- Roster %>% dplyr::filter(user == Usuario) %>% mutate(user_random = paste0(user_random, "/")) %>% pull(user_random)
Revisores <- Roster %>% dplyr::filter(user == Usuario) %>% dplyr::select(rev1, rev2, rev3) %>% pivot_longer(cols = everything()) %>% pull(value) %>% as.character()
Revisores_nombres <- Roster %>% dplyr::filter(user_random %in% Revisores) %>% pull(user) %>% as.character()
Repos_rev <- paste0("BIO4022/Tarea_1_review-", Revisores_nombres)
# Put the assignment into the reviewer's repository
for(i in 1:length(Repos_rev)){
for(j in 1:length(Archivos_listos)){
repo_put_file(
repo=Repos_rev[i],
path= paste0(Autor, Archivos[j]),
content=Archivos_listos[[j]],
message = "Subiendo tarea 1",
branch = "master",
verbose = TRUE
)
}
}
########### Assignment 3
Personas <- ghclass::org_members("Curso-programacion")
Faltantes <- org_pending("Curso-programacion")
Admin <- org_admins("Curso-programacion")
Todos <- c(Personas, Faltantes)
Estudiantes <- Todos[!(Todos %in% Admin)]
ghclass::org_create_assignment(
org = "Curso-programacion",
user = Estudiantes,
repo = paste0("Tarea_3_", Estudiantes),
team = NULL,
source_repo = "Curso-programacion/Tarea_3",
private = TRUE
)
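# org_create_assignment() copies the template repo 'Curso-programacion/Tarea_3'
# into one private repository per student.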
#############
# Collect and score the peer reviews
RevTarea1 <- peer_score_review(org = "Curso-programacion",
roster = Roster,
form_review = "Evaluacion.Rmd",
prefix = "Tarea_1_",
suffix = "",
write_csv = TRUE
)
|
/Manejo_ghclass.R
|
no_license
|
Curso-programacion/Tarea_1
|
R
| false | false | 2,992 |
r
|
# Manage files in the assignment 1 repositories
library(tidyverse)
library(ghclass)
Roster <- read.csv("roster_seed707.csv")
github_set_token("8030afbb3ac5faee560484a9607e6823c3b00d5e")
#repo_delete(paste0("Tarea_1_review-", Roster$user))
# Detect the assignment authors' files
repo_ls(repo= "BIO4022/Tarea_1_GiorgiaGraells", full_path = TRUE)
# Fetch the author's assignment
A <- repo_get_file(
repo = "BIO4022/Tarea_1_GiorgiaGraells",
path = "tarea1.Rmd",
branch = "master",
quiet = FALSE,
include_details = TRUE
)
# Put the assignment into the reviewer's repository
repo_put_file(
repo="BIO4022/Tarea_1review-derek-corcoran-barrios",
path= "aut1/tarea1.Rmd",
content=A,
message = "Subiendo tarea 1",
branch = "master",
verbose = TRUE
)
####NicolasGatica
Usuario <- "NicolasGatica"
# Detect the assignment authors' files
Archivos <- repo_ls(repo= paste0("Curso-programacion/Tarea_1_",Usuario), full_path = TRUE)
Archivos <- Archivos[!str_detect(Archivos,".rds")]
#repo_ls(repo="BIO4022/Tarea_1review-derek-corcoran-barrios", path="aut1", full_path = TRUE)
# Fetch the author's assignment
Archivos_listos <- list()
for(i in 1:length(Archivos)){
try({
Archivos_listos[[i]] <- repo_get_file(
repo = paste0("Curso-programacion/Tarea_1_",Usuario),
path = Archivos[i],
branch = "master",
quiet = FALSE,
include_details = TRUE
)})
}
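# Note: try() swallows fetch errors, which can leave NULL entries in
# Archivos_listos; the upload loop below assumes every slot was filled.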
Autor <- Roster %>% dplyr::filter(user == Usuario) %>% mutate(user_random = paste0(user_random, "/")) %>% pull(user_random)
Revisores <- Roster %>% dplyr::filter(user == Usuario) %>% dplyr::select(rev1, rev2, rev3) %>% pivot_longer(cols = everything()) %>% pull(value) %>% as.character()
Revisores_nombres <- Roster %>% dplyr::filter(user_random %in% Revisores) %>% pull(user) %>% as.character()
Repos_rev <- paste0("BIO4022/Tarea_1_review-", Revisores_nombres)
# Put the assignment into the reviewer's repository
for(i in 1:length(Repos_rev)){
for(j in 1:length(Archivos_listos)){
repo_put_file(
repo=Repos_rev[i],
path= paste0(Autor, Archivos[j]),
content=Archivos_listos[[j]],
message = "Subiendo tarea 1",
branch = "master",
verbose = TRUE
)
}
}
########### Assignment 3
Personas <- ghclass::org_members("Curso-programacion")
Faltantes <- org_pending("Curso-programacion")
Admin <- org_admins("Curso-programacion")
Todos <- c(Personas, Faltantes)
Estudiantes <- Todos[!(Todos %in% Admin)]
ghclass::org_create_assignment(
org = "Curso-programacion",
user = Estudiantes,
repo = paste0("Tarea_3_", Estudiantes),
team = NULL,
source_repo = "Curso-programacion/Tarea_3",
private = TRUE
)
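# org_create_assignment() copies the template repo 'Curso-programacion/Tarea_3'
# into one private repository per student.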
#############
# Collect and score the peer reviews
RevTarea1 <- peer_score_review(org = "Curso-programacion",
roster = Roster,
form_review = "Evaluacion.Rmd",
prefix = "Tarea_1_",
suffix = "",
write_csv = TRUE
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NMFSReports.R
\name{TitleCase}
\alias{TitleCase}
\title{Make a String Title Case}
\usage{
TitleCase(str = "", add_dont_cap = "")
}
\arguments{
\item{str}{A string that you want to be in title case}
\item{add_dont_cap}{A vector of strings that the user does not want capitalized}
}
\description{
Make a String Title Case (making and, the, an, etc. lower case)
}
\examples{
TitleCase("HelLo WoRLD OR good-bye?")
}
\keyword{Case,}
\keyword{Title,}
\keyword{strings}
\keyword{word}
|
/man/TitleCase.Rd
|
permissive
|
kellijohnson-NOAA/NMFSReports
|
R
| false | true | 557 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NMFSReports.R
\name{TitleCase}
\alias{TitleCase}
\title{Make a String Title Case}
\usage{
TitleCase(str = "", add_dont_cap = "")
}
\arguments{
\item{str}{A string that you want to be in title case}
\item{add_dont_cap}{A vector of strings that the user does not want capitalized}
}
\description{
Make a String Title Case (making and, the, an, etc. lower case)
}
\examples{
TitleCase("HelLo WoRLD OR good-bye?")
}
\keyword{Case,}
\keyword{Title,}
\keyword{strings}
\keyword{word}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.workdocs_operations.R
\name{get_resources}
\alias{get_resources}
\title{Retrieves a collection of resources, including folders and documents}
\usage{
get_resources(AuthenticationToken = NULL, UserId = NULL,
CollectionType = NULL, Limit = NULL, Marker = NULL)
}
\arguments{
\item{AuthenticationToken}{The Amazon WorkDocs authentication token. Do not set this field when using administrative API actions, as in accessing the API operation using AWS credentials.}
\item{UserId}{The user ID for the resource collection. This is a required field for accessing the API operation using IAM credentials.}
\item{CollectionType}{The collection type.}
\item{Limit}{The maximum number of resources to return.}
\item{Marker}{The marker for the next set of results. This marker was received from a previous call.}
}
\description{
Retrieves a collection of resources, including folders and documents. The only \code{CollectionType} supported is \code{SHARED_WITH_ME}.
}
\section{Accepted Parameters}{
\preformatted{get_resources(
AuthenticationToken = "string",
UserId = "string",
CollectionType = "SHARED_WITH_ME",
Limit = 123,
Marker = "string"
)
}
}
|
/service/paws.workdocs/man/get_resources.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false | true | 1,238 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.workdocs_operations.R
\name{get_resources}
\alias{get_resources}
\title{Retrieves a collection of resources, including folders and documents}
\usage{
get_resources(AuthenticationToken = NULL, UserId = NULL,
CollectionType = NULL, Limit = NULL, Marker = NULL)
}
\arguments{
\item{AuthenticationToken}{The Amazon WorkDocs authentication token. Do not set this field when using administrative API actions, as in accessing the API operation using AWS credentials.}
\item{UserId}{The user ID for the resource collection. This is a required field for accessing the API operation using IAM credentials.}
\item{CollectionType}{The collection type.}
\item{Limit}{The maximum number of resources to return.}
\item{Marker}{The marker for the next set of results. This marker was received from a previous call.}
}
\description{
Retrieves a collection of resources, including folders and documents. The only \code{CollectionType} supported is \code{SHARED_WITH_ME}.
}
\section{Accepted Parameters}{
\preformatted{get_resources(
AuthenticationToken = "string",
UserId = "string",
CollectionType = "SHARED_WITH_ME",
Limit = 123,
Marker = "string"
)
}
}
|
# logistic regression
#
d<-read.csv("fin-ratio.csv")
names(d)
summary(lm(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~EY+CFTP+ln_MV+DY+BTME,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~EY+CFTP+ln_MV+DY,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~CFTP+ln_MV+DY,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~CFTP+ln_MV,data=d))
readline("Hit <Return> to continue:")
reg<-lm(HSI~CFTP+ln_MV,data=d)
names(reg)
par(mfrow=c(2,2))
plot(reg$fit,reg$resid)
qqnorm(reg$resid)
qqline(reg$resid)
res<-as.ts(reg$resid)
plot(res,lag(res))
plot(reg$resid)
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d,binomial))
readline("Hit <Return> to continue:")
lreg<-glm(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d,binomial)
names(lreg)
pr<-(lreg$fit>0.5)
table(pr,d$HSI)
readline("Hit <Return> to continue:")
# outlier detection
#
d0<-d[d$HSI==0,]
d1<-d[d$HSI==1,]
dim(d0)
dim(d1)
source("mdist.r")
x<-d0[,1:6]
md<-mdist(x)
par(mfrow=c(1,1))
plot(md)
c<-qchisq(0.99,df=6)
c
d2<-d0[md<c,]
dim(d2)
d3<-rbind(d1,d2)
dim(d3)
write.table(d3,file="fin-ratio1.csv",sep=",",row.names=F)
summary(glm(HSI~CFTP+ln_MV+BTME,data=d3,binomial))
lreg<-glm(HSI~CFTP+ln_MV+BTME,data=d3,binomial)
pr<-(lreg$fitted.values>0.5)
table(pr,d3$HSI)
readline("Hit <Return> to continue:")
# Dummy variable in logistic regression
#
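# The cutoff 9.4766 splits stocks by log market value; the exact value is
# assumed to come from an earlier step (e.g. a tree split or quantile of ln_MV).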
g<-(d3$ln_MV>9.4766)+1
summary(glm(HSI~EY+CFTP+g+DY+BTME+DTE+EY*g+CFTP*g+DY*g+BTME*g+DTE*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+CFTP+g+DY+DTE+EY*g+CFTP*g+DY*g+DTE*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+CFTP+g+DY+EY*g+CFTP*g+DY*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+g+DY+EY*g+DY*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~g+DY+DY*g,data=d3,binomial))
readline("Hit <Return> to continue:")
lreg<-glm(HSI~g+DY+DY*g,data=d3,binomial)
pr<-(lreg$fitted.values>0.5)
table(pr,d3$HSI)
readline("Hit <Return> to continue:")
# multinomial logit
#
d<-read.csv("iris.csv")
names(d)
library(nnet)
mn1<-multinom(Species~.,data=d)
summary(mn1)
readline("Hit <Return> to continue:")
pred<-predict(mn1)
table(pred,d$Species)
readline("Hit <Return> to continue:")
# model selection
#
d<-read.csv("fin-ratio1.csv")
lreg<-glm(HSI~.,data=d,binomial)
step(lreg)
readline("Hit <Return> to continue:")
# LDA
#
d<-read.csv("fin-ratio.csv")
x<-d[,1:6]
d1<-x[d$HSI==0,]
d2<-x[d$HSI==1,]
n1<-dim(d1)[1]
n2<-dim(d2)[1]
m1<-apply(d1,2,mean)
m2<-apply(d2,2,mean)
s1<-var(d1)
s2<-var(d2)
sp<-((n1-1)*s1+(n2-1)*s2)/(n1+n2-2)
dist1<-mahalanobis(x,m1,sp)
dist2<-mahalanobis(x,m2,sp)
w12<-(dist2-dist1)/2
pr<-w12<0
table(pr,d$HSI)
readline("Hit <Return> to continue:")
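# Shift the cutoff by the log prior odds log(n2/n1) so the LDA rule reflects
# the unequal group sizes instead of assuming equal priors.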
c<-log(n2/n1)
pr<-(w12<c)
table(pr,d$HSI)
readline("Hit <Return> to continue:")
library(MASS)
lda<-lda(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d)
pred<-predict(lda)$class
table(pred,d$HSI)
readline("Hit <Return> to continue:")
d<-read.csv("iris.csv")
names(d)
lda<-lda(Species~.,data=d)
pred<-predict(lda)$class
table(pred,d$Species)
readline("Hit <Return> to continue:")
d<-read.csv("fin-ratio.csv")
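# With CV=T, lda() performs leave-one-out cross-validation and returns the
# jackknifed class predictions in lda$class instead of a fitted model.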
lda<-lda(HSI~.,data=d,CV=T)
table(lda$class,d$HSI)
|
/4.r
|
no_license
|
twtang/RMSC4002
|
R
| false | false | 3,272 |
r
|
# logistic regression
#
d<-read.csv("fin-ratio.csv")
names(d)
summary(lm(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~EY+CFTP+ln_MV+DY+BTME,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~EY+CFTP+ln_MV+DY,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~CFTP+ln_MV+DY,data=d))
readline("Hit <Return> to continue:")
summary(lm(HSI~CFTP+ln_MV,data=d))
readline("Hit <Return> to continue:")
reg<-lm(HSI~CFTP+ln_MV,data=d)
names(reg)
par(mfrow=c(2,2))
plot(reg$fit,reg$resid)
qqnorm(reg$resid)
qqline(reg$resid)
res<-as.ts(reg$resid)
plot(res,lag(res))
plot(reg$resid)
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d,binomial))
readline("Hit <Return> to continue:")
lreg<-glm(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d,binomial)
names(lreg)
pr<-(lreg$fit>0.5)
table(pr,d$HSI)
readline("Hit <Return> to continue:")
# outlier detection
#
d0<-d[d$HSI==0,]
d1<-d[d$HSI==1,]
dim(d0)
dim(d1)
source("mdist.r")
x<-d0[,1:6]
md<-mdist(x)
par(mfrow=c(1,1))
plot(md)
c<-qchisq(0.99,df=6)
c
d2<-d0[md<c,]
dim(d2)
d3<-rbind(d1,d2)
dim(d3)
write.table(d3,file="fin-ratio1.csv",sep=",",row.names=F)
summary(glm(HSI~CFTP+ln_MV+BTME,data=d3,binomial))
lreg<-glm(HSI~CFTP+ln_MV+BTME,data=d3,binomial)
pr<-(lreg$fitted.values>0.5)
table(pr,d3$HSI)
readline("Hit <Return> to continue:")
# Dummy variable in logistic regression
#
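# The cutoff 9.4766 splits stocks by log market value; the exact value is
# assumed to come from an earlier step (e.g. a tree split or quantile of ln_MV).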
g<-(d3$ln_MV>9.4766)+1
summary(glm(HSI~EY+CFTP+g+DY+BTME+DTE+EY*g+CFTP*g+DY*g+BTME*g+DTE*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+CFTP+g+DY+DTE+EY*g+CFTP*g+DY*g+DTE*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+CFTP+g+DY+EY*g+CFTP*g+DY*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~EY+g+DY+EY*g+DY*g,data=d3,binomial))
readline("Hit <Return> to continue:")
summary(glm(HSI~g+DY+DY*g,data=d3,binomial))
readline("Hit <Return> to continue:")
lreg<-glm(HSI~g+DY+DY*g,data=d3,binomial)
pr<-(lreg$fitted.values>0.5)
table(pr,d3$HSI)
readline("Hit <Return> to continue:")
# multinomial logit
#
d<-read.csv("iris.csv")
names(d)
library(nnet)
mn1<-multinom(Species~.,data=d)
summary(mn1)
readline("Hit <Return> to continue:")
pred<-predict(mn1)
table(pred,d$Species)
readline("Hit <Return> to continue:")
# model selection
#
d<-read.csv("fin-ratio1.csv")
lreg<-glm(HSI~.,data=d,binomial)
step(lreg)
readline("Hit <Return> to continue:")
# LDA
#
d<-read.csv("fin-ratio.csv")
x<-d[,1:6]
d1<-x[d$HSI==0,]
d2<-x[d$HSI==1,]
n1<-dim(d1)[1]
n2<-dim(d2)[1]
m1<-apply(d1,2,mean)
m2<-apply(d2,2,mean)
s1<-var(d1)
s2<-var(d2)
sp<-((n1-1)*s1+(n2-1)*s2)/(n1+n2-2)
dist1<-mahalanobis(x,m1,sp)
dist2<-mahalanobis(x,m2,sp)
w12<-(dist2-dist1)/2
pr<-w12<0
table(pr,d$HSI)
readline("Hit <Return> to continue:")
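# Shift the cutoff by the log prior odds log(n2/n1) so the LDA rule reflects
# the unequal group sizes instead of assuming equal priors.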
c<-log(n2/n1)
pr<-(w12<c)
table(pr,d$HSI)
readline("Hit <Return> to continue:")
library(MASS)
lda<-lda(HSI~EY+CFTP+ln_MV+DY+BTME+DTE,data=d)
pred<-predict(lda)$class
table(pred,d$HSI)
readline("Hit <Return> to continue:")
d<-read.csv("iris.csv")
names(d)
lda<-lda(Species~.,data=d)
pred<-predict(lda)$class
table(pred,d$Species)
readline("Hit <Return> to continue:")
d<-read.csv("fin-ratio.csv")
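# With CV=T, lda() performs leave-one-out cross-validation and returns the
# jackknifed class predictions in lda$class instead of a fitted model.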
lda<-lda(HSI~.,data=d,CV=T)
table(lda$class,d$HSI)
|
\name{Weka_clusterers}
\alias{Cobweb}
\alias{FarthestFirst}
\alias{SimpleKMeans}
\alias{XMeans}
\alias{DBScan}
\title{R/Weka Clusterers}
\description{
R interfaces to Weka clustering algorithms.
}
\usage{
Cobweb(x, control = NULL)
FarthestFirst(x, control = NULL)
SimpleKMeans(x, control = NULL)
XMeans(x, control = NULL)
DBScan(x, control = NULL)
}
\arguments{
\item{x}{an R object with the data to be clustered.}
\item{control}{an object of class \code{\link{Weka_control}}, or a
character vector of control options, or \code{NULL} (default).
Available options can be obtained on-line using the Weka Option
Wizard \code{\link{WOW}}, or the Weka documentation.}
}
\value{
A list inheriting from class \code{Weka_clusterers} with components
including
\item{clusterer}{a reference (of class
\code{\link[rJava:jobjRef-class]{jobjRef}}) to a Java object
obtained by applying the Weka \code{buildClusterer} method to the
training instances using the given control options.}
\item{class_ids}{a vector of integers indicating the class to which
each training instance is allocated (the results of calling the Weka
\code{clusterInstance} method for the built clusterer and each
instance).}
}
\details{
There is a \code{\link[=predict.Weka_clusterer]{predict}} method for
predicting class ids or memberships from the fitted clusterers.
\code{Cobweb} implements the Cobweb (Fisher, 1987) and Classit
(Gennari et al., 1989) clustering algorithms.
\code{FarthestFirst} provides the \dQuote{farthest first traversal
algorithm} by Hochbaum and Shmoys, which works as a fast simple
approximate clusterer modeled after simple \eqn{k}-means.
\code{SimpleKMeans} provides clustering with the \eqn{k}-means
algorithm.
\code{XMeans} provides \eqn{k}-means extended by an
\dQuote{Improve-Structure part} and automatically determines the
number of clusters.
\code{DBScan} provides the \dQuote{density-based clustering algorithm}
by Ester, Kriegel, Sander, and Xu. Note that noise points are assigned
to \code{NA}.
}
\references{
M. Ester, H.-P. Kriegel, J. Sander, and X. Xu (1996).
A Density-Based Algorithm for Discovering Clusters in Large Spatial
Databases with Noise.
\emph{Proceedings of the Second International Conference on Knowledge
Discovery and Data Mining (KDD'96)},
Portland, OR, 226--231.
AAAI Press.
D. H. Fisher (1987).
Knowledge acquisition via incremental conceptual clustering.
\emph{Machine Learning}, \bold{2}/2, 139--172.
\doi{10.1023/A:1022852608280}.
J. Gennari, P. Langley, and D. H. Fisher (1989).
Models of incremental concept formation.
\emph{Artificial Intelligence}, \bold{40}, 11--62.
D. S. Hochbaum and D. B. Shmoys (1985).
A best possible heuristic for the \eqn{k}-center problem,
\emph{Mathematics of Operations Research}, \bold{10}(2), 180--184.
\doi{10.1287/moor.10.2.180}.
D. Pelleg and A. W. Moore (2006).
X-means: Extending K-means with Efficient Estimation of the Number of
Clusters.
In: \emph{Seventeenth International Conference on Machine Learning},
727--734.
Morgan Kaufmann.
I. H. Witten and E. Frank (2005).
\emph{Data Mining: Practical Machine Learning Tools and Techniques}.
2nd Edition, Morgan Kaufmann, San Francisco.
}
\note{
\code{XMeans} requires Weka package \pkg{XMeans} to be installed.
\code{DBScan} requires Weka package \pkg{optics_dbScan} to be
installed.
}
\examples{
cl1 <- SimpleKMeans(iris[, -5], Weka_control(N = 3))
cl1
table(predict(cl1), iris$Species)
\dontrun{
## Requires Weka package 'XMeans' to be installed.
## Use XMeans with a KDTree.
cl2 <- XMeans(iris[, -5],
c("-L", 3, "-H", 7, "-use-kdtree",
"-K", "weka.core.neighboursearch.KDTree -P"))
cl2
table(predict(cl2), iris$Species)
}
}
\keyword{cluster}
|
/man/Weka_clusterers.Rd
|
no_license
|
cran/RWeka
|
R
| false | false | 3,850 |
rd
|
\name{Weka_clusterers}
\alias{Cobweb}
\alias{FarthestFirst}
\alias{SimpleKMeans}
\alias{XMeans}
\alias{DBScan}
\title{R/Weka Clusterers}
\description{
R interfaces to Weka clustering algorithms.
}
\usage{
Cobweb(x, control = NULL)
FarthestFirst(x, control = NULL)
SimpleKMeans(x, control = NULL)
XMeans(x, control = NULL)
DBScan(x, control = NULL)
}
\arguments{
\item{x}{an R object with the data to be clustered.}
\item{control}{an object of class \code{\link{Weka_control}}, or a
character vector of control options, or \code{NULL} (default).
Available options can be obtained on-line using the Weka Option
Wizard \code{\link{WOW}}, or the Weka documentation.}
}
\value{
A list inheriting from class \code{Weka_clusterers} with components
including
\item{clusterer}{a reference (of class
\code{\link[rJava:jobjRef-class]{jobjRef}}) to a Java object
obtained by applying the Weka \code{buildClusterer} method to the
training instances using the given control options.}
\item{class_ids}{a vector of integers indicating the class to which
each training instance is allocated (the results of calling the Weka
\code{clusterInstance} method for the built clusterer and each
instance).}
}
\details{
There is a \code{\link[=predict.Weka_clusterer]{predict}} method for
predicting class ids or memberships from the fitted clusterers.
\code{Cobweb} implements the Cobweb (Fisher, 1987) and Classit
(Gennari et al., 1989) clustering algorithms.
\code{FarthestFirst} provides the \dQuote{farthest first traversal
algorithm} by Hochbaum and Shmoys, which works as a fast simple
approximate clusterer modeled after simple \eqn{k}-means.
\code{SimpleKMeans} provides clustering with the \eqn{k}-means
algorithm.
\code{XMeans} provides \eqn{k}-means extended by an
\dQuote{Improve-Structure part} and automatically determines the
number of clusters.
\code{DBScan} provides the \dQuote{density-based clustering algorithm}
by Ester, Kriegel, Sander, and Xu. Note that noise points are assigned
to \code{NA}.
}
\references{
M. Ester, H.-P. Kriegel, J. Sander, and X. Xu (1996).
A Density-Based Algorithm for Discovering Clusters in Large Spatial
Databases with Noise.
\emph{Proceedings of the Second International Conference on Knowledge
Discovery and Data Mining (KDD'96)},
Portland, OR, 226--231.
AAAI Press.
D. H. Fisher (1987).
Knowledge acquisition via incremental conceptual clustering.
\emph{Machine Learning}, \bold{2}/2, 139--172.
\doi{10.1023/A:1022852608280}.
J. Gennari, P. Langley, and D. H. Fisher (1989).
Models of incremental concept formation.
\emph{Artificial Intelligence}, \bold{40}, 11--62.
D. S. Hochbaum and D. B. Shmoys (1985).
A best possible heuristic for the \eqn{k}-center problem,
\emph{Mathematics of Operations Research}, \bold{10}(2), 180--184.
\doi{10.1287/moor.10.2.180}.
D. Pelleg and A. W. Moore (2006).
X-means: Extending K-means with Efficient Estimation of the Number of
Clusters.
In: \emph{Seventeenth International Conference on Machine Learning},
727--734.
Morgan Kaufmann.
I. H. Witten and E. Frank (2005).
\emph{Data Mining: Practical Machine Learning Tools and Techniques}.
2nd Edition, Morgan Kaufmann, San Francisco.
}
\note{
\code{XMeans} requires Weka package \pkg{XMeans} to be installed.
\code{DBScan} requires Weka package \pkg{optics_dbScan} to be
installed.
}
\examples{
cl1 <- SimpleKMeans(iris[, -5], Weka_control(N = 3))
cl1
table(predict(cl1), iris$Species)
\dontrun{
## Requires Weka package 'XMeans' to be installed.
## Use XMeans with a KDTree.
cl2 <- XMeans(iris[, -5],
c("-L", 3, "-H", 7, "-use-kdtree",
"-K", "weka.core.neighboursearch.KDTree -P"))
cl2
table(predict(cl2), iris$Species)
}
}
\keyword{cluster}
|
/plugins/MacAU/Highpass2/Highpass2.r
|
permissive
|
airwindows/airwindows
|
R
| false | false | 3,238 |
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Microclusters.R
\name{readjust_clusters}
\alias{readjust_clusters}
\title{Repartitions existing clusters to achieve desired granularity.}
\usage{
readjust_clusters(clusters, data, cellsPerPartition = 100)
}
\arguments{
\item{clusters}{List of clusters, each entry being a vector of cells in a
cluster.}
\item{data}{NUM_SAMPLES x NUM_PROTEINS data matrix that was used to generate
clusters}
\item{cellsPerPartition}{the number of cells for a single partition the
algorithm should aim for}
}
\value{
Repartitioned clusters, such that a desireable number of
microclusters is acheived.
}
\description{
By default, the minimum number of clusters to be generated is the square root
of the number of cells.
}
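% A hypothetical usage sketch: the cluster contents, cell names, and 'data'
% matrix below are illustrative placeholders, not objects from the package.
\examples{
\dontrun{
clusters <- list(c("cell1", "cell2", "cell3"), c("cell4", "cell5"))
newClusters <- readjust_clusters(clusters, data, cellsPerPartition = 100)
}
}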
|
/man/readjust_clusters.Rd
|
permissive
|
YosefLab/VISION
|
R
| false | true | 777 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Microclusters.R
\name{readjust_clusters}
\alias{readjust_clusters}
\title{Repartitions existing clusters to achieve desired granularity.}
\usage{
readjust_clusters(clusters, data, cellsPerPartition = 100)
}
\arguments{
\item{clusters}{List of clusters, each entry being a vector of cells in a
cluster.}
\item{data}{NUM_SAMPLES x NUM_PROTEINS data matrix that was used to generate
clusters}
\item{cellsPerPartition}{the number of cells for a single partition the
algorithm should aim for}
}
\value{
Repartitioned clusters, such that a desireable number of
microclusters is acheived.
}
\description{
By default, the minimum number of clusters to be generated is the square root
of the number of cells.
}
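% A hypothetical usage sketch: the cluster contents, cell names, and 'data'
% matrix below are illustrative placeholders, not objects from the package.
\examples{
\dontrun{
clusters <- list(c("cell1", "cell2", "cell3"), c("cell4", "cell5"))
newClusters <- readjust_clusters(clusters, data, cellsPerPartition = 100)
}
}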
|
\name{unique.matrix}
\alias{unique.matrix}
\alias{duplicated.matrix}
\alias{anyDuplicated.matrix}
\title{Finding Unique or Duplicated Rows or Columns for Atomic Matrices}
\description{
These S3 methods are alternative (typically much faster) implementations of counterparts in the \code{base} package for atomic matrices.
\code{unique.matrix} returns a matrix with duplicated rows (or columns) removed.
\code{duplicated.matrix} returns a logical vector indicating which rows (or columns) are duplicated.
\code{anyDuplicated.matrix} returns an integer indicating the index of the first duplicate row (or column) if any, and \code{0L} otherwise.
}
\usage{
\method{unique}{matrix}(x, incomparables = FALSE, MARGIN = 1,
fromLast = FALSE, signif=Inf, \dots)
\method{duplicated}{matrix}(x, incomparables = FALSE, MARGIN = 1,
fromLast = FALSE, signif=Inf,\dots)
\method{anyDuplicated}{matrix}(x, incomparables = FALSE,
MARGIN = 1, fromLast = FALSE, signif=Inf,\dots)
}
\arguments{
\item{x}{an atomic matrix of mode \code{"numeric"}, \code{"integer"}, \code{"logical"}, \code{"complex"}, \code{"character"} or \code{"raw"}. When \code{x} is not atomic or when it is not a matrix, the \code{\link[base:unique]{base::unique.matrix}} in the \code{base} package will be called. }
\item{incomparables}{a vector of values that cannot be compared, as in \code{\link[base:unique]{base::unique.matrix}}. Only when \code{incomparables=FALSE} will the code in \code{uniqueAtomMat} package be used; otherwise, the \code{base} version will be called. }
\item{fromLast}{a logical scalar indicating if duplication should be considered
from the last, as in \code{\link[base:unique]{base::unique.matrix}}. }
\item{\dots}{arguments for particular methods.}
\item{MARGIN}{a numeric scalar, the matrix margin to be held fixed, as in \code{\link{apply}}. For \code{unique.matrix}, only \code{MARGIN=1} and \code{MARGIN=2} are allowed; for \code{duplicated.matrix} and \code{anyDuplicated.matrix}, \code{MARGIN=0} is also allowed. For all other cases, the implementation in the \code{base} package will be called.}
\item{signif}{a numerical scalar only applicable to numeric or complex \code{x}. If \code{signif=NULL}, then \code{x} will first be passed to \code{\link{signif}} function with the number of significant digits being the \code{C} constant \code{DBL_DIG}, as explained in \code{\link{as.character}}. If \code{signif=Inf} (which is the default value), then \code{x} is untouched before finding duplicates. If \code{signif} is any other number, it specifies the required number of significant digits for \code{\link{signif}} function. }
}
\details{
These S3 methods are alternative implementations of counterparts in the \code{base} package for atomic matrices (i.e., double, integer, logical, character, complex and raw) directly based on C++98 Standard Template Library (STL) \code{std::set}, or C++11 STL \code{std::unordered_set}. The implementation treats the whole row (or column) \emph{vector} as the key, without the intermediate steps of converting the mode to \code{character} nor collapsing them into a \emph{scalar} as done in \code{base}. On systems with empty \code{`R CMD config CXX11`}, the C++98 STL \code{std::set} is used, which is typically implemented as a self-balancing tree (usually a red-black tree) that takes \eqn{O[n\log{(n)}]}{O[n log(n)]} to find all duplicates, where \code{n=dim(x)[MARGIN]}. On systems with non-empty \code{`R CMD config CXX11`}, the C++11 STL \code{std::unordered_set} is used, with average \eqn{O(n)}{O(n)} performance and worst case \eqn{O(n^2)}{O(n^2)} performance.
Missing values are regarded as equal, but \code{NaN} is not equal to
\code{NA_real_}.
Further, in contrast to the \code{base} counterparts, characters are compared directly based on their internal representations; i.e., no encoding issues for characters. Complex values are compared by their real and imaginary parts separately.
}
\value{
\code{unique.matrix} returns a matrix with duplicated rows (if \code{MARGIN=1}) or columns (if \code{MARGIN=2}) removed.
\code{duplicated.matrix} returns a logical vector indicating which rows (if \code{MARGIN=1}) or columns (if \code{MARGIN=2}) are duplicated.
\code{anyDuplicated.matrix} returns an integer indicating the index of the first (if \code{fromLast=FALSE}) or last (if \code{fromLast=TRUE}) duplicate row (if \code{MARGIN=1}) or column (if \code{MARGIN=2}) if any, and \code{0L} otherwise.
}
\section{Warning}{
In contrast to the \code{base} counterparts,
characters are compared directly based on their internal representations without considering encoding issues; for numeric and complex matrices, the default \code{signif} is \code{Inf}, i.e. comparing floating point values directly without rounding; and \link{long vectors} are not supported yet.
}
\seealso{
\code{\link[base:duplicated]{base::duplicated}}, \code{\link[base:unique]{base::unique}}, \code{\link{signif}}, \code{\link{grpDuplicated}}
}
\examples{
## prepare test data:
set.seed(9992722L, kind="Mersenne-Twister")
x.double=model.matrix(~gl(5,8))[sample(40), ]
## typical uses
unique(x.double)
unique(x.double, fromLast=TRUE)
unique(t(x.double), MARGIN=2)
unique(t(x.double), MARGIN=2, fromLast=TRUE)
anyDuplicated(x.double)
anyDuplicated(x.double, fromLast = TRUE)
## additional atomic test data
x.integer=as.integer(x.double); attributes(x.integer)=attributes(x.double)
x.factor=as.factor(x.integer); dim(x.factor)=dim(x.integer); dimnames(x.factor)=dimnames(x.integer)
x.logical=as.logical(x.double); attributes(x.logical)=attributes(x.double)
x.character=as.character(x.double); attributes(x.character)=attributes(x.double)
x.complex=as.complex(x.double); attributes(x.complex)=attributes(x.double)
x.raw=as.raw(x.double); attributes(x.raw)=attributes(x.double)
## compare results with base:
stopifnot(identical(base::duplicated.matrix(x.double),
uniqueAtomMat::duplicated.matrix(x.double)
))
stopifnot(identical(base::duplicated.matrix(x.integer, fromLast=TRUE),
uniqueAtomMat::duplicated.matrix(x.integer, fromLast=TRUE)
))
stopifnot(identical(base::duplicated.matrix(t(x.logical), MARGIN=2L),
uniqueAtomMat::duplicated.matrix(t(x.logical), MARGIN=2L)
))
stopifnot(identical(base::duplicated.matrix(t(x.character), MARGIN=2L, fromLast=TRUE),
uniqueAtomMat::duplicated.matrix(t(x.character), MARGIN=2L, fromLast=TRUE)
))
stopifnot(identical(base::unique.matrix(x.complex),
uniqueAtomMat::unique.matrix(x.complex)
))
stopifnot(identical(base::unique.matrix(x.raw),
uniqueAtomMat::unique.matrix(x.raw)
))
stopifnot(identical(base::unique.matrix(x.factor),
uniqueAtomMat::unique.matrix(x.factor)
))
stopifnot(identical(base::duplicated.matrix(x.double, MARGIN=0),
uniqueAtomMat::duplicated.matrix(x.double, MARGIN=0)
))
stopifnot(identical(base::anyDuplicated.matrix(x.integer, MARGIN=0),
uniqueAtomMat::anyDuplicated.matrix(x.integer, MARGIN=0)
))
## benchmarking
if (require(microbenchmark)){
print(microbenchmark(base::duplicated.matrix(x.double)))
print(microbenchmark(uniqueAtomMat::duplicated.matrix(x.double)))
print(microbenchmark(base::duplicated.matrix(x.character)))
print(microbenchmark(uniqueAtomMat::duplicated.matrix(x.character)))
}else{
print(system.time(replicate(5e3L, base::duplicated.matrix(x.double))))
print(system.time(replicate(5e3L, uniqueAtomMat::duplicated.matrix(x.double))))
print(system.time(replicate(5e3L, base::duplicated.matrix(x.character))))
print(system.time(replicate(5e3L, uniqueAtomMat::duplicated.matrix(x.character))))
}
}
\keyword{manip}
\keyword{logic}
|
/man/unique.Rd
|
no_license
|
gitlongor/uniqueAtomMat
|
R
| false | false | 7,879 |
rd
|
\name{unique.matrix}
\alias{unique.matrix}
\alias{duplicated.matrix}
\alias{anyDuplicated.matrix}
\title{Finding Unique or Duplicated Rows or Columns for Atomic Matrices}
\description{
These S3 methods are alternative (typically much faster) implementations of counterparts in the \code{base} package for atomic matrices.
\code{unique.matrix} returns a matrix with duplicated rows (or columns) removed.
\code{duplicated.matrix} returns a logical vector indicating which rows (or columns) are duplicated.
\code{anyDuplicated.matrix} returns an integer indicating the index of the first duplicate row (or column) if any, and \code{0L} otherwise.
}
\usage{
\method{unique}{matrix}(x, incomparables = FALSE, MARGIN = 1,
fromLast = FALSE, signif=Inf, \dots)
\method{duplicated}{matrix}(x, incomparables = FALSE, MARGIN = 1,
fromLast = FALSE, signif=Inf,\dots)
\method{anyDuplicated}{matrix}(x, incomparables = FALSE,
MARGIN = 1, fromLast = FALSE, signif=Inf,\dots)
}
\arguments{
\item{x}{an atomic matrix of mode \code{"numeric"}, \code{"integer"}, \code{"logical"}, \code{"complex"}, \code{"character"} or \code{"raw"}. When \code{x} is not atomic or when it is not a matrix, the \code{\link[base:unique]{base::unique.matrix}} in the \code{base} package will be called. }
\item{incomparables}{a vector of values that cannot be compared, as in \code{\link[base:unique]{base::unique.matrix}}. Only when \code{incomparables=FALSE} will the code in \code{uniqueAtomMat} package be used; otherwise, the \code{base} version will be called. }
\item{fromLast}{a logical scalar indicating if duplication should be considered
from the last, as in \code{\link[base:unique]{base::unique.matrix}}. }
\item{\dots}{arguments for particular methods.}
\item{MARGIN}{a numeric scalar, the matrix margin to be held fixed, as in \code{\link{apply}}. For \code{unique.matrix}, only \code{MARGIN=1} and \code{MARGIN=2} are allowed; for \code{duplicated.matrix} and \code{anyDuplicated.matrix}, \code{MARGIN=0} is also allowed. For all other cases, the implementation in the \code{base} package will be called.}
\item{signif}{a numerical scalar only applicable to numeric or complex \code{x}. If \code{signif=NULL}, then \code{x} will first be passed to \code{\link{signif}} function with the number of significant digits being the \code{C} constant \code{DBL_DIG}, as explained in \code{\link{as.character}}. If \code{signif=Inf} (which is the default value), then \code{x} is untouched before finding duplicates. If \code{signif} is any other number, it specifies the required number of significant digits for \code{\link{signif}} function. }
}
\details{
These S3 methods are alternative implementations of counterparts in the \code{base} package for atomic matrices (i.e., double, integer, logical, character, complex and raw) directly based on C++98 Standard Template Library (STL) \code{std::set}, or C++11 STL \code{std::unordered_set}. The implementation treats the whole row (or column) \emph{vector} as the key, without the intermediate steps of converting the mode to \code{character} nor collapsing them into a \emph{scalar} as done in \code{base}. On systems with empty \code{`R CMD config CXX11`}, the C++98 STL \code{std::set} is used, which is typically implemented as a self-balancing tree (usually a red-black tree) that takes \eqn{O[n\log{(n)}]}{O[n log(n)]} to find all duplicates, where \code{n=dim(x)[MARGIN]}. On systems with non-empty \code{`R CMD config CXX11`}, the C++11 STL \code{std::unordered_set} is used, with average \eqn{O(n)}{O(n)} performance and worst case \eqn{O(n^2)}{O(n^2)} performance.
Missing values are regarded as equal, but \code{NaN} is not equal to
\code{NA_real_}.
Further, in contrast to the \code{base} counterparts, characters are compared directly based on their internal representations; i.e., no encoding issues for characters. Complex values are compared by their real and imaginary parts separately.
}
\value{
\code{unique.matrix} returns a matrix with duplicated rows (if \code{MARGIN=1}) or columns (if \code{MARGIN=2}) removed.
\code{duplicated.matrix} returns a logical vector indicating which rows (if \code{MARGIN=1}) or columns (if \code{MARGIN=2}) are duplicated.
\code{anyDuplicated.matrix} returns an integer indicating the index of the first (if \code{fromLast=FALSE}) or last (if \code{fromLast=TRUE}) duplicate row (if \code{MARGIN=1}) or column (if \code{MARGIN=2}) if any, and \code{0L} otherwise.
}
\section{Warning}{
In contrast to the \code{base} counterparts,
characters are compared directly based on their internal representations without considering encoding issues; for numeric and complex matrices, the default \code{signif} is \code{Inf}, i.e. comparing floating point values directly without rounding; and \link{long vectors} are not supported yet.
}
\seealso{
\code{\link[base:duplicated]{base::duplicated}}, \code{\link[base:unique]{base::unique}}, \code{\link{signif}}, \code{\link{grpDuplicated}}
}
\examples{
## prepare test data:
set.seed(9992722L, kind="Mersenne-Twister")
x.double=model.matrix(~gl(5,8))[sample(40), ]
## typical uses
unique(x.double)
unique(x.double, fromLast=TRUE)
unique(t(x.double), MARGIN=2)
unique(t(x.double), MARGIN=2, fromLast=TRUE)
anyDuplicated(x.double)
anyDuplicated(x.double, fromLast = TRUE)
## additional atomic test data
x.integer=as.integer(x.double); attributes(x.integer)=attributes(x.double)
x.factor=as.factor(x.integer); dim(x.factor)=dim(x.integer); dimnames(x.factor)=dimnames(x.integer)
x.logical=as.logical(x.double); attributes(x.logical)=attributes(x.double)
x.character=as.character(x.double); attributes(x.character)=attributes(x.double)
x.complex=as.complex(x.double); attributes(x.complex)=attributes(x.double)
x.raw=as.raw(x.double); attributes(x.raw)=attributes(x.double)
## compare results with base:
stopifnot(identical(base::duplicated.matrix(x.double),
uniqueAtomMat::duplicated.matrix(x.double)
))
stopifnot(identical(base::duplicated.matrix(x.integer, fromLast=TRUE),
uniqueAtomMat::duplicated.matrix(x.integer, fromLast=TRUE)
))
stopifnot(identical(base::duplicated.matrix(t(x.logical), MARGIN=2L),
uniqueAtomMat::duplicated.matrix(t(x.logical), MARGIN=2L)
))
stopifnot(identical(base::duplicated.matrix(t(x.character), MARGIN=2L, fromLast=TRUE),
uniqueAtomMat::duplicated.matrix(t(x.character), MARGIN=2L, fromLast=TRUE)
))
stopifnot(identical(base::unique.matrix(x.complex),
uniqueAtomMat::unique.matrix(x.complex)
))
stopifnot(identical(base::unique.matrix(x.raw),
uniqueAtomMat::unique.matrix(x.raw)
))
stopifnot(identical(base::unique.matrix(x.factor),
uniqueAtomMat::unique.matrix(x.factor)
))
stopifnot(identical(base::duplicated.matrix(x.double, MARGIN=0),
uniqueAtomMat::duplicated.matrix(x.double, MARGIN=0)
))
stopifnot(identical(base::anyDuplicated.matrix(x.integer, MARGIN=0),
uniqueAtomMat::anyDuplicated.matrix(x.integer, MARGIN=0)
))
## benchmarking
if (require(microbenchmark)){
print(microbenchmark(base::duplicated.matrix(x.double)))
print(microbenchmark(uniqueAtomMat::duplicated.matrix(x.double)))
print(microbenchmark(base::duplicated.matrix(x.character)))
print(microbenchmark(uniqueAtomMat::duplicated.matrix(x.character)))
}else{
print(system.time(replicate(5e3L, base::duplicated.matrix(x.double))))
print(system.time(replicate(5e3L, uniqueAtomMat::duplicated.matrix(x.double))))
print(system.time(replicate(5e3L, base::duplicated.matrix(x.character))))
print(system.time(replicate(5e3L, uniqueAtomMat::duplicated.matrix(x.character))))
}
}
\keyword{manip}
\keyword{logic}
|
#Load relevant libraries
library(dplyr)
library(data.table)
# Create working directory if necessary
if(!file.exists("~/data/")){
  dir.create("~/data/")
}
# Determine if dataset has been loaded to global environment
if(!exists("powerSubset", envir = globalenv())){
# Download and unzip the data
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, destfile = "~/data/household_power_consumption.zip")
unzip(zipfile = "~/data/household_power_consumption.zip", exdir = "~/data")
# Set working directory
datasetPath <- "~/data/"
setwd(file.path(datasetPath, "ExData_Plotting1"))
# Read data to R
powerDataset <- tbl_df(read.table(file.path(datasetPath,"household_power_consumption.txt"), header = TRUE, sep = ";",
na.strings = "?", colClasses = c("character", "character", rep("numeric",7))))
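  # na.strings = "?" maps the file's missing-value marker to NA, and fixing
  # colClasses up front speeds up parsing of this large (~2M row) file.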
# Convert Time variable to Time class
powerDataset$Time <- strptime(paste(powerDataset$Date, powerDataset$Time), "%d/%m/%Y %H:%M:%S")
# Convert Date variable to Date class
powerDataset$Date <- as.Date(powerDataset$Date, "%d/%m/%Y")
# Subset relevant data
powerSubset <- subset(powerDataset, Date == "2007-02-01" | Date == "2007-02-02")
}
# Launch png graphics device
png("plot2.png", width = 500, height = 500)
# Plot Global Active Power vs Time
with(powerSubset, plot(Time, Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l"))
# Close graphics device
dev.off()
|
/plot2.R
|
no_license
|
enwude/ExData_Plotting1
|
R
| false | false | 1,684 |
r
|
#Load relevant libraries
library(dplyr)
library(data.table)
# Create working directory if necessary
if(!file.exists("~/data/")){
  dir.create("~/data/")
}
# Determine if dataset has been loaded to global environment
if(!exists("powerSubset", envir = globalenv())){
# Download and unzip the data
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, destfile = "~/data/household_power_consumption.zip")
unzip(zipfile = "~/data/household_power_consumption.zip", exdir = "~/data")
# Set working directory
datasetPath <- "~/data/"
setwd(file.path(datasetPath, "ExData_Plotting1"))
# Read data to R
powerDataset <- tbl_df(read.table(file.path(datasetPath,"household_power_consumption.txt"), header = TRUE, sep = ";",
na.strings = "?", colClasses = c("character", "character", rep("numeric",7))))
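  # na.strings = "?" maps the file's missing-value marker to NA, and fixing
  # colClasses up front speeds up parsing of this large (~2M row) file.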
# Convert Time variable to Time class
powerDataset$Time <- strptime(paste(powerDataset$Date, powerDataset$Time), "%d/%m/%Y %H:%M:%S")
# Convert Date variable to Date class
powerDataset$Date <- as.Date(powerDataset$Date, "%d/%m/%Y")
# Subset relevant data
powerSubset <- subset(powerDataset, Date == "2007-02-01" | Date == "2007-02-02")
}
# Launch png graphics device
png("plot2.png", width = 500, height = 500)
# Plot Global Active Power vs Time
with(powerSubset, plot(Time, Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l"))
# Close graphics device
dev.off()
|
#' Calculations for a Single Generation Function in Non-Matrix Form
#'
#' This function allows you to calculate the pairwise C-score using the hypergeometric approach, a p-value for 'all lineages' contrast using chi-square, and the estimates of the effective proportion of adaptive loci for a data set with a single generation.
#'
#' @param paper the data in csv that you want to analyze, in a folder named data-in
#' @param environment The environment in which the experiment occurred
#' @param species Specify if the organism is "Sac" or "Ecoli_K12" or "Ecoli_O157-H7", or manually input the gene count of your species
#' @return a table with all the calculated information
#' @export
#' @examples
#singlegen_c_hyper("Author2018","YPD", "Sac")
#####################
singlegen_c_hyper <- function(paper, environment, species, numGenes = NA){
  data <- read_csv(file.path(getwd(), "data-in", paste0(paper, ".csv")))
  # Load the gene-count database before it is needed for the species lookup
  geneNumbers <- read_csv(file.path(getwd(),"data-in/GeneDatabase.csv"))
  if (species %in% geneNumbers$Species){
    numGenes <- filter(geneNumbers, Species == species)$NumGenes
  }
  if(is.na(numGenes)){
    prompt <- "Your species is unspecified or not in our database. How many genes does it have? \n"
    numGenes <- as.numeric(readline(prompt))
  }
  # library(tidyverse)
  # library(readr)
  # library(devtools)
  # library(dgconstraint)
  # library(Hmisc)
data.1 <- data %>%
arrange(Gene) %>%
drop_na(Gene) %>%
drop_na(Population)
num_genes <- length((unique(data.1$Gene)))
num_lineages <- length(unique(data.1$Population))
data.array <- array(0, dim =c(num_genes, num_lineages), dimnames = list(unique(data.1$Gene), unique(data.1$Population)))
for(i in 1:num_lineages) {
sub <- subset(data.1, data.1$Population == unique(data.1$Population)[i])
sub2 <- subset(sub, frequency > 0)
geneRows <- which(row.names(data.array) %in% sub2$Gene)
data.array[geneRows, i] <- 1
num_parallel <- data.frame(data.array, Count=rowSums(data.array, na.rm = FALSE, dims = 1), Genes = row.names(data.array))
}
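  # Only the final 'num_parallel' (built from the completed data.array) is
  # used below; recomputing it on every loop iteration is redundant but harmless.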
genes_parallel <- num_parallel %>%
as_tibble() %>%
filter(Count > 1)
Non_genes_parallel <- num_parallel %>%
as_tibble() %>%
filter(Count == 1)
num_parallel_genes <- nrow(genes_parallel)
num_non_parallel_genes <- nrow(Non_genes_parallel)
total_genes <- num_non_parallel_genes + num_parallel_genes
parallel_genes <- paste0(genes_parallel$Genes, collapse=", ")
full_matrix <- rbind(data.array, array(0,c(numGenes-total_genes,ncol(data.array))))
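  # Pad the mutation matrix with all-zero rows up to the genome size so that
  # genes never hit in any lineage still enter the constraint calculations.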
  # These statistics are computed once for a single generation
  c_hyper <- pairwise_c_hyper(full_matrix)
  p_chisq <- allwise_p_chisq(full_matrix, num_permute = 200)
  estimate <- estimate_pa(full_matrix, ndigits = 4, show.plot = T)
c_hyper[c_hyper <= 0] <- 0
c_hyper[c_hyper == "NaN"] <- 0
df <- tibble( paper = paper, environment = environment, c_hyper = round(c_hyper, 3), p_chisq, estimate = round(estimate, 3) ,N_genes.notParallel= num_non_parallel_genes, N_genes.parallel=num_parallel_genes, parallel_genes)
newdir <- file.path(getwd(), "data-out")
if (!file.exists(newdir)){
dir.create(newdir, showWarnings = FALSE)
cat(paste("\n\tCreating new directory: ", newdir), sep="")
}
filename <- file.path(getwd(), "data-out", paste(paper, "_Analysis.csv", sep=""))
write.csv(df, file=filename, row.names=FALSE)
}
|
/R/Single_NonMatrix.R
|
no_license
|
samyeaman/dgconstraint
|
R
| false | false | 3,276 |
r
|
#' Calculations for a Single Generation Function in Non-Matrix Form
#'
#' This function allows you to calculate the pairwise C-score using the hypergeometric approach, a p-value for 'all lineages' contrast using chi-square, and the estimates of the effective proportion of adaptive loci for a data set with a single generation.
#'
#' @param paper the data in csv that you want to analyze, in a folder named data-in
#' @param environment The environment in which the experiment occurred
#' @param species Specify if the organism is "Sac" or "Ecoli_K12" or "Ecoli_O157-H7", or manually input the gene count of your species
#' @return a table with all the calculated information
#' @export
#' @examples
#singlegen_c_hyper("Author2018","YPD", "Sac")
#####################
singlegen_c_hyper <- function(paper, environment, species, numGenes = NA){
  data <- read_csv(file.path(getwd(), "data-in", paste0(paper, ".csv")))
  # Load the gene-count database before it is needed for the species lookup
  geneNumbers <- read_csv(file.path(getwd(),"data-in/GeneDatabase.csv"))
  if (species %in% geneNumbers$Species){
    numGenes <- filter(geneNumbers, Species == species)$NumGenes
  }
  if(is.na(numGenes)){
    prompt <- "Your species is unspecified or not in our database. How many genes does it have? \n"
    numGenes <- as.numeric(readline(prompt))
  }
  # library(tidyverse)
  # library(readr)
  # library(devtools)
  # library(dgconstraint)
  # library(Hmisc)
data.1 <- data %>%
arrange(Gene) %>%
drop_na(Gene) %>%
drop_na(Population)
num_genes <- length((unique(data.1$Gene)))
num_lineages <- length(unique(data.1$Population))
data.array <- array(0, dim =c(num_genes, num_lineages), dimnames = list(unique(data.1$Gene), unique(data.1$Population)))
for(i in 1:num_lineages) {
sub <- subset(data.1, data.1$Population == unique(data.1$Population)[i])
sub2 <- subset(sub, frequency > 0)
geneRows <- which(row.names(data.array) %in% sub2$Gene)
data.array[geneRows, i] <- 1
num_parallel <- data.frame(data.array, Count=rowSums(data.array, na.rm = FALSE, dims = 1), Genes = row.names(data.array))
}
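  # Only the final 'num_parallel' (built from the completed data.array) is
  # used below; recomputing it on every loop iteration is redundant but harmless.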
genes_parallel <- num_parallel %>%
as_tibble() %>%
filter(Count > 1)
Non_genes_parallel <- num_parallel %>%
as_tibble() %>%
filter(Count == 1)
num_parallel_genes <- nrow(genes_parallel)
num_non_parallel_genes <- nrow(Non_genes_parallel)
total_genes <- num_non_parallel_genes + num_parallel_genes
parallel_genes <- paste0(genes_parallel$Genes, collapse=", ")
full_matrix <- rbind(data.array, array(0,c(numGenes-total_genes,ncol(data.array))))
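  # Pad the mutation matrix with all-zero rows up to the genome size so that
  # genes never hit in any lineage still enter the constraint calculations.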
  # These statistics are computed once for a single generation
  c_hyper <- pairwise_c_hyper(full_matrix)
  p_chisq <- allwise_p_chisq(full_matrix, num_permute = 200)
  estimate <- estimate_pa(full_matrix, ndigits = 4, show.plot = T)
c_hyper[c_hyper <= 0] <- 0
c_hyper[c_hyper == "NaN"] <- 0
df <- tibble( paper = paper, environment = environment, c_hyper = round(c_hyper, 3), p_chisq, estimate = round(estimate, 3) ,N_genes.notParallel= num_non_parallel_genes, N_genes.parallel=num_parallel_genes, parallel_genes)
newdir <- file.path(getwd(), "data-out")
if (!file.exists(newdir)){
dir.create(newdir, showWarnings = FALSE)
cat(paste("\n\tCreating new directory: ", newdir), sep="")
}
filename <- file.path(getwd(), "data-out", paste(paper, "_Analysis.csv", sep=""))
write.csv(df, file=filename, row.names=FALSE)
}
|
# v2.0 In Progress
# 01. Libraries
require(caret)
require(corrplot)
#require(Rtsne)
require(xgboost)
require(stats)
require(knitr)
require(ggplot2)
knitr::opts_chunk$set(cache=TRUE)
require(DiagrammeR)
require(plyr)
require(dplyr)
require(sqldf)
require(reshape)
require(tidyr)
require(gbm)
# rm(list=ls())
setwd("C:/Users/padepu/Documents/R")
# 02. Set Seed
# Fix the RNG seed so the results are reproducible
set.seed(546)
# 03. Import source data files
# Importing data into R
train <- read.csv("./Telstra/train.csv" , h=TRUE, sep=",")
test <- read.csv("./Telstra/test.csv" , h=TRUE, sep=",")
event <- read.csv("./Telstra/event_type.csv" , h=TRUE, sep=",")
log <- read.csv("./Telstra/log_feature.csv" , h=TRUE, sep=",")
resource <- read.csv("./Telstra/resource_type.csv", h=TRUE, sep=",")
severity <- read.csv("./Telstra/severity_type.csv", h=TRUE, sep=",")
head(event)
# 04. Add a placeholder target variable to the test data
test$fault_severity <- -1
df_all <- rbind(train,test)
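# Stacking train and test (test rows flagged with fault_severity = -1) lets
# the same feature engineering below run once over both sets.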
head(log,2)
FeatureFreqs <- sqldf("select log_feature, count(*) as featureFreq from log group by log_feature")
head(FeatureFreqs,2)
logFeatureFreqs <- merge(log,FeatureFreqs, by= "log_feature", all.x = TRUE)
logFeatureFreqs$volume <- NULL
logFeatureFreqs$log_feature <- NULL
logFeatureFreqs <- sqldf("select id , sum(featureFreq) as TotalfeatureFreq
, MAX(featureFreq) as MaxfeatureFreq
, MIN(featureFreq) as MinfeatureFreq
, AVG(featureFreq) as AvgfeatureFreq
from logFeatureFreqs group by id ")
logFeatureFreqs$DifffeatureFreq <- logFeatureFreqs$MaxfeatureFreq - logFeatureFreqs$MinfeatureFreq
# logFeatureFreqs$TotalfeatureFreq <- log(logFeatureFreqs$TotalfeatureFreq + 1)
# logFeatureFreqs$MaxfeatureFreq <- log(logFeatureFreqs$MaxfeatureFreq + 1)
# logFeatureFreqs$MinfeatureFreq <- log(logFeatureFreqs$MinfeatureFreq + 1)
# logFeatureFreqs$AvgfeatureFreq <- log(logFeatureFreqs$AvgfeatureFreq + 1)
#
# logFeatureFreqs$NormfeatureFreq <- logFeatureFreqs$MaxfeatureFreq - logFeatureFreqs$MinfeatureFreq
head(logFeatureFreqs,2)
# head(df_all,2)
head(event,2)
EventFreqs <- sqldf("select event_type, count(*) as eventFreq from event group by event_type")
head(EventFreqs,2)
eventTypeFreqs <- merge(event,EventFreqs, by= "event_type", all.x = TRUE)
eventTypeFreqs$event_type <- NULL
eventTypeFreqs <- sqldf("select id , sum(eventFreq) as TotaleventFreq
, MAX(eventFreq) as MaxeventFreq
, MIN(eventFreq) as MineventFreq
, AVG(eventFreq) as AvgeventFreq
from eventTypeFreqs group by id ")
eventTypeFreqs$DiffeventFreq <- eventTypeFreqs$MaxeventFreq - eventTypeFreqs$MineventFreq
head(eventTypeFreqs,2)
head(resource,2)
ResourceFreqs <- sqldf("select resource_type, count(*) as resourceFreq from resource group by resource_type")
head(ResourceFreqs,2)
resourceTypeFreqs <- merge(resource,ResourceFreqs, by= "resource_type", all.x = TRUE)
resourceTypeFreqs$resource_type <- NULL
resourceTypeFreqs <- sqldf("select id , sum(resourceFreq) as TotalresourceFreq
, MAX(resourceFreq) as MaxresourceFreq
, MIN(resourceFreq) as MinresourceFreq
, AVG(resourceFreq) as AvgresourceFreq
from resourceTypeFreqs group by id ")
resourceTypeFreqs$DiffresourceFreq <- resourceTypeFreqs$MaxresourceFreq - resourceTypeFreqs$MinresourceFreq
head(resourceTypeFreqs)
# merging data
Moves <- merge(event,log,by="id" ,all = T)
Moves <- merge(Moves,resource,by="id" ,all = T)
Moves <- merge(Moves,severity,by="id" ,all = T)
df_all_combinedForMovings <- merge(df_all,Moves,by="id" ,all = T)
df_all_combinedForMovings$location <- as.integer(gsub("location ","",df_all_combinedForMovings$location))
df_all_combinedForMovings$event_type <- as.integer(gsub("event_type ","",df_all_combinedForMovings$event_type))
df_all_combinedForMovings$log_feature <- as.integer(gsub("feature ","",df_all_combinedForMovings$log_feature))
df_all_combinedForMovings$resource_type <- as.integer(gsub("resource_type ","",df_all_combinedForMovings$resource_type))
df_all_combinedForMovings$severity_type <- as.integer(gsub("severity_type ","",df_all_combinedForMovings$severity_type))
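# With the text prefixes stripped, each type ID is an integer, so the SQL
# below can take min/max/diff/mean over the type codes per id.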
# head(df_all_combinedForMovings,2)
Movings <- sqldf("SELECT id, location, MAX(event_type) Maxevent_type,
MIN(event_type) Minevent_type,
(MAX(event_type) - MIN(event_type) ) Diffevent_type,
MAX(log_feature) Maxlog_feature,
MIN(log_feature) Minlog_feature,
(MAX(log_feature) - MIN(log_feature) ) Difflog_feature,
MAX(resource_type) Maxresource_type,
MIN(resource_type) Minresource_type,
(MAX(resource_type) - MIN(resource_type) ) Diffresource_type,
AVG(event_type) Meanevent_Type,
avg(resource_type) + avg(event_type) TotalEventResource,
avg(resource_type) + avg(log_feature) TotalLogResource,
avg(event_type) + avg(log_feature) TotalEventLog
--,
-- avg(resource_type) + avg(event_type) + avg(log_feature) TotalAllTest
-- ,
-- AVG(resource_type) Avgresource_type,
-- AVG(log_feature) Avglog_feature,
-- MAX(volume) as MaxVolume,
-- MIN(volume) as MinVolume,
-- stdev(log_feature) as Stdlog_feature,
-- stdev(volume) as StdVolume
-- ,
-- AVG(log_feature) Meanlog_feature
-- (( MAX(volume) - MIN(volume) )/stdev(volume)) as NormVolume
--,
-- MIN(volume) as MinVolume,
-- stdev(volume) as StdVolume
-- MAX(severity_type) Maxseverity_type,
-- MIN(severity_type) Minseverity_type,
-- (MAX(severity_type) - MIN(severity_type) ) Diffseverity_type,
-- COUNT(*) AS RowCount
FROM df_all_combinedForMovings GROUP BY id, location")
Movings[is.na(Movings)] <- 0
#Movins$TotalVolume <- log(Movins$TotalVolume)
M <- cor(Movings)
corrplot.mixed(M)
head(Movings)
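# The next blocks one-hot encode event/resource/severity types: set an
# indicator column, spread() the long table to wide (one column per type),
# replace the NAs left by spread() with 0, and (for events/resources) count
# the non-zero columns per id as a simple cardinality feature.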
event$eventtype <- 1 #as.integer(gsub("event_type ","",event$event_type))
head(event)
events <- spread(event, event_type , eventtype )
head(events)
sqldf("select * from events where id = 10024")
events[is.na(events)] <- 0
sqldf("select * from events where id = 10024")
# dim(events)
# names(events[,2:54])
events$eventsCount <- rowSums(events[,2:54]>0)
head(resource)
resource$resourcetype <- 1 # as.integer(gsub("resource_type ","",resource$resource_type))
head(resource)
resources <- spread(resource, resource_type , resourcetype )
head(resources)
sqldf("select * from resources where id = 10024")
resources[is.na(resources)] <- 0
sqldf("select * from resources where id = 10024")
# dim(resources)
# names(resources[,2:11])
resources$resourcesCount <-rowSums(resources[,2:11]>0)
severitytypeInts <- severity
head(severity)
severity$severitytype <- 1 #as.integer(gsub("severity_type ","",severity$severity_type))
head(severity)
severities <- spread(severity, severity_type , severitytype )
head(severities)
sqldf("select * from severities where id = 10024")
severities[is.na(severities)] <- 0
sqldf("select * from severities where id = 10024")
severity$SeverityInt <- as.integer(gsub("severity_type ","",severity$severity_type))
severityhelper <- sqldf("SELECT id , SeverityInt as SeverityInt, case when severity_type in ('severity_type 1' , 'severity_type 2') then 1 else 0 end as severityhelper
FROM severity")
#head(severityhelper)
severitytypeInts$severity_type <- as.integer(gsub("severity_type ","",severitytypeInts$severity_type))
head(log)
logs <- spread(log, log_feature , volume )
head(logs)
sqldf("select * from logs where id = 10024")
logs[is.na(logs)] <- 0
sqldf("select * from logs where id = 10024")
# dim(logs)
# names(logs [,2:387])
logs$logsCount <-rowSums(logs[,2:387]>0)
logs$logsVolume <- rowSums(logs[,2:387])
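# logsCount  = number of distinct log features reported for each id
# logsVolume = total reported volume across all features for each id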
logseverities <- merge(log,severitytypeInts ,by="id" ,all = T)
logseverities$severityXvolume <- logseverities$volume * logseverities$severity_type
head(logseverities)
logseverityvolume <- sqldf("SELECT id , SUM(severityXvolume) as severityXvolume from logseverities group by id")
head(logseverityvolume)
# head(train,2)
# head(test ,2)
# 04. Set target variable to test data
# test$fault_severity <- -1
#
# df_all <- rbind(train,test)
# head(df_all,2)
# merging data
dim(events)
dim(logs)
sessionsdata <- merge(events,logs,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,resources,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,severities,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,severityhelper,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,logseverityvolume,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,logFeatureFreqs,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,eventTypeFreqs,by="id" ,all = T)
dim(sessionsdata)
sessionsdata <- merge(sessionsdata,resourceTypeFreqs,by="id" ,all = T)
dim(sessionsdata)
dim(df_all)
df_all_combined <- merge(df_all,sessionsdata,by="id" ,all = T)
dim(df_all_combined)
# logstats <- sqldf("SELECT id, SUM(volume) logsvolume, COUNT(*) as logsCount, AVG(volume) as MeanVolume, MAX(volume) Maxvolume, MIN(volume) Minvolume,
# stdev(volume) Stdvolume
# FROM log GROUP BY 1")
df_all_combined$location <- as.numeric(gsub("location",'',df_all_combined$location))
df_all_combined$Rows <- df_all_combined$eventsCount * df_all_combined$resourcesCount * df_all_combined$logsCount
df_all_combined$RowBins <- ifelse(df_all_combined$Rows > 100 , 1, 0)
# #
df_all_combined$LocationBins <- ifelse(df_all_combined$location > 550 , 1, 0)
df_all_combined$RowsSum <- df_all_combined$eventsCount + df_all_combined$resourcesCount + df_all_combined$logsCount
#
#df_all_combined$Rows <- NULL
#head(df_all_combined,2)
#head(Movings,2)
Locationfrequencies <- sqldf("select location , count(*) as LocationFreq from df_all_combined group by location ")
df_all_combined <- merge(df_all_combined,Locationfrequencies,by="location" ,all.x = T)
df_all_combined <- merge(df_all_combined, Movings, by=(c("id" , "location" )) ,all = T)
# df_all_combined %>%
# group_by(location) %>%
# mutate(locationTimeOrder = row_number(-id))
#
# df_all_combined <- transform(df_all_combined,
# locationTimeOrder = ave(id, location,
# FUN = function(x) rank(-x, ties.method = "first")))
#
# df_all_combined$locationTimeOrder
df_all_combined$allFreq <- (df_all_combined$LocationFreq + df_all_combined$TotalresourceFreq + df_all_combined$TotaleventFreq )
#df_all_combined$MaxMeanEventDiff <- df_all_combined$Maxevent_type - df_all_combined$Meanevent_Type
#df_all_combined$MinMeanEventDiff <- df_all_combined$Meanevent_type - df_all_combined$Minevent_Type
# df_all_combined$LogsFeatureFreq <- df_all_combined$TotalfeatureFreq/df_all_combined$logsCount
# df_all_combined$logsCountBin <- ifelse(df_all_combined$logsCount > 13 , 1 , 0)
# df_all_combined$VolumeToEvents <- df_all_combined$logsVolume/df_all_combined$eventsCount
# df_all_combined$VolumeToResources <- df_all_combined$logsVolume/df_all_combined$resourcesCount
# df_all_combined$severityXvolume <- log(df_all_combined$severityXvolume)
#
# Eventfrequencies <- sqldf("select event_type , count(*) as EventFreq from event group by event_type")
#
# EventFreqCounts <- merge(event, Eventfrequencies, by="event_type",all.x = T)
#
# EventFreqCounts$eventtype <- NULL
#
# EventFreqCounts$eventtype <- NULL
#
# EventFreqCounts <- sqldf("select id , sum(EventFreq) EventFreq from EventFreqCounts group by id")
#
# df_all_combined <- merge(df_all_combined,EventFreqCounts,by="id" ,all.x = T)
#
# df_all_combined$LocationToEventFreqRatio <- df_all_combined$EventFreq / df_all_combined$LocationFreq
# head(EventFreqCounts)
# summary(df_all_combined$LocationToEventFreqRatio)
# Locationfrequencies <- sqldf("select location , count(*) as LocationFreq from df_all_combined group by location ")
# sqldf("select id, fault_severity from df_all_combined where id IN(10024,1,10059)")
Fulltrain <- df_all_combined[which(df_all_combined$fault_severity > -1), ]
Fulltest <- df_all_combined[which(df_all_combined$fault_severity < 0), ]
Fulltest$fault_severity <- NULL
# #######################################################################################################
# # Data Visualisations
## 3D Scatterplot
# require(scatterplot3d)
# scatterplot3d(Fulltrain$location,Fulltrain$fault_severity,Fulltrain$Maxevent_type, main="3D Scatterplot")
#
# require(rgl)
# plot3d(Fulltrain$location,Fulltrain$fault_severity,Fulltrain$Maxevent_type, col="red", size=3)
#
# pairs(iris[1:4], main = "Anderson's Iris Data -- 3 species",
# pch = 21, bg = c("red", "green3", "blue")[unclass(iris$Species)])
#
# # Using formula
#
# pairs(~Fulltrain$location + Fulltrain$fault_severity + Fulltrain$Maxevent_type, main = "Fault Severity",
# pch = 21, bg = c("red", "green3", "blue")[unclass(Fulltrain$fault_severity)])
# M <- cor(Fulltrain[c(3,462:468,470:483)])
# corrplot.mixed(M)
# computationally expensive
# require(GGally)
# ggpairs(Fulltrain[c(1,2,3)], colour="fault_severity", alpha=0.4)
# head(Fulltrain[c(1,2,3)])
#
# filter(Fulltrain, location == "601")
########################################################################################################
# names(Fulltrain)
featureNames <- names(Fulltrain [-c(1,3)]) # ,57,444
names(Fulltrain)
featureNames <- names(Fulltrain[-c(1,3)])#,480
names(Fulltest)
TestfeatureNames <- names(Fulltest[-c(1)])#,479
Fulltrain$fault_severity <- ifelse(Fulltrain$fault_severity==0,'Zero', ifelse(Fulltrain$fault_severity==1,'One', 'Two'))
Fulltrain$fault_severity <- as.factor(Fulltrain$fault_severity)
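# caret requires class labels that are valid R names when classProbs = TRUE,
# hence the recode of 0/1/2 to Zero/One/Two above.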
#
# # check for zero variance
# zero.var = nearZeroVar(Fulltrain[c(2,4:453)], saveMetrics=TRUE)
# zero.var[zero.var[,"zeroVar"] == 0, ]
# nzv <- zero.var[zero.var[,"zeroVar"] + zero.var[,"nzv"] > 0, ]
# zero.var
# filter(zero.var, nzv$zeroVar == FALSE)
# badCols <- nearZeroVar(Fulltrain[c(2,4:453)])
# print(paste("Fraction of nearZeroVar columns:", round(length(badCols)/length(Fulltrain[c(2,4:453)]),4)))
#
# # remove those "bad" columns from the training and cross-validation sets
#
# train <- train[, -badCols]
# test <- test[, -badCols]
#
# # corrPlot
# featurePlot(totaltrain[c(2,458,460:462)], outcome.org, "strip")
#
# head(train.matrix)
require(ranger)
gbmCtrl <- trainControl(method="cv",                       # k-fold cross validation (no repeats)
                     #repeats=5,
                     number = 2,                           # 2 folds, kept small for speed
                     summaryFunction=multiClassSummary,    # multi-class metrics, incl. logLoss
                     classProbs=TRUE,
                     savePredictions = TRUE,
                     verboseIter=TRUE)
gbmGrid <- expand.grid(mtry = 400 #,
# interaction.depth = c(30), # seq(1,8,by=2), # look at tree depths from 1 to 7
# n.trees=c(600), #seq(10,100,by=5), # let iterations go from 10 to 100
# shrinkage=c(0.01),
# n.minobsinnode = c(10)
)
#set.seed(546)
# registerDoParallel(4) # Register a parallel backend for train
# getDoParWorkers()
system.time(ranger.tune <- train(x=Fulltrain[,featureNames],y=Fulltrain$fault_severity,
method = "ranger",
                                 metric = "logLoss",
trControl = gbmCtrl,
tuneGrid=gbmGrid,
verbose=TRUE))
summary(ranger.tune)
ranger.tune$results
head(ranger.tune$pred)
# Training error rate
confusionMatrix(predict(ranger.tune, Fulltrain[,featureNames]), Fulltrain$fault_severity)
# Training accuracy logged across runs: 0.6931, 0.8859, 0.8832
# predictedClasses <- predict(svm.c, Fulltest[,TestfeatureNames] )
predictedProbs <- predict(ranger.tune, Fulltest[,TestfeatureNames], type = "prob")
head(predictedProbs)
# head(prediction)
# sqldf("select * from prediction where id = 1442")
# names(prediction)
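# predict(type = "prob") returns columns in alphabetical factor-level order
# (One, Two, Zero), hence the 3/1/2 column mapping to predict_0/1/2 below.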
prediction <- cbind( id = Fulltest$id , predict_0 = predictedProbs[,3] , predict_1 = predictedProbs[,1], predict_2 = predictedProbs[,2] )
write.csv(prediction, "submission50.csv", quote=FALSE, row.names = FALSE)
#write.table(prediction,file="submission21.csv", append=TRUE,sep=",",col.names=TRUE,row.names=FALSE)
|
/Telstra/Telstra.Ranger.v20.R
|
no_license
|
PraveenAdepu/kaggle_competitions
|
R
| false | false | 17,271 |
r
|
##' Get All Area Data
##'
##' Pulls area sown and area harvested observations for all countries, items,
##' and years from the agriculture/aproduction dataset, pivoted so that yields
##' can later be computed with vectorized operations.
##'
##' @export
getAllAreaData = function(){
## Setups
areaKey = DatasetKey(
domain = "agriculture",
dataset = "aproduction",
dimensions = list(
Dimension(name = areaVar,
keys = getAllCountries()),
Dimension(name = elementVar,
keys = c(areaSownElementCode,areaHarvestedElementCode)),
Dimension(name = itemVar,
keys = getAllItemCPC()),
Dimension(name = yearVar,
keys = getAllYears())
)
)
## Pivot to vectorize yield computation
newPivot = c(
Pivoting(code = areaVar, ascending = TRUE),
Pivoting(code = itemVar, ascending = TRUE),
Pivoting(code = yearVar, ascending = FALSE),
Pivoting(code = elementVar, ascending = TRUE)
)
## Query the data
query = GetData(
key = areaKey,
flags = TRUE,
normalized = FALSE,
pivoting = newPivot
)
query[, timePointYears := as.numeric(timePointYears)]
setkeyv(query, c("geographicAreaM49", "measuredItemCPC", "timePointYears"))
query
}
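## Minimal usage sketch (illustrative only: assumes an authenticated faosws
## session and that the dimension variables and helper functions referenced
## above are defined in scope):
# areaData <- getAllAreaData()
# areaData[timePointYears >= 2010]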
|
/R/getAllAreaData.R
|
no_license
|
SWS-Methodology/faoswsSeed
|
R
| false | false | 1,092 |
r
|
b2a26f59e67a9c106e50ce66faefd941 ev-pr-6x6-9-5-0-1-2-lg.qdimacs 3852 31496
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Ansotegui/evader-pursuer-6x6-logarithmic/ev-pr-6x6-9-5-0-1-2-lg/ev-pr-6x6-9-5-0-1-2-lg.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 74 |
r
|
## Overall description:
## makeCacheMatrix() builds a list of closures (set/get/setsolve/getsolve)
## that hold a matrix and cache its inverse in the enclosing environment.
## cacheSolve() returns the cached inverse when one exists; otherwise it
## computes the inverse, stores it via setsolve(), and returns it.

## makeCacheMatrix: store a matrix together with a cached inverse.
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
  setsolve <- function(inv) inverse <<- inv
getsolve <- function() inverse
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## cacheSolve: return the inverse of the matrix held in 'x'. It first checks
## the cache via getsolve(); only on a cache miss does it call solve(),
## store the result with setsolve(), and return it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$getsolve()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data, ...)
x$setsolve(inverse)
inverse
}
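## Usage sketch (illustrative):
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(m)   # computes, caches, and returns the inverse
# cacheSolve(m)   # prints "getting cached data" and returns the cached copy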
|
/cachematrix.R
|
no_license
|
mirrescholte/ProgrammingAssignment2
|
R
| false | false | 1,566 |
r
|
library(tidyverse); library(here); library(extrafont)
merg_90 <- read_csv(here("final", "merg_90.csv")) %>%
mutate_at(vars(geoid), as.character)
merg_00 <- read_csv(here("final", "merg_00.csv")) %>%
mutate_at(vars(geoid), as.character)
merg_10 <- read_csv(here("final", "merg_10.csv")) %>%
mutate_at(vars(geoid), as.character)
merg_17 <- read_csv(here("final", "merg_17.csv")) %>%
mutate_at(vars(geoid), as.character)
ts <- bind_rows(merg_90, merg_00) %>%
bind_rows(., merg_10) %>%
bind_rows(., merg_17)
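# The aggregations below sum the count variables and take population-weighted
# means of the rate/level variables (mhi, mhv, own, pov*, rm, em, unemp, vhu).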
ts_cty <- ts %>% mutate(County = case_when(str_sub(geoid, 3, 5) == "005" ~ "Burlington",
str_sub(geoid, 3, 5) == "007" ~ "Camden",
str_sub(geoid, 3, 5) == "015" ~ "Gloucester",
str_sub(geoid, 3, 5) == "021" ~ "Mercer",
str_sub(geoid, 3, 5) == "017" ~ "Bucks",
str_sub(geoid, 3, 5) == "029" ~ "Chester",
str_sub(geoid, 3, 5) == "045" ~ "Delaware",
str_sub(geoid, 3, 5) == "091" ~ "Montgomery",
str_sub(geoid, 3, 5) == "101" ~ "Philadelphia")) %>%
group_by(County, year) %>%
  summarize(hu = sum(hu, na.rm = TRUE),
mhi = weighted.mean(mhi, pop, na.rm = TRUE),
mhv = weighted.mean(mhv, pop, na.rm = TRUE),
own = weighted.mean(own, pop, na.rm = TRUE),
pov199 = weighted.mean(pov199, pop, na.rm = TRUE),
pov99 = weighted.mean(pov99, pop, na.rm = TRUE),
rm = weighted.mean(rm, pop, na.rm = TRUE),
em = weighted.mean(em, pop, na.rm = TRUE),
unemp = weighted.mean(unemp, pop, na.rm = TRUE),
vhu = weighted.mean(vhu, pop, na.rm = TRUE),
pop = sum(pop, na.rm = TRUE))
ts_agg <- ts %>% mutate(Subregion = case_when(str_sub(geoid, 3, 5) %in% c("005", "007", "015", "021") ~ "NJ Suburban Counties",
str_sub(geoid, 3, 5) %in% c("017", "029", "045", "091") ~ "PA Suburban Counties",
str_sub(geoid, 3, 5) == "101" ~ "Philadelphia")) %>%
group_by(Subregion, year) %>%
  summarize(hu = sum(hu, na.rm = TRUE),
mhi = weighted.mean(mhi, pop, na.rm = TRUE),
mhv = weighted.mean(mhv, pop, na.rm = TRUE),
own = weighted.mean(own, pop, na.rm = TRUE),
pov199 = weighted.mean(pov199, pop, na.rm = TRUE),
pov99 = weighted.mean(pov99, pop, na.rm = TRUE),
rm = weighted.mean(rm, pop, na.rm = TRUE),
em = weighted.mean(em, pop, na.rm = TRUE),
unemp = weighted.mean(unemp, pop, na.rm = TRUE),
vhu = weighted.mean(vhu, pop, na.rm = TRUE),
pop = sum(pop, na.rm = TRUE))
ts_reg <- ts %>%
group_by(year) %>%
  summarize(hu = sum(hu, na.rm = TRUE),
mhi = weighted.mean(mhi, pop, na.rm = TRUE),
mhv = weighted.mean(mhv, pop, na.rm = TRUE),
own = weighted.mean(own, pop, na.rm = TRUE),
pov199 = weighted.mean(pov199, pop, na.rm = TRUE),
pov99 = weighted.mean(pov99, pop, na.rm = TRUE),
rm = weighted.mean(rm, pop, na.rm = TRUE),
em = weighted.mean(em, pop, na.rm = TRUE),
unemp = weighted.mean(unemp, pop, na.rm = TRUE),
vhu = weighted.mean(vhu, pop, na.rm = TRUE),
pop = sum(pop, na.rm = TRUE),
County = "Region")
ts_cty <- bind_rows(ts_cty, ts_reg)
# Export
write_csv(ts_agg, here("final", "ts_subregion.csv"))
write_csv(ts_cty, here("final", "ts_cty.csv"))
# By Subregion
ggplot(ts_agg, aes(x = year, y = pop)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Change in population 1990-2017",
x = "Year",
y = "Total population (1000s)")
ggsave(here("figs", "d_pop_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = hu)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Change in housing units 1990-2017",
x = "Year",
y = "Total housing units (1000s)")
ggsave(here("figs", "d_hu_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = mhi)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median household income 1990-2017",
x = "Year",
y = "Median household income ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhi_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = mhv)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median home value 1990-2017",
x = "Year",
y = "Median home value ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhv_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = own)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Homeownership 1990-2017",
x = "Year",
y = "Percentage owner-occupied housing units")
ggsave(here("figs", "d_own_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = pov199)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Low-income residents 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 199% FPL")
ggsave(here("figs", "d_pov199_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = pov99)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Residents in poverty 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 100% FPL")
ggsave(here("figs", "d_pov99_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = rm)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Racial minority residents 1990-2017",
x = "Year",
y = "Percentage racial minority residents")
ggsave(here("figs", "d_rm_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = em)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Ethnic minority residents 1990-2017",
x = "Year",
y = "Percentage ethnic minority residents")
ggsave(here("figs", "d_em_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = unemp)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Unemployment 1990-2017",
x = "Year",
y = "Percentage unemployed residents in the labor force")
ggsave(here("figs", "d_unemp_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = vhu)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Vacant housing units 1990-2017",
x = "Year",
y = "Percentage vacant housing units")
ggsave(here("figs", "d_vhu_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
# By County
ggplot(ts_cty %>% filter(County != "Region"), aes(x = year, y = pop)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
# scale_color_manual(values = c("red")) +
labs(title = "Change in population 1990-2017",
x = "Year",
y = "Total population (1000s)")
ggsave(here("figs", "d_pop.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty %>% filter(County != "Region"), aes(x = year, y = hu)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Change in housing units 1990-2017",
x = "Year",
y = "Total housing units (1000s)")
ggsave(here("figs", "d_hu_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = mhi)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median household income 1990-2017",
x = "Year",
y = "Median household income ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhi_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = mhv)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median home value 1990-2017",
x = "Year",
y = "Median home value ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhv_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = own)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Homeownership 1990-2017",
x = "Year",
y = "Percentage owner-occupied housing units")
ggsave(here("figs", "d_own_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = pov199)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Low-income residents 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 199% FPL")
ggsave(here("figs", "d_pov199_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = pov99)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Residents in poverty 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 100% FPL")
ggsave(here("figs", "d_pov99_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = rm)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Racial minority residents 1990-2017",
x = "Year",
y = "Percentage racial minority residents")
ggsave(here("figs", "d_rm_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = em)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Ethnic minority residents 1990-2017",
x = "Year",
y = "Percentage ethnic minority residents")
ggsave(here("figs", "d_em_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = unemp)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Unemployment 1990-2017",
x = "Year",
y = "Percentage unemployed residents in the labor force")
ggsave(here("figs", "d_unemp_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = vhu)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Vacant housing units 1990-2017",
x = "Year",
y = "Percentage vacant housing units")
ggsave(here("figs", "d_vhu_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
|
/time_series.R
|
no_license
|
addisonlarson/SoP
|
R
| false | false | 13,934 |
r
|
library(tidyverse); library(here); library(extrafont)
merg_90 <- read_csv(here("final", "merg_90.csv")) %>%
mutate_at(vars(geoid), as.character)
merg_00 <- read_csv(here("final", "merg_00.csv")) %>%
mutate_at(vars(geoid), as.character)
merg_10 <- read_csv(here("final", "merg_10.csv")) %>%
mutate_at(vars(geoid), as.character)
merg_17 <- read_csv(here("final", "merg_17.csv")) %>%
mutate_at(vars(geoid), as.character)
ts <- bind_rows(merg_90, merg_00) %>%
bind_rows(., merg_10) %>%
bind_rows(., merg_17)
ts_cty <- ts %>% mutate(County = case_when(str_sub(geoid, 3, 5) == "005" ~ "Burlington",
str_sub(geoid, 3, 5) == "007" ~ "Camden",
str_sub(geoid, 3, 5) == "015" ~ "Gloucester",
str_sub(geoid, 3, 5) == "021" ~ "Mercer",
str_sub(geoid, 3, 5) == "017" ~ "Bucks",
str_sub(geoid, 3, 5) == "029" ~ "Chester",
str_sub(geoid, 3, 5) == "045" ~ "Delaware",
str_sub(geoid, 3, 5) == "091" ~ "Montgomery",
str_sub(geoid, 3, 5) == "101" ~ "Philadelphia")) %>%
group_by(County, year) %>%
summarize(hu = sum(hu, pop, na.rm = TRUE),
mhi = weighted.mean(mhi, pop, na.rm = TRUE),
mhv = weighted.mean(mhv, pop, na.rm = TRUE),
own = weighted.mean(own, pop, na.rm = TRUE),
pov199 = weighted.mean(pov199, pop, na.rm = TRUE),
pov99 = weighted.mean(pov99, pop, na.rm = TRUE),
rm = weighted.mean(rm, pop, na.rm = TRUE),
em = weighted.mean(em, pop, na.rm = TRUE),
unemp = weighted.mean(unemp, pop, na.rm = TRUE),
vhu = weighted.mean(vhu, pop, na.rm = TRUE),
pop = sum(pop, na.rm = TRUE))
ts_agg <- ts %>% mutate(Subregion = case_when(str_sub(geoid, 3, 5) %in% c("005", "007", "015", "021") ~ "NJ Suburban Counties",
str_sub(geoid, 3, 5) %in% c("017", "029", "045", "091") ~ "PA Suburban Counties",
str_sub(geoid, 3, 5) == "101" ~ "Philadelphia")) %>%
group_by(Subregion, year) %>%
summarize(hu = sum(hu, pop, na.rm = TRUE),
mhi = weighted.mean(mhi, pop, na.rm = TRUE),
mhv = weighted.mean(mhv, pop, na.rm = TRUE),
own = weighted.mean(own, pop, na.rm = TRUE),
pov199 = weighted.mean(pov199, pop, na.rm = TRUE),
pov99 = weighted.mean(pov99, pop, na.rm = TRUE),
rm = weighted.mean(rm, pop, na.rm = TRUE),
em = weighted.mean(em, pop, na.rm = TRUE),
unemp = weighted.mean(unemp, pop, na.rm = TRUE),
vhu = weighted.mean(vhu, pop, na.rm = TRUE),
pop = sum(pop, na.rm = TRUE))
ts_reg <- ts %>%
group_by(year) %>%
summarize(hu = sum(hu, pop, na.rm = TRUE),
mhi = weighted.mean(mhi, pop, na.rm = TRUE),
mhv = weighted.mean(mhv, pop, na.rm = TRUE),
own = weighted.mean(own, pop, na.rm = TRUE),
pov199 = weighted.mean(pov199, pop, na.rm = TRUE),
pov99 = weighted.mean(pov99, pop, na.rm = TRUE),
rm = weighted.mean(rm, pop, na.rm = TRUE),
em = weighted.mean(em, pop, na.rm = TRUE),
unemp = weighted.mean(unemp, pop, na.rm = TRUE),
vhu = weighted.mean(vhu, pop, na.rm = TRUE),
pop = sum(pop, na.rm = TRUE),
County = "Region")
ts_cty <- bind_rows(ts_cty, ts_reg)
# Export
write_csv(ts_agg, here("final", "ts_subregion.csv"))
write_csv(ts_cty, here("final", "ts_cty.csv"))
# By Subregion
ggplot(ts_agg, aes(x = year, y = pop)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Change in population 1990-2017",
x = "Year",
y = "Total population (1000s)")
ggsave(here("figs", "d_pop_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = hu)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Change in housing units 1990-2017",
x = "Year",
y = "Total housing units (1000s)")
ggsave(here("figs", "d_hu_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = mhi)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median household income 1990-2017",
x = "Year",
y = "Median household income ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhi_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = mhv)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median home value 1990-2017",
x = "Year",
y = "Median home value ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhv_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = own)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Homeownership 1990-2017",
x = "Year",
y = "Percentage owner-occupied housing units")
ggsave(here("figs", "d_own_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = pov199)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Low-income residents 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 199% FPL")
ggsave(here("figs", "d_pov199_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = pov99)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Residents in poverty 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 100% FPL")
ggsave(here("figs", "d_pov99_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = rm)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Racial minority residents 1990-2017",
x = "Year",
y = "Percentage racial minority residents")
ggsave(here("figs", "d_rm_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = em)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Ethnic minority residents 1990-2017",
x = "Year",
y = "Percentage ethnic minority residents")
ggsave(here("figs", "d_em_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = unemp)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Unemployment 1990-2017",
x = "Year",
y = "Percentage unemployed residents in the labor force")
ggsave(here("figs", "d_unemp_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_agg, aes(x = year, y = vhu)) + geom_line(aes(color = Subregion)) +
scale_color_manual(values = c("#F57D15", "#D44842", "#9F2A63")) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Vacant housing units 1990-2017",
x = "Year",
y = "Percentage vacant housing units")
ggsave(here("figs", "d_vhu_agg.png"), width = 7, height = 5, units = "in", dpi = 400)
# By County
ggplot(ts_cty %>% filter(County != "Region"), aes(x = year, y = pop)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
# scale_color_manual(values = c("red")) +
labs(title = "Change in population 1990-2017",
x = "Year",
y = "Total population (1000s)")
ggsave(here("figs", "d_pop.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty %>% filter(County != "Region"), aes(x = year, y = hu)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Change in housing units 1990-2017",
x = "Year",
y = "Total housing units (1000s)")
ggsave(here("figs", "d_hu_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = mhi)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median household income 1990-2017",
x = "Year",
y = "Median household income ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhi_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = mhv)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Median home value 1990-2017",
x = "Year",
y = "Median home value ($1000s), 2017 dollars")
ggsave(here("figs", "d_mhv_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = own)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Homeownership 1990-2017",
x = "Year",
y = "Percentage owner-occupied housing units")
ggsave(here("figs", "d_own_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = pov199)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Low-income residents 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 199% FPL")
ggsave(here("figs", "d_pov199_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = pov99)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Residents in poverty 1990-2017",
x = "Year",
y = "Percentage residents with incomes below 100% FPL")
ggsave(here("figs", "d_pov99_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = rm)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Racial minority residents 1990-2017",
x = "Year",
y = "Percentage racial minority residents")
ggsave(here("figs", "d_rm_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = em)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Ethnic minority residents 1990-2017",
x = "Year",
y = "Percentage ethnic minority residents")
ggsave(here("figs", "d_em_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = unemp)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Unemployment 1990-2017",
x = "Year",
y = "Percentage unemployed residents in the labor force")
ggsave(here("figs", "d_unemp_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
ggplot(ts_cty, aes(x = year, y = vhu)) + geom_line(aes(color = County)) +
scale_color_viridis_d(option = "inferno", alpha = 0.75) +
scale_y_continuous(limits = c(0,100), expand = c(0,0)) +
theme(text = element_text(family = "Segoe UI", color = "#666666")) +
labs(title = "Vacant housing units 1990-2017",
x = "Year",
y = "Percentage vacant housing units")
ggsave(here("figs", "d_vhu_cty.png"), width = 7, height = 5, units = "in", dpi = 400)
|
runname <- '0036_thetaC'
# Master Results Processor: Salus Raw Results to Maps and Spatial Summaries
# Jill Deines
# June 24 2018
# Goal: Take the SALUS raw daily output and derive:
# annual summaries for yield, irrigation, drainage, ET... for each experiment
# spatialize results by connecting experiments back to spatial grid cell key
# summarize regional results
# output includes tabular summaries and annual variable maps (as RData files)
# Packages Required
library(dplyr)
library(tidyr)
library(readr)
library(sp)
library(raster)
library(salustools)
# user variables -----------------------------------------------------------
# raw salus output directory (run will be subdirectory)
salusScratch <- '/mnt/scratch/deinesji/salus/'
salusHome <- '/mnt/home/deinesji/salus/'
startYear <- 2006 # excluding wheat planting, aka first year of harvest
endYear <- 2017
# aoi - sheridan, in aea
aoi <- readRDS(paste0(salusHome,'sheridan6_aea.rds'))
# Crops in runs:
crops <- c('MZ','WH','SG','SB','AL')
# end user vars ------------------------------------------------------------
# raw results dir
rawDir <- paste0(salusScratch, runname, '/results')
outDir <- paste0(salusHome, runname,'/results')
# vector of years wanted
yearsWanted <- startYear:endYear
# step 1: raw to annual --------------------------------------------------------
# Convert daily results to annual values, combining all experiments into one csv
# includes export
runResults <- salusRawToAnnual(rawDir, outDir, runname, startYear)
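# Quick sanity check before spatializing (optional). The column names here
# (SpeciesID, irrigation, GWAD, IRRC_mm) are inferred from how runResults is
# used below, not from salustools documentation:
# stopifnot(all(c("SpeciesID", "irrigation", "GWAD", "IRRC_mm") %in% names(runResults)))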
# step 2: expt to spatial ------------------------------------------------------
# directory of processed results
workingDir <- outDir
# load unique experiments and grid cell to Experiment code key
gridKey <- readRDS(
paste0(salusHome,'1_Experiments_gridCellKey_SD6_top7_aimhpa_20180618.rds'))
expts <- readRDS(
paste0(salusHome,'1_Experiments_SD6_top7_aimhpa_20180618.rds'))
# gmd4+ template raster grid (based on CDL clipped to study region boundary)
aeaProj <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"
template <- raster(nrow = 4359, ncol = 7108, crs = aeaProj,
xmn = -522870, xmx = -309630, ymn = 1777860,
ymx = 1908630)
template[] <- 1:ncell(template)
templateVector <- getValues(template)
# Yield - Break down by crop and irrigation status
# salus variable name for yield
yield <- 'GWAD'
# irrigated
irrCrops <- runResults %>% filter(irrigation == 'Y')
irrCropsList <- list()
for(m in 1:length(crops)){
# subset results for crop
crop <- crops[m]
cropdf <- irrCrops %>% filter(SpeciesID == crop)
cropStack <- spatializeAll(cropdf, yield, yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
irrCropsList[[crop]] <- cropStack
}
# save as RData
saveRDS(irrCropsList, file = paste0(workingDir, '/All_IrrigatedCrops_YieldMaps.rds'))
# rainfed
rainCrops <- runResults %>% filter(irrigation == 'N')
rainCropsList <- list()
for(m in 1:length(crops)){
# subset results for crop
crop <- crops[m]
cropdf <- rainCrops %>% filter(SpeciesID == crop)
cropStack <- spatializeAll(cropdf, yield, yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
rainCropsList[[crop]] <- cropStack
}
# save as RData
saveRDS(rainCropsList, file = paste0(workingDir, '/All_RainfedCrops_YieldMaps.rds'))
### Irrigation
irrStack <- spatializeAll(runResults, 'IRRC_mm', yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
saveRDS(irrStack, file = paste0(workingDir,'/Irr_stack.rds'))
### Precip
pcpStack <- spatializeAll(runResults, 'PREC_mm', yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
saveRDS(pcpStack, file = paste0(workingDir,'/Pcpt_stack.rds'))
### Recharge
rchStack <- spatializeAll(runResults, 'DRNC_mm', yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
saveRDS(rchStack, file = paste0(workingDir,'/Rch_stack.rds'))
### ET
ETStack <- spatializeAll(runResults, 'ETAC_mm', yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
saveRDS(ETStack, file = paste0(workingDir,'/etac_stack.rds'))
### soil evap
et2Stack <- spatializeAll(runResults, 'ESAC_mm', yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
saveRDS(et2Stack, file = paste0(workingDir,'/esac_stack.rds'))
### plant transpiration
et3Stack <- spatializeAll(runResults, 'EPAC_mm', yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
saveRDS(et3Stack, file = paste0(workingDir,'/epac_stack.rds'))
### runoff
runStack <- spatializeAll(runResults, 'ROFC_mm', yearsWanted, gridKey,
template, aoi, writeOut = 'N', outFolder = workingDir)
saveRDS(runStack, file = paste0(workingDir,'/ROFC_runoff_stack.rds'))
# step 3: summarize spatial maps into data tables -----------------------------
# precip is averaged over the domain and kept in mm
# irrigation includes total volumetric water and mean depth
# drainage includes total volumetric water and mean depth
# also etac, esac, and epac, rofc
# yields - summed by crop type and irrigation status, as totals and regional stats
# summarize irrigation and recharge
irrSummary <- summarizeWaterStacks(irrStack, 'irrigation')
rchSummary <- summarizeWaterStacks(rchStack, 'recharge')
etacSummary <- summarizeWaterStacks(ETStack, 'etac')
esacSummary <- summarizeWaterStacks(et2Stack, 'esac')
epacSummary <- summarizeWaterStacks(et3Stack, 'epac')
rofcSummary <- summarizeWaterStacks(runStack, 'rofc')
# get total ppt over time - mean mm
pcpByYear <- data.frame(year = yearsWanted,
variable = 'precip',
depth_mean_mm = cellStats(pcpStack, stat = 'mean', na.rm=TRUE),
stringsAsFactors = FALSE)
# combine
waterVars <- irrSummary %>%
bind_rows(rchSummary,pcpByYear, etacSummary, esacSummary, epacSummary, rofcSummary)
# export water variables
write.csv(waterVars, row.names = FALSE,
file = paste0(workingDir,'/WaterVars_meansTotals.csv'))
### Yields
# summary stats for active pixels by class and regional totals. rbinds are
# ugly but get the job done. oops.
yieldStats <- NA
# rainfed crops
for(crop in crops) {
# calculate totals, means, and summary stats
sumStats <- summarizeYieldStacks(cropCode = crop, yieldList = rainCropsList,
irrigationStatus = 'N')
# add to master data frame
yieldStats <- rbind(yieldStats, sumStats)
}
# irrigated crops
for(crop in crops) {
# calculate totals, means, and summary stats
sumStats <- summarizeYieldStacks(cropCode = crop, yieldList = irrCropsList,
irrigationStatus = 'Y')
# add to master data frame
yieldStats <- rbind(yieldStats, sumStats)
}
# remove na row
yieldStats2 <- yieldStats %>%
filter(!is.na(yield_mean_kgha))
write.csv(yieldStats2, row.names=FALSE,
file = paste0(workingDir,'/yields_statsAndTotals.csv'))
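# A tidier alternative to the NA-seeded rbind loops above (untested sketch;
# assumes summarizeYieldStacks returns one data frame per crop):
# yieldStats <- bind_rows(
#   lapply(crops, summarizeYieldStacks, yieldList = rainCropsList, irrigationStatus = "N"),
#   lapply(crops, summarizeYieldStacks, yieldList = irrCropsList, irrigationStatus = "Y"))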
|
/data/SALUS_output/0030_waterSA/0036_thetaC/03.70_salusResultsProcesser_rawToSpatial.R
|
permissive
|
jdeines/Deines_etal_LEMA_SALUS
|
R
| false | false | 7,494 |
r
|
\name{Runs}
\alias{druns}
\alias{pruns}
\alias{qruns}
\alias{rruns}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Distribution of the Wald Wolfowitz Runs Statistic
}
\description{
Probability function, distribution function, quantile function and random generation for the distribution of the Runs statistic obtained from samples with \eqn{n_1}{n1} and \eqn{n_2}{n2} elements of each type.
}
\usage{
druns(x, n1, n2, log = FALSE)
pruns(q, n1, n2, lower.tail = TRUE, log.p = FALSE)
qruns(p, n1, n2, lower.tail = TRUE, log.p = FALSE)
rruns(n, n1, n2)
}
\arguments{
\item{x, q}{a numeric vector of quantiles.}
\item{p}{a numeric vector of probabilities.}
\item{n}{number of observations to return.}
\item{n1, n2}{the number of elements of first and second type, respectively.}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p).}
\item{lower.tail}{logical; if TRUE (default), probabilities are P[X \eqn{\le} x] otherwise, P[X > x].}
}
\details{
The Runs distribution has probability function
\deqn{
P(R=r)=
\left\{
\begin{array}{cc}
\frac{2{n_1-1 \choose r/2-1}{n_2-1 \choose r/2-1}}{{n_1+n_2 \choose n_1}}, & \mbox{if } r \mbox{ is even}\\
\frac{{n_1-1 \choose (r-1)/2}{n_2-1 \choose (r-3)/2}\,+\,{n_1-1 \choose (r-3)/2}{n_2-1 \choose (r-1)/2}}{{n_1+n_2 \choose n_1}}, & \mbox{if } r \mbox{ is odd}\\
\end{array}
\right.
%\qquad r=2,3,\ldots, n_1+n_2.
}{P(R=r) = 2 choose(n1-1,r/2-1)choose(n2-1,r/2-1)/choose(n1+n2,n1), if r is even
P(R=r) = (choose(n1-1,(r-1)/2)choose(n2-1,(r-3)/2) + choose(n1-1,(r-3)/2)choose(n2-1,(r-1)/2))/choose(n1+n2,n1), if r is odd
}
for \eqn{r=2,3,\ldots, 2\min(n_1,n_2)+c}{r = 2, 3, \ldots, 2 min(n1,n2)+c} with \eqn{c=0} if \eqn{n_1=n_2}{n1 = n2} or \eqn{c=1} if \eqn{n_1 \neq n_2}{n1 != n2}.
If an element of \code{x} is not an integer, the result of \code{druns} is zero.
The quantile is defined as the smallest value \eqn{x} such that \eqn{F(x) \ge p}, where \eqn{F} is the distribution function.
}
\value{
\code{druns} gives the probability function, \code{pruns} gives the distribution function and \code{qruns} gives the quantile function.
%, and rruns generates random deviates.
%The length of the result is determined by nn for rwilcox, and is the maximum of the lengths of the numerical parameters for the other functions.
%The numerical parameters other than nn are recycled to the length of the result. Only the first elements of the logical parameters are used.
}
\references{
Swed, F.S. and Eisenhart, C. (1943).
Tables for Testing Randomness of Grouping in a Sequence of Alternatives, \emph{Ann. Math Statist.} \bold{14}(1), 66-87.
}
\examples{
##
## Example: Distribution Function
## Creates Table I in Swed and Eisenhart (1943), p. 70,
## with n1 = 2 and n1 <= n2 <= 20
##
m <- NULL
for (i in 2:20){
m <- rbind(m, pruns(2:5,2,i))
}
rownames(m)=2:20
colnames(m)=2:5
#
# 2 3 4 5
# 2 0.333333333 0.6666667 1.0000000 1
# 3 0.200000000 0.5000000 0.9000000 1
# 4 0.133333333 0.4000000 0.8000000 1
# 5 0.095238095 0.3333333 0.7142857 1
# 6 0.071428571 0.2857143 0.6428571 1
# 7 0.055555556 0.2500000 0.5833333 1
# 8 0.044444444 0.2222222 0.5333333 1
# 9 0.036363636 0.2000000 0.4909091 1
# 10 0.030303030 0.1818182 0.4545455 1
# 11 0.025641026 0.1666667 0.4230769 1
# 12 0.021978022 0.1538462 0.3956044 1
# 13 0.019047619 0.1428571 0.3714286 1
# 14 0.016666667 0.1333333 0.3500000 1
# 15 0.014705882 0.1250000 0.3308824 1
# 16 0.013071895 0.1176471 0.3137255 1
# 17 0.011695906 0.1111111 0.2982456 1
# 18 0.010526316 0.1052632 0.2842105 1
# 19 0.009523810 0.1000000 0.2714286 1
# 20 0.008658009 0.0952381 0.2597403 1
#
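##
## Consistency check between druns, pruns and qruns (illustrative only):
## the quantile function inverts the distribution function.
##
p <- pruns(4, n1 = 5, n2 = 5)
qruns(p, n1 = 5, n2 = 5) # 4
sum(druns(2:4, n1 = 5, n2 = 5)) # equals pruns(4, 5, 5)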
}
\keyword{ distribution }
%\keyword{ randomness test }% __ONLY ONE__ keyword per line
|
/man/runs.Rd
|
no_license
|
cran/randtests
|
R
| false | false | 3,637 |
rd
|
# Libraries assumed by the calls below: readxl (read_excel), kernlab (rbfdot,
# kernelMatrix), plsdepot (plsreg1) and Rmalschains (malschains).
library(readxl)
library(kernlab)
library(plsdepot)
library(Rmalschains)
numCompoments <- 10
dataset <- read_excel("C:/Users/User/Documents/Proyecto01/Revista_03/Base_RAcademico_Vacios.xlsx")
datosnuevos <- data.frame(dataset)
datos <- data.matrix(datosnuevos[2:19])
#FUNCION_OBJETIVO
funcion_objetivo <- function(x){
vDegree <- x[1]
if(vDegree==0){
vDegree <- 0.00001
}
kergauss<-rbfdot(sigma = vDegree)
K<-kernelMatrix(kergauss,datos)
jd_kpls = plsreg1(K[,],datosnuevos[,20:20],comps = numCompoments)
Q2 <- jd_kpls$Q2
Res <- data.frame(Q2)
v <- c() #Declaramos un vector vacio
q <- length(Res) #Cantidad de columnas
for (i in 1:numCompoments){
for (j in 1:q){
if(is.null(Q2[i,j])){
Q2[i,j] <- 0
}else {
Q2[i,j] <- Q2[i,j]
}
}
}
for (i in 1:numCompoments){
v[i] <- Q2[i,q]
}
s <- c() #Declaramos un vector vacio
s[1] <- max(v)
for (i in 1:numCompoments){
if(s[1]==v[i]){
s[2] <- i
}
}
return(s[1]) # Q2cum -> retorna el valor maximo
}
##
funcion_objetivo_2 <- function(x){ # for the final evaluation
  vDegree <- x[1]
  if(vDegree==0){
    vDegree <- 0.00001
  }
  kergauss <- rbfdot(sigma = vDegree)
  K <- kernelMatrix(kergauss, datos)
  jd_kpls = plsreg1(K[,], datosnuevos[,20:20], comps = numCompoments)
  Q2 <- jd_kpls$Q2
  Res <- data.frame(Q2)
  v <- c() # declare an empty vector
  q <- length(Res) # number of columns
  Q2[is.na(Q2)] <- 0 # same missing-value guard as above
  for (i in 1:numCompoments){
    v[i] <- Q2[i,q]
  }
  s <- c() # declare an empty vector
  s[1] <- max(v)
  for (i in 1:numCompoments){
    if(s[1]==v[i]){
      s[2] <- i
      s[3] <- vDegree
    }
  }
  return(s) # returns a vector: max Q2cum, component index, sigma
}
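# Illustrative single evaluation of the objective at sigma = 1 (assumes the
# data objects above are loaded; not part of the optimization loop below):
# funcion_objetivo(c(1))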
resultado <- matrix(0,30,6)
eF <- 0.50
for (iter in 1:30) {
  tinicial <- proc.time() # start the timer
  ## search for the optimum with MA-LS-Chains (memetic algorithm with
  ## local search chains)
  res.fun <- malschains(function(x) {-funcion_objetivo(x)}, lower=c(0), upper=c(10), maxEvals = 5000,
                        control = malschains.control(popsize = 50, ls = "cmaes", istep = 500, effort = eF))
print(res.fun$sol)
print(res.fun$fitness)
result <- res.fun$sol
  tfinal <- proc.time()-tinicial # stop the timer
  ## evaluate the optimum with funcion_objetivo_2
optimum.value <- funcion_objetivo_2(result)
  ##########################################################################
  # Save results
  ##########################################################################
  resultado[iter,1]<-iter             # iteration
  resultado[iter,2]<-optimum.value[1] # max Q2cum from funcion_objetivo_2
  resultado[iter,3]<-optimum.value[2] # component index
  resultado[iter,4]<-optimum.value[3] # degree (vDegree, the RBF sigma)
  resultado[iter,5]<-tfinal[3]        # computation time (s)
  resultado[iter,6]<-eF               # effort
print(resultado)
}
Res <- data.frame(resultado)
write.csv2(Res,"C:/Users/User/Documents/Proyecto01/Revista_03/MMA_R3_050.csv")
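# Optional post-hoc look at the best of the 30 repetitions (sketch; columns
# X1..X6 are the default data.frame names for the result matrix):
# Res[which.max(Res$X2), ]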
|
/MA_KPLS_3.R
|
no_license
|
jorgemellopy/KPLS-MA
|
R
| false | false | 3,106 |
r
|
## run_analysis.R
## R code to read in the course project dataset and produce tidy data
## This file edited in GNU Emacs
## Note: As J. Adler indicates on pg 151 of "R in a Nutshell"
## R is not the best language for preprocessing data.
## I would normally use Perl, but that is outside the
## scope of this assignment.
########################################
## ##
## In no way is this code efficient ##
## ##
########################################
run_analysis <- function () {
##
## By specification this function has no parameters
## It is the case then that many things will be hard coded
    ## Assumption: this program will be run in a directory that
    ## contains the source data set in a subdirectory named "UCI HAR Dataset"
##
## This directory will have been unzipped without alteration
##
    ## This script will do the following steps (not necessarily in this order):
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set
## with the average of each variable for each activity and each subject.
## We are eventually going to use the dplyr library
library(dplyr)
    options(stringsAsFactors = FALSE) # the original assigned a plain variable here, which had no effect
## ok, now we get the data
## features are the variables (column header labels) in the dataset
filepath <- "./UCI HAR Dataset/features.txt"
features <- read.table(filepath)
## activities are a map from the numeric activity label to a descriptive textual label
filepath <- "./UCI HAR Dataset/activity_labels.txt"
activities <- read.table(filepath)
########################################
##
## Now we get the training data
##
## this column contains the subject identifiers
filepath <- "./UCI HAR Dataset/train/subject_train.txt"
train.subjects <- read.table(filepath)
## This column contains the activity identifiers
filepath <- "./UCI HAR Dataset/train/y_train.txt"
train.activity <- read.table(filepath)
## This matrix contains all the measurement data
filepath <- "./UCI HAR Dataset/train/X_train.txt"
train.measurements <- read.table(filepath)
########################################
##
## Now we get the test data
##
## this column contains the subject identifiers
filepath <- "./UCI HAR Dataset/test/subject_test.txt"
test.subjects <- read.table(filepath)
## This column contains the activity identifiers
filepath <- "./UCI HAR Dataset/test/y_test.txt"
test.activity <- read.table(filepath)
## This matrix contains all the measurement data
filepath <- "./UCI HAR Dataset/test/X_test.txt"
test.measurements <- read.table(filepath)
########################################
##
## Now, we have to do some assembling and mapping
##
## It is assumed that the rows line up for these two
## sets of data. That is row 1 of the activity, subject,
## and measurement data all correspond.
    comb.subjects <- rbind(test.subjects, train.subjects, deparse.level=0)
    comb.activity <- rbind(test.activity, train.activity, deparse.level=0)
    comb.measurements <- rbind(test.measurements, train.measurements, deparse.level=0)
comb.data <- cbind(comb.activity, comb.subjects, comb.measurements)
## Add the descriptive activity labels
comb.data.2 <- merge (activities, comb.data, by.x=1, by.y=1, sort=FALSE)
features.new <- array(c(1:3, "activity", "activityName", "subject"), c(3,2))
features.2 <- rbind(features.new, features)
features.3 <- features.2[,2]
names(comb.data.2) <- features.3
## fix the duplicate column names
comb.data.3 <- data.frame(comb.data.2, check.names=TRUE)
## convert to table for easy column removal
comb.data.tbl <- tbl_df(comb.data.3)
## Reduce the table by selecting only the variables (columns) we want: means and diseases (bad joke)
combined.reduced.tbl <- select(comb.data.tbl, activityName, subject, contains("mean"), contains("std"))
## Now lets build our tidy data
subj.act.groups <- group_by(combined.reduced.tbl, subject, activityName)
## I could have combined this with the line above
tidy <- subj.act.groups %>% summarise_each(funs(mean), contains("mean"), contains("std"))
write.table(tidy, file="getdata_tidy.txt" ,row.name=FALSE)
} # function
|
/run_analysis.R
|
no_license
|
RogerThompson/CleaningDataProject
|
R
| false | false | 4,825 |
r
|
####################
P1_GRN1<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(parameters["h1"]*parameters["h2"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN2<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-parameters["Kd1"]*parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN3<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["h2"]*parameters["h3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN4<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*((parameters["h1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN5<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["h1"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN6<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*((parameters["h2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["Kd1"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN7<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["h1"]*parameters["h2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN8<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*((parameters["h3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["Kd1"]*parameters["Kd2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
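# Note: each P1_GRN* function above evaluates the closed-form solution of
# dR/dt = G*ksyn - kdeg*R with initial condition R(0) = R_basal, i.e.
# R(t) = G*ksyn/kdeg + (R_basal - G*ksyn/kdeg)*exp(-kdeg*t),
# at the sampling times in the global vector caRNA_time.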
################################
##################################
##################################
G_GRN1<-function(parameters) {
G<-(1-parameters["k0"])*(parameters["h1"]*parameters["h2"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G)
}
G_GRN2<-function(parameters) {
G<-(1-parameters["k0"])*(1-parameters["Kd1"]*parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G)
}
G_GRN3<-function(parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["h2"]*parameters["h3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN4<-function( parameters) {
G<-(1-parameters["k0"])*((parameters["h1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN5<-function( parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["h1"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN6<-function( parameters) {
G<-(1-parameters["k0"])*((parameters["h2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["Kd1"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN7<-function( parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["h1"]*parameters["h2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G)
}
G_GRN8<-function( parameters) {
G<-(1-parameters["k0"])*((parameters["h3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["Kd1"]*parameters["Kd2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G)
}
##################################
##################################
##################################
##################################
##################################
##################################
G_basal1<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level)^3/((Kd1+Basal_level)*(Kd2+Basal_level)*(Kd3+Basal_level)))+k0)
}
G_basal2<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((1-Kd1*Kd2*Kd3/((Kd1+Basal_level)*(Kd2+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal3<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*(1-(Kd1/(Kd1+Basal_level))*(1-Basal_level^2/((Kd2+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal4<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level/(Kd1+Basal_level))*(1-Kd2*Kd3/((Kd2+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal5<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*(1-(Kd2/(Kd2+Basal_level))*(1-Basal_level^2/((Kd1+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal6<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level/(Kd2+Basal_level))*(1-Kd1*Kd3/((Kd1+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal7<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*(1-(Kd3/(Kd3+Basal_level))*(1-Basal_level^2/((Kd1+Basal_level)*(Kd2+Basal_level))))+k0)
}
G_basal8<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level/(Kd3+Basal_level))*(1-Kd1*Kd2/((Kd1+Basal_level)*(Kd2+Basal_level))))+k0)
}
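# The G_basal* functions are the corresponding G_GRN* gate functions with all
# three inputs held at the common basal level (h1 = h2 = h3 = Basal_level).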
P1_fun_GRNs<-function(X){
# print(X)
if(any(X[1:3]>2)|any(X[1:3]<(-2))|(X[6]>(1))|(X[6]<(0))) {return(10^8)}
Kd1<-10^(X[1])
Kd2<-10^(X[2])
Kd3<-10^(X[3])
ksyn<-10^(X[4])
kdeg<-10^(X[5])
k0<-X[6]
# Kd1<-10^(0.356590974)
# Kd2<-10^(-1.352434174)
# Kd3<-10^(-0.028680619)
# ksyn<-10^(1.264826761)
# kdeg<-10^(-1.022643040)
# k0<-0.002599714
#correct
# Kd1<-10^(-1)
# Kd2<-10^(0)
# Kd3<-10^(1)
# ksyn<-10.55
# kdeg<-10^(-1)
# k0<-0
# # #best_fit
# Kd1<-10^(-1.62)
# Kd2<-10^(-0.095)
# Kd3<-10^(1.05)
# ksyn<-10^(0.91)
# kdeg<-10^(-1.08)
# k0<-0.0003
#40
#correct
# Kd1<-10^(-1)
# Kd2<-10^(-1)
# Kd3<-10^(0)
# ksyn<-10.54852
# kdeg<-10^(-1)
# k0<-0
# # #best_fit
# Kd1<-10^(-1.3211072421)
# Kd2<-10^(0.0555631110)
# Kd3<-10^(-0.6961266731)
# ksyn<-10^(0.9333445404)
# kdeg<-10^(-1.0864844054)
# k0<-0.0001743699
# # 681.7661
#
# #70
# #correct
# Kd1<-10^(-1)
# Kd2<-10^(-1)
# Kd3<-10^(0)
# ksyn<-11.56018
# kdeg<-10^(-1)
# k0<-0
# # #best_fit
# Kd1<-10^(-0.3654665228)
# Kd2<-10^(-0.7637120744)
# Kd3<-10^(0.2200285179)
# ksyn<-10^(0.9272315376)
# kdeg<-10^(-1.3843749226)
# k0<-0.0008822873
# 549.4992
parS<-c(Kd1=Kd1,Kd2=Kd2,Kd3=Kd3,k0=k0,ksyn=ksyn,kdeg=kdeg)
R_basal<-ksyn*(get(paste("G_basal",GRN_ind,sep=""))(Kd1,Kd2,Kd3,k0,Basal_level))/kdeg
# print(R_basal)
if(is.nan(R_basal)|(R_basal<0)) {return(10^8)}
# print("yes")
  # Evaluate the selected GRN model for each of the 26 input conditions and
  # pool the predicted time courses (equivalent to the original 26
  # hand-written calls, which differed only in the row index of input_list).
  model_fun <- get(paste("P1_GRN", GRN_ind, sep = ""))
  pred <- unlist(lapply(1:26, function(i) {
    model_fun(R_basal, c(parS, h1 = input_list[i, 1],
                         h2 = input_list[i, 2],
                         h3 = input_list[i, 3]))
  }))
  Total_mse <- target_fun(pred)
# print(Total_mse)
if(is.infinite(Total_mse)){return(10^8)}
if(is.na(Total_mse)){return(10^8)}
return(Total_mse)
}
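# Hypothetical usage sketch (names and values illustrative, not from the
# original pipeline). P1_fun_GRNs reads GRN_ind, Basal_level, input_list,
# caRNA_time and target_fun from the global environment, so something like
# the following would need to be defined before optimizing, e.g. with optim:
# GRN_ind     <- 1
# Basal_level <- 0.1
# caRNA_time  <- c(0, 0.5, 1, 2, 4)
# input_list  <- matrix(runif(26 * 3), nrow = 26)  # 26 input conditions
# target_fun  <- function(pred) sum((pred - observed)^2)  # 'observed' assumed
# fit <- optim(par = c(0, 0, 0, 1, -1, 0.01), fn = P1_fun_GRNs)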
|
/Source_code/A_workflow_to_fit_GRS_models_to_data /Optimization_different_error_level/Nondyn_opt_newbasalnew.R
|
no_license
|
nwang00/Identifying-the-combinatorial-control-of-signal-dependent-transcription-factors
|
R
| false | false | 12,020 |
r
|
####################
P1_GRN1<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(parameters["h1"]*parameters["h2"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN2<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-parameters["Kd1"]*parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN3<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["h2"]*parameters["h3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN4<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*((parameters["h1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN5<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["h1"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN6<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*((parameters["h2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["Kd1"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN7<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["h1"]*parameters["h2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
P1_GRN8<-function(R_basal, parameters) {
G<-(1-parameters["k0"])*((parameters["h3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["Kd1"]*parameters["Kd2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G*parameters["ksyn"]/parameters["kdeg"]+(R_basal-G*parameters["ksyn"]/parameters["kdeg"])*exp(-parameters["kdeg"]*caRNA_time))
}
################################
##################################
##################################
G_GRN1<-function(parameters) {
G<-(1-parameters["k0"])*(parameters["h1"]*parameters["h2"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G)
}
G_GRN2<-function(parameters) {
G<-(1-parameters["k0"])*(1-parameters["Kd1"]*parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"])))+parameters["k0"]
return(G)
}
G_GRN3<-function(parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["h2"]*parameters["h3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN4<-function( parameters) {
G<-(1-parameters["k0"])*((parameters["h1"]/(parameters["Kd1"]+parameters["h1"]))*(1-parameters["Kd2"]*parameters["Kd3"]/((parameters["Kd2"]+parameters["h2"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN5<-function( parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["h1"]*parameters["h3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN6<-function( parameters) {
G<-(1-parameters["k0"])*((parameters["h2"]/(parameters["Kd2"]+parameters["h2"]))*(1-parameters["Kd1"]*parameters["Kd3"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd3"]+parameters["h3"]))))+parameters["k0"]
return(G)
}
G_GRN7<-function( parameters) {
G<-(1-parameters["k0"])*(1-(parameters["Kd3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["h1"]*parameters["h2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G)
}
G_GRN8<-function( parameters) {
G<-(1-parameters["k0"])*((parameters["h3"]/(parameters["Kd3"]+parameters["h3"]))*(1-parameters["Kd1"]*parameters["Kd2"]/((parameters["Kd1"]+parameters["h1"])*(parameters["Kd2"]+parameters["h2"]))))+parameters["k0"]
return(G)
}
##################################
##################################
##################################
##################################
##################################
##################################
G_basal1<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level)^3/((Kd1+Basal_level)*(Kd2+Basal_level)*(Kd3+Basal_level)))+k0)
}
G_basal2<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((1-Kd1*Kd2*Kd3/((Kd1+Basal_level)*(Kd2+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal3<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*(1-(Kd1/(Kd1+Basal_level))*(1-Basal_level^2/((Kd2+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal4<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level/(Kd1+Basal_level))*(1-Kd2*Kd3/((Kd2+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal5<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*(1-(Kd2/(Kd2+Basal_level))*(1-Basal_level^2/((Kd1+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal6<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level/(Kd2+Basal_level))*(1-Kd1*Kd3/((Kd1+Basal_level)*(Kd3+Basal_level))))+k0)
}
G_basal7<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*(1-(Kd3/(Kd3+Basal_level))*(1-Basal_level^2/((Kd1+Basal_level)*(Kd2+Basal_level))))+k0)
}
G_basal8<-function(Kd1,Kd2,Kd3,k0,Basal_level){
return((1-k0)*((Basal_level/(Kd3+Basal_level))*(1-Kd1*Kd2/((Kd1+Basal_level)*(Kd2+Basal_level))))+k0)
}
P1_fun_GRNs<-function(X){
# print(X)
if(any(X[1:3]>2)|any(X[1:3]<(-2))|(X[6]>(1))|(X[6]<(0))) {return(10^8)}
Kd1<-10^(X[1])
Kd2<-10^(X[2])
Kd3<-10^(X[3])
ksyn<-10^(X[4])
kdeg<-10^(X[5])
k0<-X[6]
# Kd1<-10^(0.356590974)
# Kd2<-10^(-1.352434174)
# Kd3<-10^(-0.028680619)
# ksyn<-10^(1.264826761)
# kdeg<-10^(-1.022643040)
# k0<-0.002599714
#correct
# Kd1<-10^(-1)
# Kd2<-10^(0)
# Kd3<-10^(1)
# ksyn<-10.55
# kdeg<-10^(-1)
# k0<-0
# # #best_fit
# Kd1<-10^(-1.62)
# Kd2<-10^(-0.095)
# Kd3<-10^(1.05)
# ksyn<-10^(0.91)
# kdeg<-10^(-1.08)
# k0<-0.0003
#40
#correct
# Kd1<-10^(-1)
# Kd2<-10^(-1)
# Kd3<-10^(0)
# ksyn<-10.54852
# kdeg<-10^(-1)
# k0<-0
# # #best_fit
# Kd1<-10^(-1.3211072421)
# Kd2<-10^(0.0555631110)
# Kd3<-10^(-0.6961266731)
# ksyn<-10^(0.9333445404)
# kdeg<-10^(-1.0864844054)
# k0<-0.0001743699
# # 681.7661
#
# #70
# #correct
# Kd1<-10^(-1)
# Kd2<-10^(-1)
# Kd3<-10^(0)
# ksyn<-11.56018
# kdeg<-10^(-1)
# k0<-0
# # #best_fit
# Kd1<-10^(-0.3654665228)
# Kd2<-10^(-0.7637120744)
# Kd3<-10^(0.2200285179)
# ksyn<-10^(0.9272315376)
# kdeg<-10^(-1.3843749226)
# k0<-0.0008822873
# 549.4992
parS<-c(Kd1=Kd1,Kd2=Kd2,Kd3=Kd3,k0=k0,ksyn=ksyn,kdeg=kdeg)
R_basal<-ksyn*(get(paste("G_basal",GRN_ind,sep=""))(Kd1,Kd2,Kd3,k0,Basal_level))/kdeg
# print(R_basal)
if(is.nan(R_basal)|(R_basal<0)) {return(10^8)}
# print("yes")
Total_mse<-target_fun(c(get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[1,1],h2=input_list[1,2],h3=input_list[1,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[2,1],h2=input_list[2,2],h3=input_list[2,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[3,1],h2=input_list[3,2],h3=input_list[3,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[4,1],h2=input_list[4,2],h3=input_list[4,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[5,1],h2=input_list[5,2],h3=input_list[5,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[6,1],h2=input_list[6,2],h3=input_list[6,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[7,1],h2=input_list[7,2],h3=input_list[7,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[8,1],h2=input_list[8,2],h3=input_list[8,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[9,1],h2=input_list[9,2],h3=input_list[9,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[10,1],h2=input_list[10,2],h3=input_list[10,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[11,1],h2=input_list[11,2],h3=input_list[11,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[12,1],h2=input_list[12,2],h3=input_list[12,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[13,1],h2=input_list[13,2],h3=input_list[13,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[14,1],h2=input_list[14,2],h3=input_list[14,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[15,1],h2=input_list[15,2],h3=input_list[15,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[16,1],h2=input_list[16,2],h3=input_list[16,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[17,1],h2=input_list[17,2],h3=input_list[17,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[18,1],h2=input_list[18,2],h3=input_list[18,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[19,1],h2=input_list[19,2],h3=input_list[19,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[20,1],h2=input_list[20,2],h3=input_list[20,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[21,1],h2=input_list[21,2],h3=input_list[21,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[22,1],h2=input_list[22,2],h3=input_list[22,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[23,1],h2=input_list[23,2],h3=input_list[23,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[24,1],h2=input_list[24,2],h3=input_list[24,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[25,1],h2=input_list[25,2],h3=input_list[25,3])),
get(paste("P1_GRN",GRN_ind,sep=""))(R_basal,c(parS,h1=input_list[26,1],h2=input_list[26,2],h3=input_list[26,3]))))
# print(Total_mse)
if(is.infinite(Total_mse)){return(10^8)}
if(is.na(Total_mse)){return(10^8)}
return(Total_mse)
}
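# A minimal usage sketch (hypothetical starting values; assumes GRN_ind,
# input_list, Basal_level, target_fun and the matching P1_GRN* function
# are defined elsewhere in this script before optimising):
# X0 <- c(0, 0, 0, 1, -1, 0.01) # log10(Kd1:Kd3), log10(ksyn), log10(kdeg), k0
# fit <- optim(X0, P1_fun_GRNs, method = "Nelder-Mead", control = list(maxit = 5000))
# fit$par; fit$value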
|
\name{tukey.26b}
\alias{tukey.26b}
\docType{data}
\title{Tukey straightening exercise 26b}
\description{
Demand deposits in post-office savings accounts in Switzerland.}
\usage{
tukey.26b
}
\format{
A data frame with 29 observations on the following 2 variables.
\describe{
\item{year}{year}
\item{deposits}{deposits}}
}
\source{Tukey, EDA}
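% A minimal usage sketch (assumes the package providing this dataset is attached):
\examples{
plot(deposits ~ year, data = tukey.26b)
}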
\keyword{datasets}
|
/man/tukey.26b.Rd
|
no_license
|
bayesball/LearnEDAfunctions
|
R
| false | false | 386 |
rd
|
###This is a script to generate RLE objects out of the bam files for our CAGE data
# @ author Dermot Harnett, EMBL Heidelberg
# @date 18/10/2013
# @title Load the 3' tag data
########################################
setwd(dir ='/g/furlong/Harnett/TSS_CAGE_myfolder/')
source('src/tss_cage_functions.R')
#1 Read in files
tagseq.files = list.files('/g/furlong/mdavis/projects/enrico/data/local_bams',full.names=T,pattern='Tagseq.*.bam$')
linetable = read.table('data/line_name_table.txt',header=T)
tagseq.df<-data.frame(
accession = gsub('.*/(.*?)\\..*','\\1',tagseq.files),
replicate = gsub(pattern='.*\\dh_((RAL)?\\d+)(_(\\d+))?\\..*',replacement='\\4',x=tagseq.files,perl=T),
line = gsub(pattern='.*\\dh_(RAL)?(\\d+)(_\\d+)?\\..*',replacement='\\2',x=tagseq.files,perl=T),
timepoint = gsub(pattern='.*_(\\d+_\\d+)h_(RAL)?\\d+(_\\d)?\\..*',replacement='\\1',x=tagseq.files,perl=T),
tissue = 'embryo',
RAL = grepl('h_RAL\\d+',x=tagseq.files),# logical: TRUE where RAL line IDs were used (FALSE for Bloomington line numbers)
stringsAsFactors = F
)
#convert all our line information to the RAL #
bloom2ral = linetable[,2]
names(bloom2ral) = linetable[,1]
tagseq.df$line[!tagseq.df$RAL] = bloom2ral[tagseq.df$line[ !tagseq.df$RAL]]
#make timepoint info comparable between tagseq and cage tables
tagseq.df$timepoint =gsub('(\\d+)_(\\d+)','\\1\\2h',tagseq.df$timepoint)
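# e.g. gsub('(\\d+)_(\\d+)','\\1\\2h','2_4') gives "24h", matching the CAGE table format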
#save
save(tagseq.df,file ='data/objects/tagseq.df.object.R')
#3 Process the BAM files, name them with the dataframe, creating and naming our Rle list of structure name->strand->chr
ts<-mclapply(mc.cores=20,mc.cleanup=T,tagseq.files,function(x)bam2coverage(x,doshift=F,doresize=F,stranded=T))
names(ts)<-tagseq.df$accession#name them
#create a third strand for each library with the other two summed
for(acc in names(ts)){ts[[acc]]$both<-ts[[acc]]$pos+ts[[acc]]$neg}
#calculate size of library and put this in the dataframe
tagseq.df$library.size<-sapply(as.character(tagseq.df$accession),function(acc){
sum(as.numeric(sum(ts[[as.character(acc)]]$both)))
})
tagseq.df$genome.coverage<-(tagseq.df$library.size/sum(seqlengths(si)))
#save
save(tagseq.df,file= 'data/objects/tagseq.df.object.R' )
save(ts,file='data/objects/ts.object.R')
#load('data/objects/tagseq.df.object.R')
#load('data/objects/all.tagseq.unprocessed.R')
## 4 do the power law normalization
fits<-sapply(tagseq.df$accession,function(acc){
sitecounts=sort(c(unlist(unname(ts[[acc]][['pos']])),unlist(unname(ts[[acc]][['neg']]))))
sitecounts=sitecounts[sitecounts!=0]
fit=power.law.fit(as.vector(sitecounts),xmin=200)#note that power.law.fit excludes the lower count numbers
total.tags=sum(sitecounts)
o=getOffset(fit$alpha,total.tags)#calculate the offset (second parameter)
c(fit,offset=o,tagcount=total.tags)#now tack it onto the fit list and return
})
tagseq.df$alpha<-as.numeric(fits['alpha',])
mean.alpha=mean(tagseq.df$alpha)
r.offset <-getOffset(mean.alpha,refsize)
tagseq.df$offset<-as.numeric(fits['offset',])
save(tagseq.df,file='data/objects/tagseq.df.full.object.R')
#4a now finally normalize all of our tagseq libraries to a common power law reference
ts.pl<-mapply(SIMPLIFY=F,names(ts),tagseq.df$alpha,tagseq.df$offset,FUN=function(acc,alpha,offset){
lapply(ts[[acc]],function(srle){
pl.norm(srle,x.alpha=alpha,x.offset=offset[[1]])
})
})
save(ts.pl,file='data/objects/ts.pl.object.R')
#5 create a summed alltags object for each timepoint
message('summing tagseq tags')
alltagseq=sapply(simplify=F,split(ts,as.character(tagseq.df$timepoint)),function(alltags){
list(
pos=Reduce('+',sapply(alltags,'[[','pos')),
neg=Reduce('+',sapply(alltags,'[[','neg')),
both=Reduce('+',sapply(alltags,'[[','both'))
)
})
#export bigwigs for these
for(set in names(alltagseq)){
export(alltagseq[[set]]$pos,paste0('/g/furlong/Harnett/TSS_CAGE_myfolder/data/solexa/wig/alltagseq.',set,'.pl.pos.bw'))
export(alltagseq[[set]]$neg,paste0('/g/furlong/Harnett/TSS_CAGE_myfolder/data/solexa/wig/alltagseq.',set,'.pl.neg.bw'))
}
save(alltagseq,file ='data/objects/alltagseq.object.R')
#6 load up the peaks and create files for these as well.
tagseq.peaks=list(
tp24=read.delim('/g/furlong/Harnett/TSS_CAGE_myfolder/data/Tagseq_peaks/peaks.polya.2h.default.gff.features.gff',header=F,comment.char='#'),
tp68=read.delim('/g/furlong/Harnett/TSS_CAGE_myfolder/data/Tagseq_peaks/peaks.polya.6h.default.gff.features.gff',header=F,comment.char='#'),
tp1012=read.delim('/g/furlong/Harnett/TSS_CAGE_myfolder/data/Tagseq_peaks/peaks.polya.10h.default.gff.features.gff',header=F,comment.char='#')
)
tagseq.peaks=sapply(tagseq.peaks,function(f){
GRanges(paste0('chr',f$V1),IRanges(f$V4,f$V5),strand=f$V7,seqinfo=si)
})
sapply(names(tagseq.peaks),function(npks){
export(tagseq.peaks[[npks]],paste0('/g/furlong/Harnett/TSS_CAGE_myfolder/data/tagseqpeaks_',npks,'.bed'))
})
save(tagseq.peaks,file='data/objects/tagseq.peaks.object.R')
|
/load_tagseq.R
|
no_license
|
pnandak/eRNA_project
|
R
| false | false | 5,050 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_two_part.R
\name{hdgamma}
\alias{hdgamma}
\title{Fitting function for lasso penalized gamma GLMs}
\usage{
hdgamma(
x,
y,
weights = rep(1, NROW(x)),
offset = NULL,
penalty_factor = NULL,
nlambda = 100L,
lambda_min_ratio = ifelse(n < p, 0.05, 0.005),
lambda = NULL,
tau = 0,
intercept = TRUE,
strongrule = TRUE,
maxit_irls = 50,
tol_irls = 1e-05,
maxit_mm = 500,
tol_mm = 1e-05
)
}
\arguments{
\item{x}{an n x p matrix of covariates for the zero part data, where each row is an observation
and each column is a predictor}
\item{y}{a length n vector of responses taking strictly positive values.}
\item{weights}{a length n vector of observation weights}
\item{offset}{a length n vector of offset terms}
\item{penalty_factor}{a length p vector of penalty adjustment factors corresponding to each covariate.
A value of 0 in the jth location indicates no penalization on the jth variable, and any positive value will
indicate a multiplicative factor on top of the common penalization amount. The default value is 1 for
all variables}
\item{nlambda}{the number of lambda values. The default is 100.}
\item{lambda_min_ratio}{Smallest value for \code{lambda}, as a fraction of lambda.max, the data-derived largest lambda value.
The default depends on the sample size relative to the number of variables.}
\item{lambda}{a user supplied sequence of penalization tuning parameters. By default, the program automatically
chooses a sequence of lambda values based on \code{nlambda} and \code{lambda_min_ratio}}
\item{tau}{a scalar numeric value between 0 and 1 (included) which is a mixing parameter for sparse group lasso penalty.
0 indicates group lasso and 1 indicates lasso, values in between reflect different emphasis on group and lasso penalties}
\item{intercept}{whether or not to include an intercept. Default is \code{TRUE}.}
\item{strongrule}{should a strong rule be used?}
\item{maxit_irls}{maximum number of IRLS iterations}
\item{tol_irls}{convergence tolerance for IRLS iterations}
\item{maxit_mm}{maximum number of MM iterations. Note that for \code{algorithm = "irls"}, MM is used within
each IRLS iteration, so \code{maxit_mm} applies to the convergence of the inner iterations in this case.}
\item{tol_mm}{convergence tolerance for MM iterations. Note that for \code{algorithm = "irls"}, MM is used within
each IRLS iteration, so \code{tol_mm} applies to the convergence of the inner iterations in this case.}
}
\description{
This function fits penalized gamma GLMs
}
\examples{
library(personalized2part)
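\dontrun{
# a hypothetical sketch with simulated positive responses (not from the package docs);
# x and y here are illustrative placeholders:
x <- matrix(rnorm(100 * 5), 100, 5)
y <- rexp(100) + 0.1
fit <- hdgamma(x, y, nlambda = 25)
}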
}
|
/man/hdgamma.Rd
|
no_license
|
jaredhuling/personalized2part
|
R
| false | true | 2,640 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.antweb.R
\name{as.antweb}
\alias{as.antweb}
\title{Coerce occurrence keys to antwebkey/occkey objects}
\usage{
as.antweb(x)
}
\arguments{
\item{x}{Various inputs, including the output from a call to \code{\link{occ}}
(class occdat), \code{\link{occ2df}} (class data.frame), or a list, numeric,
character, antwebkey, or occkey object.}
}
\value{
One or more in a list of both class antwebkey and occkey
}
\description{
Coerce occurrence keys to antwebkey/occkey objects
}
\examples{
\dontrun{
spp <- c("linepithema humile", "acanthognathus")
out <- occ(query=spp, from='antweb', limit=2)
res <- occ2df(out)
(tt <- as.antweb(out))
(uu <- as.antweb(res))
as.antweb(res$key[1])
as.antweb(as.list(res$key[1:2]))
as.antweb(tt[[1]])
as.antweb(uu[[1]])
as.antweb(tt[1:2])
}
}
|
/man/as.antweb.Rd
|
permissive
|
nsm120/spocc
|
R
| false | true | 846 |
rd
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @useDynLib PopIn
#' @importFrom Rcpp sourceCpp
NULL
indmodelseC <- function(land_r, nrow, ncol, n_steps = 20L, init_population = 10L, hr_size = 1L, birth_rate = 2.0, breeding_age = 1L, survival = 0.4, distance_weight = 0.001, dispersal_distance = 5.0, dispersal_mode = 2L, sink_avoidance = 0.5, neigh_avoidance = 1.0, sink_mortality = 0.7, file_name = "Res_imse") {
.Call('PopIn_indmodelseC', PACKAGE = 'PopIn', land_r, nrow, ncol, n_steps, init_population, hr_size, birth_rate, breeding_age, survival, distance_weight, dispersal_distance, dispersal_mode, sink_avoidance, neigh_avoidance, sink_mortality, file_name)
}
|
/R/RcppExports.R
|
no_license
|
anaceiahasse/PopIn
|
R
| false | false | 754 |
r
|
context("Correct data structures returned")
data(immigration, package = "cregg")
immigration$wts <- stats::rbeta(nrow(immigration), 2, 5)*5
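# simulated observation weights (a scaled Beta(2, 5) draw) used below to exercise the 'weights' arguments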
test_that("cj() works", {
expect_true(inherits(x <- cj(immigration, ChosenImmigrant ~ LanguageSkills, estimate = "mm", id = ~ CaseID), "cj_mm"),
label = "cj() works w/o 'by'")
expect_true(inherits(plot(x), "ggplot"), label = "plot.cj_amce() works w/o 'by' argument")
expect_true(inherits(x <- cj(immigration, ChosenImmigrant ~ LanguageSkills, estimate = "mm", id = ~ CaseID, by = ~ Gender), "cj_mm"),
label = "cj() works w/ 'by'")
expect_true(inherits(plot(x, group = "BY"), "ggplot"), label = "plot.cj_amce() works w/ 'by' argument")
expect_true(!identical(cj(immigration, ChosenImmigrant ~ LanguageSkills, estimate = "mm", id = ~ CaseID,
by = ~ Gender, level_order = "ascending")$level,
cj(immigration, ChosenImmigrant ~ LanguageSkills, estimate = "mm", id = ~ CaseID,
by = ~ Gender, level_order = "descending")$level),
label = "cj() respects 'level_order'")
expect_true(identical(levels(cj(immigration, ChosenImmigrant ~ Education + Gender, id = ~CaseID,
feature_order = c("Gender", "Education"), estimate = "mm")$feature),
c("Gender", "Educational Attainment")),
label = "cj() respects 'feature_order'")
# expected errors
expect_error(cj(immigration, ChosenImmigrant ~ Education + Gender, id = ~CaseID, feature_order = "Education", estimate = "mm"),
label = "cj() fails for missing feature names in 'feature_order'")
expect_error(cj(immigration, ChosenImmigrant ~ Education + Gender, id = ~CaseID, feature_order = c("Education", "Gender", "foo"), estimate = "mm"),
label = "cj() fails for too many feature names in 'feature_order'")
expect_error(cj(immigration, ChosenImmigrant ~ Education + Gender, id = ~CaseID, feature_order = c("Education", "foo"), estimate = "mm"),
label = "cj() fails for wrong feature names in 'feature_order'")
expect_error(inherits(x <- cj(immigration, ChosenImmigrant ~ LanguageSkills, estimate = "diff", id = ~ CaseID), "cj_mm"),
label = "cj() fails on estimate = 'diff' w/o 'by'")
})
test_that("amce() works", {
expect_true(inherits(x <- amce(immigration, ChosenImmigrant ~ Gender, id = ~ CaseID), "cj_amce"))
expect_true(inherits(plot(x), "ggplot"))
expect_true(inherits(amce(immigration, ChosenImmigrant ~ LanguageSkills, id = ~ CaseID, weights = ~ wts), "cj_amce"),
label = "amce() works w/ 'weights'")
expect_error(amce(immigration, ~ LanguageSkills, id = ~ CaseID), label = "amce() fails w/o LHS variable in formula")
})
test_that("amce() works", {
x1 <- amce(immigration, ChosenImmigrant ~ Gender + LanguageSkills, id = ~ CaseID)
x2 <- amce(immigration, ChosenImmigrant ~ Gender * LanguageSkills, id = ~ CaseID)
expect_true(!identical(x1, x2), label = "amce() respects model specification")
})
test_that("amce_diffs() works", {
expect_true(inherits(x <- amce_diffs(immigration, ChosenImmigrant ~ LanguageSkills, by = ~ Gender, id = ~ CaseID), "cj_diffs"))
expect_true(inherits(plot(x), "ggplot"))
expect_true(inherits(amce_diffs(immigration, ChosenImmigrant ~ LanguageSkills, id = ~ CaseID, by = ~ Gender, weights = ~ wts), "cj_diffs"),
label = "amce_diffs() works w/ 'weights'")
expect_error(amce(immigration, ~ LanguageSkills, id = ~ CaseID), label = "amce_diffs() fails w/o LHS variable in formula")
})
test_that("amce_by_reference() works", {
expect_true(inherits(x <- amce_by_reference(immigration, ChosenImmigrant ~ LanguageSkills + Gender, id = ~ CaseID, variable = ~Gender), "cj_amce"))
expect_true(inherits(plot(x), "ggplot"))
expect_true(inherits(amce_by_reference(immigration, ChosenImmigrant ~ LanguageSkills, id = ~ CaseID, variable = ~ Gender, weights = ~ wts), "cj_amce"),
label = "amce_by_reference() works w/ 'weights'")
expect_error(amce_by_reference(immigration, ~ LanguageSkills, variable = ~ Gender, id = ~ CaseID), label = "amce_by_reference() fails w/o LHS variable in formula")
expect_error(amce_by_reference(immigration, ~ LanguageSkills, id = ~ CaseID), label = "amce_by_reference() fails w/o 'variable'")
})
test_that("mm() works", {
expect_true(inherits(x <- mm(immigration, ChosenImmigrant ~ Gender, id = ~ CaseID), "cj_mm"))
expect_true(inherits(plot(x), "ggplot"))
expect_true(inherits(mm(immigration, ChosenImmigrant ~ LanguageSkills, id = ~ CaseID, weights = ~ wts), "cj_mm"),
label = "mm() works w/ 'weights'")
expect_error(mm(immigration, ~ LanguageSkills, id = ~ CaseID), label = "mm() fails w/o LHS variable in formula")
})
test_that("mm_diffs() works", {
expect_true(inherits(x <- mm_diffs(immigration, ChosenImmigrant ~ LanguageSkills, by = ~ Gender, id = ~ CaseID), "cj_diffs"))
expect_true(inherits(plot(x), "ggplot"))
expect_true(inherits(mm_diffs(immigration, ChosenImmigrant ~ LanguageSkills, by = ~ Gender, id = ~ CaseID, weights = ~ wts), "cj_diffs"),
label = "mm_diffs() works w/ 'weights'")
expect_error(mm_diffs(immigration, ~ LanguageSkills, id = ~ CaseID), label = "mm_diffs() fails w/o 'by'")
expect_error(mm_diffs(immigration, ~ LanguageSkills, by = ~ Gender, id = ~ CaseID), label = "mm_diffs() fails w/o LHS variable in formula")
})
test_that("cj_table() works", {
expect_true(inherits(cj_table(immigration, ~ Gender + Education), "data.frame"))
expect_true("reference" %in% names(cj_table(immigration, ~ Gender + Education, include_reference = TRUE)))
})
test_that("cj_freqs() works", {
expect_true(inherits(x <- cj_freqs(immigration, ~ Gender, id = ~ CaseID), "cj_freqs"))
expect_true(inherits(plot(x), "ggplot"))
expect_true(inherits(cj_freqs(immigration, ~ Gender, id = ~ CaseID, weights = ~ wts), "cj_freqs"), label = "freqs() works w/ 'weights'")
})
test_that("cj_props() works", {
expect_true(inherits(x <- cj_props(immigration, ~ Gender, id = ~ CaseID), "cj_props"))
expect_true(inherits(cj_props(immigration, ~ Gender, id = ~ CaseID, weights = ~ wts), "cj_props"), label = "props() works w/ 'weights'")
})
test_that("cj_anova() works", {
expect_true(inherits(cj_anova(immigration, ChosenImmigrant ~ Education, id = ~CaseID, by = ~Gender), "anova"), label = "cj_anova() works")
# cj_anova() currently doesn't work with 'weights' due to issues in **survey**
#expect_true(inherits(cj_anova(immigration, ChosenImmigrant ~ Education, id = ~CaseID, by = ~Gender, weights = ~wts), "anova"),
# label = "cj_anova() works w/ 'weights'")
expect_error(cj_anova(immigration, ChosenImmigrant ~ Education, id = ~CaseID, by = ~Gender, weights = ~wts),
label = "cj_anova() fails w/ 'weights', as expected at the moment")
expect_error(cj_anova(immigration, ~ Education, id = ~ CaseID, by = ~Gender), label = "cj_anova() fails w/o LHS variable in formula")
})
|
/tests/testthat/tests-classes.R
|
no_license
|
jasonyaopku/cregg
|
R
| false | false | 7,209 |
r
|
## These functions cache the inverse of a matrix and return it when necessary
## Calculating the inverse of a matrix is a costly operation and
## it makes sense to cache the value that we calculate and return it when it's needed
## This function returns a list of functions which have access to the matrix and the cached value of the inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
## Set the new matrix and reset the inverse to NULL
x <<- y
inv <<- NULL
}
get <- function() x
## Getter and setter for the inverse
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function calculates the inverse and caches it for future use
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if (!is.null(inv)) {
        ## if getInverse returned a non-NULL value, it means that we already
## have a cached value. Return it.
message("getting cached data")
return(inv)
}
## If we don't have a cached value, calculate the inverse, cache it in a
## variable and return the new value.
data <- x$get()
inv <- solve(data, ...)
x$setInverse(inv)
inv
}
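## A minimal usage sketch (hypothetical 2x2 matrix):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m) # computes, caches and returns the inverse
## cacheSolve(m) # prints "getting cached data" and returns the cached inverse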
|
/cachematrix.R
|
no_license
|
srinarasi/ProgrammingAssignment2
|
R
| false | false | 1,266 |
r
|
#' Extract Fitted Values from linreg object.
#'
#' \code{predict.ridgereg} returns the fitted values of the regression model calculated by the \code{\link{ridgereg}} function.
#' @param object An object of class \code{ridgereg}
#' @param newdata An optional data set containing the same variables, to be used for prediction
#' @param ... optional arguments that can be passed on to the function.
#' @return A numeric vector of fitted values extracted from the model in \code{\link{ridgereg}}.
#'
#' @examples
#' x<- ridgereg(formula= Sepal.Width ~ Sepal.Length + Petal.Width, data= iris)
#' predict(x)
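#' # a sketch of prediction on new data (assumes the same variables, including the response, are present):
#' predict(x, newdata = iris[1:10, ])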
#' @export
predict.ridgereg<- function(object, newdata,...){
stopifnot(inherits(object, "ridgereg"))
if(missing(newdata)) {
predd<-as.vector(object$Fitted_values)
}else{
pred1<-ridgereg(formula=object$Formula, data=newdata)
predd<-pred1$Fitted_values
}
return(predd)
}
|
/multireg/R/predictridgereg.R
|
no_license
|
lemonisa/lab4
|
R
| false | false | 885 |
r
|
## Direct use of this function is an advanced pattern: the search function is
## called explicitly to explore a feature space with a chosen evaluator
## Classification problem
# Generates the filter evaluation function with ACO
filter_evaluator <- filterEvaluator('determinationCoefficient')
# Generates the search function
aco_search <- antColony()
# Performs the search process directly (parameters: dataset, target variable and evaluator)
res <- aco_search(iris, 'Species', filter_evaluator)
print(res)
|
/demo/aco.R
|
no_license
|
cran/FSinR
|
R
| false | false | 522 |
r
|
# Draw 100 random numbers from a standard normal distribution
x = rnorm(100)
# Find the fraction of elements within 1, 2, and 3 standard deviations of the mean
n = length(x)
for (sigma in 1:3){
  cat("", sigma, "standard deviation(s):", sum(-sigma < x & x < sigma) / n, "\n")
}
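# By the empirical rule the printed fractions should be near 0.68, 0.95 and 0.997,
# varying from run to run with only 100 draws.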
|
/HW3/Answer4.R
|
no_license
|
RKDSOne/Rprojects
|
R
| false | false | 228 |
r
|
#' @export
#' @importFrom BiocGenerics strand start end
#' @importFrom S4Vectors runValue
#' @importFrom GenomeInfoDb seqnames
findMaxima <- function(regions, range, metric, ignore.strand=TRUE)
# This function reports, for each entry of 'regions', whether it is the
# maximum of 'metric' among all regions within 'range' bp of it. The
# 'metric' is typically the average count, but any per-region score can be supplied.
#
# written by Aaron Lun
# created 9 November 2014.
{
strs <- strand(regions)
if (!ignore.strand && length(runValue(strs))!=1) {
# Strand-specific maxima identification.
forward <- as.logical(strs=="+")
reverse <- as.logical(strs=="-")
neither <- as.logical(strs=="*")
out <- logical(length(regions))
if (any(forward)) {
out[forward] <- Recall(regions=regions[forward], range=range, metric=metric[forward], ignore.strand=TRUE)
}
if (any(reverse)) {
out[reverse] <- Recall(regions=regions[reverse], range=range, metric=metric[reverse], ignore.strand=TRUE)
}
if (any(neither)) {
out[neither] <- Recall(regions=regions[neither], range=range, metric=metric[neither], ignore.strand=TRUE)
}
return(out)
}
chrs <- as.integer(seqnames(regions))
starts <- start(regions)
ends <- end(regions)
o <- order(chrs, starts, ends)
if (length(metric)!=length(regions)) {
stop("one metric must be supplied per region")
}
if (any(is.na(metric))) {
stop("missing values in 'metric'")
}
if (!is.double(metric)) {
metric <- as.double(metric)
}
if (!is.integer(range)) {
range <- as.integer(range)
}
out <- .Call(cxx_find_maxima, chrs[o], starts[o], ends[o], metric[o], range)
out[o] <- out
return(out)
}
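# A minimal usage sketch (hypothetical windows; assumes GenomicRanges is loaded
# and the compiled cxx_find_maxima routine from this package is available):
# gr <- GRanges("chrA", IRanges(c(1, 101, 201), width = 100))
# findMaxima(gr, range = 150, metric = c(5, 10, 2)) # TRUE where a window has the largest metric within 'range'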
|
/R/findMaxima.R
|
no_license
|
LTLA/csaw
|
R
| false | false | 1,826 |
r
|
\name{breast}
\docType{data}
\alias{breast}
\title{German Breast Cancer Data}
\description{
Breast cancer survival data.
}
\references{
M. Schumacher, et al. (1994). Randomized \eqn{2\times2} trial
evaluating hormonal treatment and the duration of chemotherapy in
node-positive breast cancer patients. The German Breast Cancer Study
Group, \emph{J. Clinical Oncology}, 12:2086-2093.
}
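% A minimal usage sketch (assumes the package providing this dataset is attached):
\examples{
data(breast)
}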
\keyword{datasets}
|
/man/breast.Rd
|
no_license
|
cran/randomSurvivalForest
|
R
| false | false | 416 |
rd
|