| content (large_string, lengths 0–6.46M) | path (large_string, lengths 3–331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5–125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4–6.46M) | extension (large_string, 75 classes) | text (string, lengths 0–6.46M) |
---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chime_operations.R
\name{chime_put_events_configuration}
\alias{chime_put_events_configuration}
\title{Creates an events configuration that allows a bot to receive outgoing
events sent by Amazon Chime}
\usage{
chime_put_events_configuration(AccountId, BotId,
OutboundEventsHTTPSEndpoint, LambdaFunctionArn)
}
\arguments{
\item{AccountId}{[required] The Amazon Chime account ID.}
\item{BotId}{[required] The bot ID.}
\item{OutboundEventsHTTPSEndpoint}{HTTPS endpoint that allows the bot to receive outgoing events.}
\item{LambdaFunctionArn}{Lambda function ARN that allows the bot to receive outgoing events.}
}
\description{
Creates an events configuration that allows a bot to receive outgoing
events sent by Amazon Chime. Choose either an HTTPS endpoint or a Lambda
function ARN. For more information, see Bot.
}
\section{Request syntax}{
\preformatted{svc$put_events_configuration(
AccountId = "string",
BotId = "string",
OutboundEventsHTTPSEndpoint = "string",
LambdaFunctionArn = "string"
)
}
}
\keyword{internal}
|
/paws/man/chime_put_events_configuration.Rd
|
permissive
|
johnnytommy/paws
|
R
| false | true | 1,111 |
rd
|
#!/usr/bin/env Rscript
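# Usage (inferred from the argument parsing below):
#   Rscript QC_plot.R <input_seurat.rds> <output_dir>
# The input Seurat object is assumed to already contain JackStraw results (needed by
# JackStrawPlot), a "percent.mt" metadata column, and a "treatment" grouping variable.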
library(Seurat)
library(ggplot2)
args = commandArgs(trailingOnly=TRUE)
# test if there is at least one argument: if not, return an error
if (length(args) < 2) {
stop("Input and output files must be supplied (input file).n", call.=FALSE)
}
infile = args[1]
outdir = args[2]
sample <- readRDS(infile)
sample <- FindVariableFeatures(sample)
p1 <- JackStrawPlot(sample, dims = 1:20)
p2 <- ElbowPlot(sample, ndims = 50)
p3 <- VariableFeaturePlot(sample)
p3 <- LabelPoints(plot = p3, points = head(VariableFeatures(sample), 10))
p4 <- VlnPlot(sample, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), group.by = "treatment")
dir.create(outdir)
tiff(paste0(outdir, "/JackStrawPlot.tiff"))
p1
dev.off()
tiff(paste0(outdir, "/Elbow_plot.tiff"))
p2
dev.off()
tiff(paste0(outdir, "/top_variables.tiff"))
p3
dev.off()
tiff(paste0(outdir, "/feature_qc.tiff"))
p4
dev.off()
|
/R/QC_plot.R
|
permissive
|
yexiang2046/sc_seq
|
R
| false | false | 922 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paFm.r
\name{paFm}
\alias{paFm}
\title{Compute cumulative mean attribution for factor models}
\usage{
paFm(fit, ...)
}
\arguments{
\item{fit}{an object of class \code{tsfm}, \code{sfm} or \code{ffm}.}
\item{...}{other arguments/controls passed to the fit methods.}
}
\value{
The returned object is of class \code{"pafm"} containing
\item{cum.ret.attr.f}{N X K matrix of cumulative return attributed to
factors.}
\item{cum.spec.ret}{length-N vector of cumulative specific returns.}
\item{attr.list}{list of time series of attributed returns for every
portfolio.}
}
\description{
Decompose total returns into returns attributed to factors and
specific returns. An object of class \code{"pafm"} is generated, with
methods for generic functions \code{plot}, \code{summary} and \code{print}.
}
\details{
Total returns can be decomposed into returns attributed to factors
and specific returns. \cr \eqn{R_t = \sum b_k * f_kt + u_t, t=1...T} \cr
\code{b_k} is exposure to factor k and \code{f_kt} is factor k's return at
time t. The return attributed to factor k is \code{b_k * f_kt} and specific
return is \code{u_t}.
}
\examples{
data(managers)
fit <- fitTsfm(asset.names=colnames(managers[, (1:6)]),
factor.names=c("EDHEC.LS.EQ","SP500.TR"), data=managers)
# without benchmark
fm.attr <- paFm(fit)
}
\references{
Grinold, R. and Kahn, R. (1999) Active Portfolio Management: A
Quantitative Approach for Producing Superior Returns and Controlling Risk.
McGraw-Hill.
}
\seealso{
\code{\link{fitTsfm}}, \code{\link{fitSfm}}, \code{\link{fitFfm}}
for the factor model fitting functions.
The \code{pafm} methods for generic functions:
\code{\link{plot.pafm}}, \code{\link{print.pafm}} and
\code{\link{summary.pafm}}.
}
\author{
Yi-An Chen and Sangeetha Srinivasan
}
|
/man/paFm.Rd
|
no_license
|
AvinashAcharya/factorAnalytics
|
R
| false | true | 1,864 |
rd
|
\name{MultiNetworks}
\alias{MultiNetworks}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Generic function for objects of class MultiNetworks
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Generic function for objects of class MultiNetworks. See other functions for more details.
}
\usage{
MultiNetworks(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
Default parameter for generic functions
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
Additional parameters
}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{MultiNetworks.default}}, \code{\link{print.MultiNetworks}}
}
\examples{
# data(SpADataExpression)
# data(SpADEGenes)
# data(SpASamples)
# SpAData<-DEGeneExpr(t(SpADataExpression),SpADEGenes)
# StatusFactor<-paste(SpASamples$status,SpASamples$b27,sep=".")
# names(StatusFactor)=SpASamples$chipnum
# NodesForSIMoNe<-rownames(SpADEGenes)[1:17]
# GaussianSpAData<-DEGeneExpr(t(SpADataExpression[NodesForSIMoNe,]),SpADEGenes[NodesForSIMoNe,])
# MultiSpAData<-MultiDEGeneExpr(GaussianSpAData,DEGeneExpr(t(SpADataExpression[18:34,]),
# SpADEGenes[18:34,]),DEGeneExpr(t(SpADataExpression[35:51,]),SpADEGenes[35:51,]))
# MultiSpANetworks<-MultiNetworks(MultiSpAData,
# SelectInteractionsSTRING=c("coexpression","experimental","knowledge"),STRINGThreshold=0.9,
# FilterSIMoNeOptions=list(Threshold=0.4),Factors=StatusFactor,
# STRINGOptions=list(AddAnnotations=FALSE),SIMoNeOptions=list(AddAnnotations=FALSE),
# WGCNAOptions=list(AddAnnotations=FALSE))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{internal}
|
/man/MultiNetworks.Rd
|
no_license
|
cran/stringgaussnet
|
R
| false | false | 1,848 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dots-ellipsis.R
\name{check_dots_empty0}
\alias{check_dots_empty0}
\title{Check that dots are empty (low level variant)}
\usage{
check_dots_empty0(..., call = caller_env())
}
\arguments{
\item{...}{Dots which should be empty.}
}
\description{
\code{check_dots_empty0()} is a more efficient version of
\code{\link[=check_dots_empty]{check_dots_empty()}} with a slightly different interface. Instead
of inspecting the current environment for dots, it directly takes
\code{...}. It is only meant for very low level functions where a
couple microseconds make a difference.
}
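% Illustrative usage sketch: `f()` is a hypothetical low-level function that forwards
% its dots so that any unexpected argument raises an error.
\examples{
f <- function(x, ...) {
  check_dots_empty0(...)
  x * 2
}
f(1)          # ok
try(f(1, 2))  # errors because `...` is not empty
}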
\keyword{internal}
|
/man/check_dots_empty0.Rd
|
permissive
|
r-lib/rlang
|
R
| false | true | 668 |
rd
|
library(arm)
library(jagsUI)
library(ggplot2)
library(gridExtra)
library(parallel)
library(Rcpp)
setwd("~/Documents/GitHub/Ecology-models/simcoms-master/ExampleFiles")
load("params.rds")
load("sim_names.rds")
load("comp_inter.rds")
load("fac_inter.rds")
setwd("~/Documents/GitHub/Ecology-models/simcoms-master")
sim_data<-readRDS("sim_data.rds")
###############################################################################################
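# run_model(): fits a multivariate probit joint species distribution model (JSDM) in JAGS.
# A latent matrix Z is multivariate normal with precision Tau (Wishart prior); presences
# Y[i, j] are Bernoulli(step(Z[i, j])). Regression coefficients get hierarchical normal
# priors, and Rho / EnvRho are the residual and environment-induced species correlation
# matrices. Initial values come from per-species bayesglm() probit fits.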
run_model <- function(data) {
jsdm_jags <- function() {
model.file <- tempfile()
cat(
"model {
for (i in 1:n) {
Z[i, 1:J] ~ dmnorm(Mu[i, ], Tau)
for (j in 1:J) {
Mu[i, j] <- inprod(B_raw[j, ], X[i, ])
Y[i, j] ~ dbern(step(Z[i, j]))
}
}
for (j in 1:J) {
sigma_[j] <- sqrt(Sigma[j, j])
env_sigma_[j] <- sqrt(EnvSigma[j, j])
for (k in 1:K) {
B_raw[j, k] ~ dnorm(mu[k], tau[k])
B[j, k] <- B_raw[j, k] / sigma_[j]
}
for (j_ in 1:J) {
Rho[j, j_] <- Sigma[j, j_] / (sigma_[j] * sigma_[j_])
EnvRho[j, j_] <- EnvSigma[j, j_] / (env_sigma_[j] * env_sigma_[j_])
EnvSigma[j, j_] <- sum(EnvSigma1[, j, j_]) + sum(EnvSigma2[, , j, j_])
for (k in 2:K) {
EnvSigma1[k - 1, j, j_] <- B[j, k] * B[j_, k]
for (k_ in 2:K) {
EnvSigma2[k - 1, k_ - 1, j, j_] <-
B[j, k] * B[j_, k_] * ifelse(k_ != k, covx[k, k_], 0)
}
}
}
}
for (k in 1:K) {
mu[k] ~ dnorm(0, 1)
tau[k] <- pow(sigma[k], -2)
sigma[k] ~ dnorm(0, 1)T(0,)
}
Tau ~ dwish(I, df)
Sigma <- inverse(Tau)
}",
file = model.file
)
model.file
}
inits <- function(data) {
Y <- as.matrix(data$Y)
X <- as.matrix(data$X)[, -1]
Tau <- rWishart(1, data$df, data$I)[, , 1]
Sigma <- solve(Tau)
Z <- rep(0, data$J)
Z <- mvrnorm(1, Z, Sigma)
Z <- replicate(data$n, Z)
Z <- t(Z)
Z <- abs(Z)
Z <- ifelse(Y, Z, -Z)
Sigma <- cov(Z)
B <- sapply(
seq_len(data$J),
function(x) coef(bayesglm(Y[, x] ~ X, family = binomial(link = "probit")))
)
B <- t(B)
B_raw <- B * sqrt(diag(Sigma))
mu <- apply(B_raw, 2, mean)
sigma <- pmin(99, apply(B_raw, 2, sd))
Tau <- solve(Sigma)
list(Tau = Tau, Z = Z, B_raw = B_raw, mu = mu, sigma = sigma)
}
data <- list(
Y = subset(data, select = -env),
X = cbind(1, scale(poly(data$env, 2))),
covx = cov(cbind(1, scale(poly(data$env, 2)))),
K = 3,
J = ncol(data) - 1,
n = nrow(data),
I = diag(ncol(data) - 1),
df = ncol(data)
)
model <- jags(
data,
function() inits(data), c("B", "Rho", "EnvRho","Tau"), jsdm_jags(), ###added Tau
n.chains = 5, n.iter = 2e3, n.adapt = 25e3, n.thin = 10, ####changed chains from 15 to 5
parallel = TRUE, DIC = FALSE
)
save(
model,
file = paste0("jsdmmodel-", format(Sys.time(), "%Y-%m-%d-%H-%M-%S"), ".rda")
)
model
}
# Fit models
data<-sim_data$FacCompSparseSp5
M1<- run_model(data)
models <- lapply(sim_data, run_model)
|
/simcoms-master/run_short.R
|
no_license
|
dbystrova/Ecology-models
|
R
| false | false | 3,063 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{write_table_shard_map}
\alias{write_table_shard_map}
\title{Register shards in table_shard_map}
\usage{
write_table_shard_map(dbconn, tblname, shard_names)
}
\arguments{
\item{dbconn}{SQLConnection. The database connection.}
\item{tblname}{character. Table name.}
\item{shard_names}{character. Calculated shard names given the table name}
}
\description{
Register shards in table_shard_map
}
|
/man/write_table_shard_map.Rd
|
permissive
|
robertzk/cachemeifyoucan
|
R
| false | true | 484 |
rd
|
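# RUnit test: packager:::throw() is expected to signal an error, which checkException() verifies.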
test_exception <- function() {
RUnit::checkException(packager:::throw("Hello, error!"))
}
|
/inst/runit_tests/test_throw.R
|
no_license
|
cran/packager
|
R
| false | false | 94 |
r
|
# This is a test program
1+1
1*1
1/1
1-1
|
/main.R
|
no_license
|
muellertorben/myrepo
|
R
| false | false | 40 |
r
|
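# Reads per-vehicle packet traces, converts packet sizes from bytes to bits, aggregates
# them into 1-second bins (bits/s), and plots mean rates with 95% confidence intervals
# for the cars' transmissions (red) and server s1's reception (blue).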
traceserver_s1_car0<-read.table(file = 'server_s1tf_car0.txt', sep=' ')
names(traceserver_s1_car0)<-c("time", "id", "size", "ori", "dest" )
options(digits.secs = 6)
traceserver_s1_car0$time <- as.POSIXlt(traceserver_s1_car0$time, origin = "1987-10-05 11:00:00")
traceserver_s1_car0$size<- traceserver_s1_car0$size*8
taxabps1segserver_s1_car0<-aggregate(list(size = traceserver_s1_car0$size), list(segundos = cut(traceserver_s1_car0$time, "1 sec")), sum)
taxabps1segserverts_s1_car0<-ts(traceserver_s1_car0$size, frequency = 1)
traceserver_s1_car1<-read.table(file = 'server_s1tf_car1.txt', sep=' ')
names(traceserver_s1_car1)<-c("time", "id", "size", "ori", "dest" )
traceserver_s1_car1$time <- as.POSIXlt(traceserver_s1_car1$time, origin = "1987-10-05 11:00:00")
traceserver_s1_car1$size<- traceserver_s1_car1$size*8
taxabps1segserver_s1_car1<-aggregate(list(size = traceserver_s1_car1$size), list(segundos = cut(traceserver_s1_car1$time, "1 sec")), sum)
taxabps1segserverts_s1_car1<-ts(traceserver_s1_car1$size, frequency = 1)
# traceserver_s1_car2<-read.table(file = 'server_s1tf_car2.txt', sep=' ')
# names(traceserver_s1_car2)<-c("time", "id", "size", "ori", "dest" )
# traceserver_s1_car2$time <- as.POSIXlt(traceserver_s1_car2$time, origin = "1987-10-05 11:00:00")
# traceserver_s1_car2$size<- traceserver_s1_car2$size*8
# taxabps1segserver_s1_car2<-aggregate(list(size = traceserver_s1_car2$size), list(segundos = cut(traceserver_s1_car2$time, "1 sec")), sum)
# taxabps1segserverts_s1_car2<-ts(traceserver_s1_car2$size, frequency = 1)
# traceserver_s1_car3<-read.table(file = 'server_s1tf_car3.txt', sep=' ')
# names(traceserver_s1_car3)<-c("time", "id", "size", "ori", "dest" )
# traceserver_s1_car3$time <- as.POSIXlt(traceserver_s1_car3$time, origin = "1987-10-05 11:00:00")
# traceserver_s1_car3$size<- traceserver_s1_car3$size*8
# taxabps1segserver_s1_car3<-aggregate(list(size = traceserver_s1_car3$size), list(segundos = cut(traceserver_s1_car3$time, "1 sec")), sum)
# taxabps1segserverts_s1_car3<-ts(traceserver_s1_car3$size, frequency = 1)
#
# traceserver_s1_car4<-read.table(file = 'server_s1tf_car4.txt', sep=' ')
# names(traceserver_s1_car4)<-c("time", "id", "size", "ori", "dest" )
# traceserver_s1_car4$time <- as.POSIXlt(traceserver_s1_car4$time, origin = "1987-10-05 11:00:00")
# traceserver_s1_car4$size<- traceserver_s1_car4$size*8
# taxabps1segserver_s1_car4<-aggregate(list(size = traceserver_s1_car4$size), list(segundos = cut(traceserver_s1_car4$time, "1 sec")), sum)
# taxabps1segserverts_s1_car4<-ts(traceserver_s1_car4$size, frequency = 1)
#
# traceserver_s1_car5<-read.table(file = 'server_s1tf_car5.txt', sep=' ')
# names(traceserver_s1_car5)<-c("time", "id", "size", "ori", "dest" )
# traceserver_s1_car5$time <- as.POSIXlt(traceserver_s1_car5$time, origin = "1987-10-05 11:00:00")
# traceserver_s1_car5$size<- traceserver_s1_car5$size*8
# taxabps1segserver_s1_car5<-aggregate(list(size = traceserver_s1_car5$size), list(segundos = cut(traceserver_s1_car5$time, "1 sec")), sum)
# taxabps1segserverts_s1_car5<-ts(traceserver_s1_car5$size, frequency = 1)
# traceserver_s1_car6<-read.table(file = 'server_s1tf_car6.txt', sep=' ')
# names(traceserver_s1_car6)<-c("time", "id", "size", "ori", "dest" )
# traceserver_s1_car6$time <- as.POSIXlt(traceserver_s1_car6$time, origin = "1987-10-05 11:00:00")
# traceserver_s1_car6$size<- traceserver_s1_car6$size*8
# taxabps1segserver_s1_car6<-aggregate(list(size = traceserver_s1_car6$size), list(segundos = cut(traceserver_s1_car6$time, "1 sec")), sum)
# taxabps1segserverts_s1_car6<-ts(traceserver_s1_car6$size, frequency = 1)
# plot(c(1:length(taxabps1segserver_s1_car0$size)), taxabps1segserver_s1_car0$size, main="Server s1", ylab='bits/s', xlab='time(s)', type = "l", col="blue")
# lines(c(1:length(taxabps1segserver_s1_car1$size)), taxabps1segserver_s1_car1$size, main="Server reception (bits/s)", ylab='bits/s', xlab='time(s)', col="red")
# lines(taxabps1segserver_s1_car2$segundos, taxabps1segserver_s1_car2$size, main="Server reception (bits/s)", ylab='bits/s', xlab='time(s)', col="red")
# lines(taxabps1segserver_s1_car3$segundos, taxabps1segserver_s1_car3$size, main="Server reception (bits/s)", ylab='bits/s', xlab='time(s)', col="red")
# lines(taxabps1segserver_s1_car4$segundos, taxabps1segserver_s1_car4$size, main="Server reception (bits/s)", ylab='bits/s', xlab='time(s)', col="red")
# lines(taxabps1segserver_s1_car5$segundos, taxabps1segserver_s1_car5$size, main="Server reception (bits/s)", ylab='bits/s', xlab='time(s)', col="red")
# # lines(taxabps1segserver_s1_car6$segundos, taxabps1segserver_s1_car6$size, main="Server reception (bits/s)", ylab='bits/s', xlab='time(s)', col="red")
# legend(240, 2e+06, legend=c("Car transmission", "Server reception"), col=c("blue", "red"), lty=1:2, cex=0.8)
# traffic from the vehicles to server_s1
tracecar0_s1<-read.table(file = 'car0tf_5005.txt', sep=' ')
names(tracecar0_s1)<-c("time", "id", "size", "ori", "dest" )
options(digits.secs = 6)
tracecar0_s1$time <- as.POSIXlt(tracecar0_s1$time, origin = "1987-10-05 11:00:00")
tracecar0_s1$size<- tracecar0_s1$size*8
taxabps1segcar0_s1<-aggregate(list(size = tracecar0_s1$size), list(segundos = cut(tracecar0_s1$time, "1 sec")), sum)
taxabps1segcar0_s1ts<-ts(tracecar0_s1$size, frequency = 1)
tracecar1_s1<-read.table(file = 'car1tf_5005.txt', sep=' ')
names(tracecar1_s1)<-c("time", "id", "size", "ori", "dest" )
tracecar1_s1$time <- as.POSIXlt(tracecar1_s1$time, origin = "1987-10-05 11:00:00")
tracecar1_s1$size<- tracecar1_s1$size*8
taxabps1segcar1_s1<-aggregate(list(size = tracecar1_s1$size), list(segundos = cut(tracecar1_s1$time, "1 sec")), sum)
taxabps1segcar1_s1ts<-ts(tracecar1_s1$size, frequency = 1)
# tracecar2_s1<-read.table(file = 'car2tf_5005.txt', sep=' ')
# names(tracecar2_s1)<-c("time", "id", "size", "ori", "dest" )
# tracecar2_s1$time <- as.POSIXlt(tracecar2_s1$time, origin = "1987-10-05 11:00:00")
# tracecar2_s1$size<- tracecar2_s1$size*8
# taxabps1segcar2_s1<-aggregate(list(size = tracecar2_s1$size), list(segundos = cut(tracecar2_s1$time, "1 sec")), sum)
# taxabps1segcar2_s1ts<-ts(tracecar2_s1$size, frequency = 1)
#
# tracecar3_s1<-read.table(file = 'car3tf_5005.txt', sep=' ')
# names(tracecar3_s1)<-c("time", "id", "size", "ori", "dest" )
# tracecar3_s1$time <- as.POSIXlt(tracecar3_s1$time, origin = "1987-10-05 11:00:00")
# tracecar3_s1$size<- tracecar3_s1$size*8
# taxabps1segcar3_s1<-aggregate(list(size = tracecar3_s1$size), list(segundos = cut(tracecar3_s1$time, "1 sec")), sum)
# taxabps1segcar3_s1ts<-ts(tracecar3_s1$size, frequency = 1)
#
# tracecar4_s1<-read.table(file = 'car4tf_5005.txt', sep=' ')
# names(tracecar4_s1)<-c("time", "id", "size", "ori", "dest" )
# tracecar4_s1$time <- as.POSIXlt(tracecar4_s1$time, origin = "1987-10-05 11:00:00")
# tracecar4_s1$size<- tracecar4_s1$size*8
# taxabps1segcar4_s1<-aggregate(list(size = tracecar4_s1$size), list(segundos = cut(tracecar4_s1$time, "1 sec")), sum)
# taxabps1segcar4_s1ts<-ts(tracecar4_s1$size, frequency = 1)
#
# tracecar5_s1<-read.table(file = 'car5tf_5005.txt', sep=' ')
# names(tracecar5_s1)<-c("time", "id", "size", "ori", "dest" )
# options(digits.secs = 6)
# tracecar5_s1$time <- as.POSIXlt(tracecar5_s1$time, origin = "1987-10-05 11:00:00")
# tracecar5_s1$size<- tracecar5_s1$size*8
# taxabps1segcar5_s1<-aggregate(list(size = tracecar5_s1$size), list(segundos = cut(tracecar5_s1$time, "1 sec")), sum)
# taxabps1segcar5_s1ts<-ts(tracecar5_s1$size, frequency = 1)
# tracecar6_s1<-read.table(file = 'car6tf_5005.txt', sep=' ')
# names(tracecar6_s1)<-c("time", "id", "size", "ori", "dest" )
# tracecar6_s1$time <- as.POSIXlt(tracecar6_s1$time, origin = "1987-10-05 11:00:00")
# tracecar6_s1$size<- tracecar6_s1$size*8
# taxabps1segcar6_s1<-aggregate(list(size = tracecar6_s1$size), list(segundos = cut(tracecar6_s1$time, "1 sec")), sum)
# taxabps1segcar6_s1ts<-ts(tracecar6_s1$size, frequency = 1)
ttime<-c(0:194)
#tt<-c(taxabps1segcar0_s1$size[1:195], taxabps1segcar1_s1$size[1:195], taxabps1segcar2_s1$size[1:195], taxabps1segcar3_s1$size[1:195], taxabps1segcar4_s1$size[1:195], taxabps1segcar5_s1$size[1:195], taxabps1segcar6_s1$size[1:195], taxabps1segcar7_s1$size[1:195], taxabps1segcar8_s1$size[1:195], taxabps1segcar9_s1$size[1:195], taxabps1segcar10_s1$size[1:195], taxabps1segcar11_s1$size[1:195], taxabps1segcar12_s1$size[1:195], taxabps1segcar13_s1$size[1:195], taxabps1segcar14_s1$size[1:195], taxabps1segcar15_s1$size[1:195], taxabps1segcar16_s1$size[1:195], taxabps1segcar17_s1$size[1:195], taxabps1segcar18_s1$size[1:195], taxabps1segcar19_s1$size[1:195])
tt<-c(taxabps1segcar0_s1$size[1:195], taxabps1segcar1_s1$size[1:195])
ttm<-c(taxabps1segcar0_s1$size[1:195] + taxabps1segcar1_s1$size[1:195])/2
tttime<-c(ttime, ttime)
length(ttm)
length(ttime)
require(Rmisc)
x<- cbind(tttime,tt)
x<-data.frame(x)
y<-group.CI(tt~tttime,x,ci = 0.95)
require(plotrix)
plotCI(ttime, ttm, ui=y$tt.upper, li=y$tt.lower, col="red", main="Server s1", ylab = "Taxa de entrega de pacotes - PDR", xlab = "tempo (s)")
lines(ttime,ttm, type = "l", col="black", lwd="2")
par(new=T)
ttime<-c(0:194)
#tt<-c(taxabps1segserver_s1_car0$size[1:195], taxabps1segserver_s1_car1$size[1:195], taxabps1segserver_s1_car2$size[1:195], taxabps1segserver_s1_car3$size[1:195], taxabps1segserver_s1_car4$size[1:195], taxabps1segserver_s1_car5$size[1:195], taxabps1segserver_s1_car6$size[1:195], taxabps1segserver_s1_car7$size[1:195], taxabps1segserver_s1_car8$size[1:195], taxabps1segserver_s1_car9$size[1:195], taxabps1segserver_s1_car10$size[1:195], taxabps1segserver_s1_car11$size[1:195], taxabps1segserver_s1_car12$size[1:195], taxabps1segserver_s1_car13$size[1:195], taxabps1segserver_s1_car14$size[1:195], taxabps1segserver_s1_car15$size[1:195], taxabps1segserver_s1_car16$size[1:195], taxabps1segserver_s1_car17$size[1:195], taxabps1segserver_s1_car18$size[1:195], taxabps1segserver_s1_car19$size[1:195])
tt<-c(taxabps1segserver_s1_car0$size[1:195], taxabps1segserver_s1_car1$size[1:195])
ttm<-c(taxabps1segserver_s1_car0$size[1:195] + taxabps1segserver_s1_car1$size[1:195])/2
tttime<-c(ttime, ttime)
length(ttm)
length(ttime)
require(Rmisc)
x<- cbind(tttime,tt)
x<-data.frame(x)
y<-group.CI(tt~tttime,x,ci = 0.95)
require(plotrix)
plotCI(ttime, ttm, ui=y$tt.upper, li=y$tt.lower, col="blue", main="Server s1", axes = FALSE)
lines(ttime,ttm, type = "l", col="black", lwd="2")
|
/bw_server_s1.R
|
no_license
|
wborbaneto/framework_its_sdn
|
R
| false | false | 10,372 |
r
|
#### -- Packrat Autoloader (version 0.4.8-1) -- ####
options(repos = c(CRAN = "https://mran.revolutionanalytics.com/snapshot/2017-01-30"))
source("packrat/init.R")
#### -- End Packrat Autoloader -- ####
|
/.Rprofile
|
no_license
|
mycpp/my-clean-power
|
R
| false | false | 202 |
rprofile
|
library(tidyverse)
library(ggpubr)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
load(file="../model_outputs/Rda_files/df_maintext_agents.Rda")
load(file="../model_outputs/Rda_files/df_maintext.Rda")
df_params = df_ABM_maintext %>% group_by(sim,EWA_sigma,EWA_chi,EWA_rho,EWA_alpha,memory_window) %>% summarize()
df_maintext_agents = df_maintext_agents %>% select(-X)
df = left_join(df_maintext_agents,df_params) %>% filter(timestep_knowledgable<=40)
####E_Mat####
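# Figure layout: five columns of panels, each varying one parameter (EWA_sigma, EWA_chi,
# memory_window, EWA_rho, EWA_alpha) while the others are held at reference values; the
# four rows plot B_e, B_i, B_s and B_p (E(t), I(t), S(t), P(produce b)) against timesteps
# knowledgeable of b.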
df_plot = df %>% filter(EWA_chi=="linear bias", EWA_rho=="medium", EWA_alpha=="risk-neutral", memory_window==10)
p1a=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_e, color=EWA_sigma))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="E(t)", color="", title="Soc. info bias")
p1b=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_i, color=EWA_sigma))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="I(t)", color="")
p1c=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_s, color=EWA_sigma))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="S(t)", color="")
p1d=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_p, color=EWA_sigma))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="Timesteps knowledgable of b", y="P(produce b)", color="")
g1= ggarrange(p1a,p1b,p1c,p1d, nrow=4,ncol=1, common.legend=T,legend="top")
df_plot = df %>% filter(EWA_sigma=="medium", EWA_rho=="medium", EWA_alpha=="risk-neutral", memory_window==10)
p2a=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_e, color=EWA_chi))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="", title="Conformity bias")
p2b=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_i, color=EWA_chi))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p2c=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_s, color=EWA_chi))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p2d=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_p, color=EWA_chi))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="Timesteps knowledgable of b", y="", color="")
g2= ggarrange(p2a,p2b,p2c,p2d, nrow=4,ncol=1, common.legend=T,legend="top")
df_plot = df %>% filter(EWA_sigma=="medium", EWA_chi=="linear bias", EWA_rho=="medium", EWA_alpha=="risk-neutral")
p3a=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_e, color=as.factor(memory_window)))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="", title="Memory")
p3b=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_i, color=as.factor(memory_window)))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p3c=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_s, color=as.factor(memory_window)))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p3d=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_p, color=as.factor(memory_window)))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="Timesteps knowledgable of b", y="", color="")
g3= ggarrange(p3a,p3b,p3c,p3d, nrow=4,ncol=1, common.legend=T,legend="top")
df_plot = df %>% filter(EWA_sigma=="medium", EWA_chi=="linear bias", EWA_alpha=="risk-neutral", memory_window==10)
p4a=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_e, color=EWA_rho))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="", title="Recent exp. bias")
p4b=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_i, color=EWA_rho))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p4c=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_s, color=EWA_rho))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p4d=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_p, color=EWA_rho))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="Timesteps knowledgable of b", y="", color="")
g4= ggarrange(p4a,p4b,p4c,p4d, nrow=4,ncol=1, common.legend=T,legend="top")
df_plot = df %>% filter(EWA_sigma=="medium", EWA_chi=="linear bias", EWA_rho=="medium", memory_window==10)
p5a=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_e, color=EWA_alpha))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="", title="Risk-appetite")
p5b=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_i, color=EWA_alpha))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p5c=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_s, color=EWA_alpha))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="", y="", color="")
p5d=ggplot(df_plot, aes(x=timestep_knowledgable, y = B_p, color=EWA_alpha))+
stat_summary()+
scale_color_viridis_d(option = "C", direction=-1, end=0.7)+
coord_cartesian(ylim=c(0,.5))+
theme_classic()+
labs(x="Timesteps knowledgable of b", y="", color="")
g5= ggarrange(p5a,p5b,p5c,p5d, nrow=4,ncol=1, common.legend=T,legend="top")
g_all = ggarrange(g1,g2,g3,g4,g5, labels=c("A","B","C","D","E"), ncol=5)
ggsave(g_all, file="../output/Fig_S_EISP.png",width=15,height=15,scale=3,units="cm")
|
/analysis/Fig_S_EISP.R
|
no_license
|
michaelchimento/acquisition_production_abm
|
R
| false | false | 6,693 |
r
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{stability}
\alias{stability}
\title{Calculate the stability of the clustering of samples}
\usage{
stability(geneSel, metric)
}
\arguments{
\item{geneSel}{The selected genes}
\item{metric}{The metric to use for clustering (see clValid documentation for all the options)}
}
\description{
For each signature a heatmap, among other results, is created. In the heatmap the samples are clustered
according to their expression profiles. This calculates the stability of that clustering
}
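% Illustrative call: `selected_genes` is a hypothetical vector of selected genes and
% "euclidean" is one of the metrics accepted by clValid.
\examples{
\dontrun{
stability(geneSel = selected_genes, metric = "euclidean")
}
}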
|
/drugFinder/man/stability.Rd
|
no_license
|
npklein/drugFinder
|
R
| false | false | 559 |
rd
|
#' Builds a synthetic variable indicating whether a person was born in the state (MCA) of current residence - 1970
#' @param CensusData data.frame (or data.table) with the 1970 census microdata
#' @return data.frame
#' @export
build_migration_bornSameStateMCA_1970 <- function(CensusData){
if(!is.data.frame(CensusData)){
stop("'CensusData' is not a data.frame")
}
if(!is.data.table(CensusData)){
CensusData = as.data.table(CensusData)
}
stateMinimumComparable_just_created <- F
check_vars <- harmonizeIBGE:::check_var_existence(CensusData, c("stateMinimumComparable"))
if(length(check_vars) > 0){
CensusData <- build_geography_stateMinimumComparable_1970(CensusData)
stateMinimumComparable_just_created <- T
gc()
}
stateOfBirthMCA_just_created <- F
check_vars <- harmonizeIBGE:::check_var_existence(CensusData, c("stateOfBirthMCA"))
if(length(check_vars) > 0){
CensusData <- build_migration_stateOfBirthMCA_1970(CensusData)
stateOfBirthMCA_just_created <- T
gc()
}
CensusData[ , bornSameStateMCA := as.numeric(stateMinimumComparable == stateOfBirthMCA)]
# Unknown state will be NA
CensusData[stateOfBirthMCA == 99, bornSameStateMCA := NA]
# Foreigners will be zero
CensusData[stateOfBirthMCA == 999, bornSameStateMCA := 0]
gc()
if(stateMinimumComparable_just_created == T){
CensusData[, stateCurrent := NULL]
}
gc()
if(stateOfBirthMCA_just_created == T){
CensusData[, stateOfBirthMCA := NULL]
}
gc()
CensusData
}
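# Illustrative usage (sketch only): 'census1970' is a hypothetical data.frame of 1970
# census microdata that already contains the variables the helper builders above expect.
# census1970 <- build_migration_bornSameStateMCA_1970(census1970)
# table(census1970$bornSameStateMCA, useNA = "ifany")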
/R/build_migration_bornSameStateMCA_1970.R (antrologos/harmonizeIBGE)
# Caching the inverse of a matrix: makeCacheMatrix() wraps a matrix so its inverse can be
# cached; cacheSolve() computes the inverse once and returns the cached copy afterwards.
makeCacheMatrix <- function(x=numeric()){
cache <- NULL
setMatrix <- function(y){
x <<- y
    #reset the cached inverse whenever a new matrix is set
cache <<- NULL
}
getMatrix <- function() x
cacheInverse <- function(solve) cache <<- solve
getInverse <- function() cache
# return a list of functions.
list(setMatrix=setMatrix,
getMatrix=getMatrix,
cacheInverse=cacheInverse,
getInverse=getInverse)
}
cacheSolve <- function(x,...){
inver <- x$getInverse()
if(!is.null(inver)){
message("getting cached data")
return(inver)
}
data <- x$getMatrix()
inver <- solve(data,...)
x$cacheInverse(inver)
inver
}
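# Illustrative usage of the two functions above:
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(m)  # computes the inverse with solve() and caches it
# cacheSolve(m)  # second call prints "getting cached data" and returns the cached inverse
# m$setMatrix(matrix(c(1, 2, 3, 4), nrow = 2))  # setting a new matrix clears the cache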
/cachematrix.R (wolfstudying/ProgrammingAssignment2)
fluidRow(
box(width = 12, title = "Forschungsfrage und Studiendesign", status = "primary",collapsible = TRUE, collapsed = FALSE,
withMathJax(includeMarkdown("texts/text_researchquestion.Rmd"))
),
box(width = 12, title = "Studie 1: Genauigkeit von Schätzparametern", status = "primary",collapsible = TRUE, collapsed = TRUE,
withMathJax(includeMarkdown("texts/text_study_1.Rmd"))
),
box(width = 12, title = "Studie 2: Zuverlässigkeit von LM und HLM", status = "primary",collapsible = TRUE, collapsed = TRUE,
withMathJax(includeMarkdown("texts/text_study_2.Rmd"))
),
box(width = 12, title = "Literatur", status = "primary", collapsible = TRUE, collapsed = TRUE,
withMathJax(includeMarkdown("texts/literatur.Rmd")))
)
/Code/Master_Thesis_App/tabs/tab_simstudy_app.R (noboss93/master_thesis)
\name{ancThresh}
\alias{ancThresh}
\title{Ancestral character estimation under the threshold model using Bayesian MCMC}
\usage{
ancThresh(tree, x, ngen=10000, sequence=NULL, method="mcmc",
model=c("BM","OU","lambda"), control=list(), ...)
}
\arguments{
\item{tree}{phylogenetic tree.}
\item{x}{a named vector containing discrete character states; or a matrix containing the tip species, in rows, and probabilities of being in each state, in columns.}
\item{ngen}{number of generations to run the MCMC.}
\item{sequence}{assumed ordering of the discrete character states. If not supplied and \code{x} is a vector then numerical/alphabetical order is assumed; if not supplied and \code{x} is a matrix, then the column order of \code{x} is used.}
\item{method}{only method currently available is \code{"mcmc"}.}
\item{model}{model for the evolution of the liability. Options are \code{"BM"} (Brownian motion, the default), \code{"OU"} (Ornstein-Uhlenbeck), or \code{"lambda"} (the lambda model).}
\item{control}{list containing the following elements: \code{sample}, the sampling interval; \code{propliab} variance of the proposal distribution for liabilities; \code{propthresh} variance on the proposal distribution for the thresholds; \code{propalpha} variance on the proposal distribution for \code{alpha} (for \code{model="OU"}); \code{pr.anc} prior probability distribution on the ancestral states for each node, in a matrix - not all nodes need to be supplied; \code{pr.th} prior density on the thresholds; \code{burnin} number of generations to exclude for burn-in when plotting posterior probabilities on the tree; \code{plot} logical value indicating whether or not to plot the posterior probabilities; \code{print} logical value indicating whether or not to print the state of the MCMC; \code{piecol} colors for the posterior probabilities plotted as pie charts at internal nodes; and \code{tipcol} which indicates whether the tip colors should be based on the input data (\code{"input"}) or sampled tip liabilities (\code{"estimated"}). These will only differ if there is uncertainty in the tip states.}
\item{...}{additional arguments to be passed to \code{\link{plotThresh}} (called internally).}
}
\description{
This function uses Bayesian MCMC to estimate ancestral states and thresholds for a discrete character under the threshold model from quantitative genetics (Felsenstein 2012).
}
\details{
\code{print} and \code{plot} S3 methods are now available for the object class \code{"ancThresh"}.
}
\value{
This function returns an object of class \code{"ancThresh"} containing the posterior sample from our analysis, along with other components.
}
\references{
Felsenstein, J. (2012) A comparative method for both discrete and continuous characters using the threshold model. \emph{American Naturalist}, \bold{179}, 145-156.
Revell, L. J. (2014) Ancestral character estimation under the threshold model from quantitative genetics. \emph{Evolution}, \bold{68}, 743-759.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{anc.Bayes}}, \code{\link{threshBayes}}
}
\keyword{phylogenetics}
\keyword{comparative method}
\keyword{bayesian}
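\examples{
\dontrun{
## Illustrative sketch only: the tree and character below are simulated placeholders,
## not a worked example from the package.
tree <- pbtree(n = 30, scale = 1)
x <- setNames(sample(c("small", "medium", "large"), 30, replace = TRUE), tree$tip.label)
mcmc <- ancThresh(tree, x, ngen = 10000, sequence = c("small", "medium", "large"))
plot(mcmc)
}
}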
/man/ancThresh.Rd (olmen/phytools)
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
library(rgdal)
library(ggplot2)
library(maptools)
library(rgeos)
library(plyr)
library(tools)
library(mapproj)
library(raster)
library(zoo)
library(ggspatial)
library(outliers)
library(rmarkdown)
DataSelect <- function(area, year, path=NA, data=NA, observer="all", seat="all", strata="all", species="all", method="other", zeroes=FALSE, endpts=TRUE){
#if no R object is passed as data
if(length(data)<1){
if(!is.na(path)){year=1}
if(is.na(path)){
if(area=="ykd"){path = system.file("external/YKD8516dat.csv", package="AKaerial")}
else{
if (area=="ykg"){path = system.file("external/YKG8516dat.csv", package="AKaerial")}
else{
if (area=="acp"){path = system.file("external/ACP9717dat.csv", package="AKaerial")}
else{
print("Area not specified or incorrect.")
break
}}}
}
data=read.csv(path, header=TRUE)
}
else{year=1}
if(year==1){year=c(as.numeric(unique(data$yr[which.min(data$yr)])),as.numeric(unique(data$yr[which.max(data$yr)]))) }
print("check year")
if (length(year)==1){year= rep(year, 2)}
data$yr=zoo::na.approx(data$yr)
data=data[data$yr>=year[1] & data$yr<=year[2],]
print("SpatialNA")
data=SpatialNA(data)
print("PointsToStrata")
data=PointsToStrata(data,area)
print("Filters")
if(strata != "all"){data=data[data$strat %in% strata,]}
data$obs=sapply(data$obs, toupper)
data$obs[data$obs=="WL"]="WWL"
data$obs[data$obs=="RM"]="RDM"
if(observer != "all"){data=data[data$obs==observer,]}
data$se=sapply(data$se, toupper)
if(seat != "all"){data=data[data$se==seat,]}
data$sppn=sapply(data$sppn, toupper)
data$sppn=ShouldBe(data$sppn)
data$grp=as.numeric(as.character(data$grp))
if(species != "all"){
species2=c(species, "START", "ENDPT")
data=data[data$sppn %in% species2,]
}
print("CorrectTrans")
data=CorrectTrans(data, area=area)
print("CorrectUnit")
data=CorrectUnit(data)
data=droplevels(data)
if(species != "all"){
obs.data=data[data$sppn %in% species,]
}
print("TransSummary")
flight=TransSummary(data, area)
if(zeroes==TRUE){data=MakeZeroes(data)}
if(endpts==FALSE){data=data[data$sppn != "START" & data$sppn != "ENDPT", ]}
data=AdjustCounts(data)
data=list("obs"=data, "flight"=flight)
if(method=="transect"){data=TransData(data)}
return(data)
}
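#Illustrative call (sketch only; the year range and species code below are placeholders):
# sel=DataSelect(area="ykd", year=c(2010,2016), species="SPEI", method="transect")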
CorrectUnit=function(full.data){
acceptable=c("open", "single", "pair","flkdrake")
for (i in 1:length(full.data$unit)){
if(full.data$unit[i] %in% acceptable){next}
print(paste("Nonsense detected. Unit ", full.data$unit[i], " is not acceptable."))
}
full.data=full.data[full.data$unit %in% acceptable,]
return(full.data)
}
TransData=function(selected.data){
#groupings list
unit.list=c("single", "pair","open", "flkdrake")
#list of years
yr.list=as.character(unique(selected.data$flight$yr))
#list of species
sp.list=as.character(unique(selected.data$obs$sppn))
#grid method
for (observer in unique(selected.data$flight$obs)){
print(observer)
yr.list=unique(selected.data$flight$yr[selected.data$flight$obs==observer])
for (year in yr.list){
tran.list=unique(selected.data$flight$part.of[selected.data$flight$yr==year & selected.data$flight$obs==observer])
new.rows=expand.grid(year, NA, NA, NA, observer, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, sp.list, 0, unit.list, tran.list,0,0,0)
print(length(new.rows))
print(names(selected.data$obs))
names(new.rows)=names(selected.data$obs)
selected.data$obs=rbind(selected.data$obs,new.rows)
print(year)
}
}
#cycle through, check each transect/observer combo for each species
#for (h in 1:length(yr.list)){
# sub.data=selected.data$obs[selected.data$obs$yr==yr.list[h],]
# obs.list=unique(as.character(selected.data$flight$obs[selected.data$flight$yr==yr.list[h]]))
# print(paste("Making zeroes for year ", yr.list[h]))
# for (i in 1:length(sp.list)){
# sub.data=sub.data[sub.data$sppn==sp.list[i],]
# for (j in 1:length(obs.list)){
# print(paste("Observer ", obs.list[j]))
# tran.list=unique(selected.data$flight$part.of[selected.data$flight$yr==yr.list[h] & selected.data$flight$obs==obs.list[j]])
# sub.data=sub.data[as.character(sub.data$obs)==obs.list[j],]
# for (k in 1:length(tran.list)){
# sub.data=sub.data[as.character(sub.data$ctran)==tran.list[k],]
# for (m in 1:length(unit.list)){
#skip if count exists
#if(any(as.character(selected.data$obs$sppn)==sp.list[i] & as.character(selected.data$obs$ctran)==tran.list[k] & as.character(selected.data$obs$obs)==obs.list[j] & selected.data$obs$yr==yr.list[h] & selected.data$obs$unit==unit.list[m]))
# if(any(sub.data$unit==unit.list[m]))
# {next}
#add the 0 row
# new.row=c(yr.list[h], NA, NA, NA, obs.list[j], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, sp.list[i], 0, unit.list[m], NA, NA, tran.list[k], NA, 0)
# selected.data$obs=rbind(selected.data$obs,new.row)
# } #end unit
# } #end tran
# } #end obs
# } #end sp
#} #end yr
selected.data$obs$grp=as.numeric(selected.data$obs$grp)
agg=aggregate(cbind(grp,itotal,total,ibb)~yr+obs+sppn+unit+ctran,data=selected.data$obs, FUN=sum)
colnames(agg)=c("yr", "obs", "sppn", "unit", "ctran", "grp", "itotal", "total", "ibb")
agg$area=0
agg$strata="none"
for(g in 1:length(agg$area)){
agg$area[g]=sum(selected.data$flight$sampled.area[selected.data$flight$yr==agg$yr[g] & selected.data$flight$obs==agg$obs[g] & selected.data$flight$part.of==agg$ctran[g]])
agg$strata[g]=selected.data$flight$strata[selected.data$flight$yr==agg$yr[g] & selected.data$flight$obs==agg$obs[g] & selected.data$flight$part.of==agg$ctran[g]][1]
}
return(agg[order(agg$yr, agg$obs, agg$sppn, as.numeric(agg$ctran), agg$unit),])
}
SplitDesign <- function(area, SegCheck=FALSE, file.name, layer.name){
#design=readOGR("D:/CharlesFrost/AKaerial/data/TransectDesign/YKD_trans_B.shp", "YKD_trans_B")
#file.name = system.file("external/YKD_trans_B.shp", package="AKaerial")
design=readOGR(file.name, layer.name, verbose=FALSE)
design.proj <- spTransform(design, "+proj=longlat +ellps=WGS84")
strata.proj=LoadMap(area, type="proj")
newlines = raster::intersect(design.proj, strata.proj)
newlines@data$id=rownames(newlines@data)
newlines.fort=fortify(newlines, region="STRAT")
newlines.df=join(newlines.fort, newlines@data, by="id")
if(area=="ykg"){
newlines.df=newlines.df[,c("long","lat", "order", "piece", "id", "OBJECTID", "STRAT")]
}
if(area=="ykd"){
newlines.df=newlines.df[,c("long","lat", "order", "piece", "id", "OBJECTID", "STRAT")]
}
#newlines.df$order[newlines.df$order==3]=1
#newlines.df$order[newlines.df$order==4]=2
#newlines.df$order[newlines.df$order==5]=1
#newlines.df$order[newlines.df$order==6]=2
newlines.df$order[(newlines.df$order %% 2)==1]=1
newlines.df$order[(newlines.df$order %% 2)==0]=2
if(SegCheck==TRUE){
temp=aggregate(newlines.df$order~newlines.df$OBJECTID+newlines.df$STRAT, FUN="length")
colnames(temp)=c("original", "strata", "segs")
temp$segs=temp$segs/2
temp=temp[order(temp$original, temp$strata),]
write.table(temp, file="segcheck.txt", quote=FALSE, row.names=FALSE)
}
colnames(newlines.df)[6]="original"
newlines.df$id=rep(1:(length(newlines.df$id)/2), each=2)
return(newlines.df)
}
LoadMap <- function(area, type="df") {
if(area=="acp"){
map = system.file("external/a483web7_polygon.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/a483web7 polygon.shp"
lay="a483web7_polygon"
}
if(area=="ykd"){
map = system.file("external/newaird3_polygon.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/newaird3_polygon.shp"
lay="newaird3_polygon"
}
if(area=="ykg"){
map = system.file("external/YKG__2018_MemoAnalysisStrata.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/StratificationForHodgesAnalysis2.shp"
lay="YKG__2018_MemoAnalysisStrata"
}
if(area=="crd"){
map = system.file("external/CRD_2018_AnalysisStrata.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/CRD_2018_AnalysisStrata.shp"
lay="CRD_2018_AnalysisStrata"
}
maptools::gpclibPermit()
strata <- readOGR(map, lay, verbose=FALSE)
strata.proj <- spTransform(strata, "+proj=longlat +ellps=WGS84")
strata.proj@data$id = rownames(strata.proj@data)
#ifelse(area=="acp", strata.fort <- fortify(strata.proj, region="STRATNAME"), strata.fort <- fortify(strata.proj, region="STRAT"))
strata.fort <- fortify(strata.proj, region="id")
strata.df=join(strata.fort, strata.proj@data, by="id")
if(type=="df") {return(strata.df)}
if(type=="proj") {return(strata.proj)}
}
ViewStrata <- function(area, year=NULL, ViewTrans=FALSE, strata="all", numbers=FALSE) {
GIS.obj = LoadMap(area)
if(strata=="all"){
if(area=="acp" || area=="crd"){
strata.plot <- ggplot() +
geom_path(data=GIS.obj, aes(long,lat,group=group) ) +
geom_path(color="black") +
coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
#scale_fill_manual(name="Strata", values=c("red","green","yellow","grey", "orange"))
}
if(area=="ykd"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
if(area=="ykg"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
}
if(strata!="all"){
data=GIS.obj[as.character(GIS.obj$STRATNAME) %in% strata,]
if(area=="acp" || area=="crd"){
strata.plot <- ggplot() +
geom_path(data=data, aes(long,lat,group=group) ) +
geom_path(color="black", lwd=1.5) +
coord_map(xlim=c(min(data$long), max(data$long)), ylim=c(min(data$lat), max(data$lat))) +
scale_x_continuous("Longitude (degrees)") + scale_y_continuous("Latitude (degrees)")
}
if(area=="ykd"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
if(area=="ykg"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
}
if(ViewTrans){
trans=TranSelect(year=year, area=area)
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
trans.proj@data$id = rownames(trans.proj@data)
trans.df=fortify(trans.proj, region=OBJECTID)
trans.df=join(trans.df,trans.proj@data, by="id")
trans.labels=trans.df[trans.df$order==1,]
strata.plot = strata.plot +
geom_path(data=trans.df, aes(x=long, y=lat, group=group))
if(numbers==TRUE){
strata.plot = strata.plot +
geom_text(data=trans.labels, aes(x=long, y=lat, label=OBJECTID))
}
}
print(strata.plot)
return(strata.plot)
}
AdjustCounts <- function(full.data){
full.data$grp=as.numeric(as.character(full.data$grp))
for (i in 1:length(full.data$grp)){
#0 out the coded start and end points (codes are logged in the species column)
if(full.data$sppn[i]=="START" | full.data$sppn[i]=="ENDPT" | full.data$sppn[i]=="start" | full.data$sppn[i]=="end" | full.data$sppn[i]=="ENDT" | full.data$sppn[i]=="BEGT") {
full.data$itotal[i]=0
full.data$total[i]=0
full.data$ibb[i]=0
}
#Double singles for indicated totals
else if(full.data$unit[i]=="single") {
full.data$itotal[i]=2*full.data$grp[i]
full.data$ibb[i]=2*full.data$grp[i]
full.data$total[i]=full.data$grp[i]
}
#Pairs are doubled for both total and indicated total
else if(full.data$unit[i]=="pair") {
full.data$itotal[i]=2*full.data$grp[i]
full.data$ibb[i]=2*full.data$grp[i]
full.data$total[i]=2*full.data$grp[i]
}
#Open indicates a flock, nothing doubled, zero for ibb
else if(full.data$unit[i]=="open"){
full.data$itotal[i]=full.data$grp[i]
full.data$total[i]=full.data$grp[i]
full.data$ibb[i]=0
}
#Flocked drakes are doubled for 1-4 seen for indicated bb/totals. Reference would be useful.
else if(full.data$unit[i]=="flkdrake" & full.data$grp[i]<5){
full.data$itotal[i]=2*full.data$grp[i]
full.data$total[i]=full.data$grp[i]
full.data$ibb[i]=2*full.data$grp[i]
}
#Flocked drakes 5 and above aren't doubled because of science stuff, 0 for ibb.
else if(full.data$unit[i]=="flkdrake" & full.data$grp[i]>4){
full.data$itotal[i]=full.data$grp[i]
full.data$total[i]=full.data$grp[i]
full.data$ibb[i]=0
}
}
sppn=unique(full.data$sppn)
# for (i in 1:length(sppn)){
# itot=sppntable$itot[as.character(sppntable$sppn)==as.character(sppn[i])]
# full.data$itotal[full.data$sppn==as.character(sppn[i])]=itot*full.data$itotal[full.data$sppn==as.character(sppn[i])]
# }
return(full.data)
}
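#Worked sketch of the adjustment rules above (hypothetical rows):
#  unit      grp  ->  total  itotal  ibb
#  single     3   ->    3      6      6   (singles doubled for indicated totals)
#  pair       2   ->    4      4      4   (pairs doubled for all three)
#  open      10   ->   10     10      0   (flocks never doubled, no indicated breeding birds)
#  flkdrake   3   ->    3      6      6   (1-4 flocked drakes doubled for itotal/ibb)
#  flkdrake   6   ->    6      6      0   (5 or more flocked drakes not doubled, no ibb)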
CountsTable=function(adj.counts) {
t1=(aggregate(adj.counts$total~adj.counts$yr+adj.counts$obs+adj.counts$ctran+adj.counts$sppn+adj.counts$strata, FUN=sum))
t2=(aggregate(adj.counts$itotal~adj.counts$yr+adj.counts$obs+adj.counts$ctran+adj.counts$sppn+adj.counts$strata, FUN=sum))
t2b=aggregate(adj.counts$ibb~adj.counts$yr+adj.counts$obs+adj.counts$ctran+adj.counts$sppn+adj.counts$strata, FUN=sum)
t3=merge(t1,t2,by=c("adj.counts$yr", "adj.counts$obs","adj.counts$ctran", "adj.counts$sppn", "adj.counts$strata"))
t3=merge(t3,t2b,by=c("adj.counts$yr", "adj.counts$obs","adj.counts$ctran", "adj.counts$sppn", "adj.counts$strata"))
colnames(t3)=c("yr","obs","ctran", "sppn", "strata", "total", "itotal", "ibb")
return(t3[order(t3$yr, t3$obs, t3$sppn, as.numeric(t3$ctran)),])
} #end CountsTable
PointsToStrata=function(full.data, area){
#full.data=SpatialNA(full.data)
x=na.approx(full.data$long)
y=na.approx(full.data$lat)
sp=cbind(x,y)
sp=SpatialPoints(sp)
proj4string(sp)=CRS("+proj=longlat +ellps=WGS84")
sp=spTransform(sp, "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0
+datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0")
map=LoadMap(area, type="proj")
map=spTransform(map, "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0
+datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0")
full.data$strat=over(sp,map)$STRAT
if(area != "crd"){
for(a in 1:length(full.data$strat)){
#print(a)
#If NA found, replace with strata type shared by entries on that transect
#This assumes the click/logging was actually over the strata and not ACTUALLY early/late
if(is.na(full.data$strat[a])){
temp=full.data[full.data$tran==full.data$tran[a],]
#full.data$strat[a]=names(sort(table(temp$strat), decreasing=TRUE))[1]
if(length(temp$strat)>0){
full.data$strat[a]=names(which.max(table(temp$strat)))
}
else{full.data$strat[a]="undefined"}
}
}
}
return(full.data)
}
TranSelect = function(year, area){
if(area=="ykg"){area="ykd"
trans=list("file"="YKG_2018_MemoTrans.shp", "layer"="YKG_2018_MemoTrans")
}
if(area=="crd"){
trans=list("file"="CRD_2018_Transects.shp", "layer"="CRD_2018_Transects")
return(trans)
}
#area=toupper(area)
#year=as.numeric(year)
#if((year %% 4)==1){letter="B"}
#if((year %% 4)==2){letter="C"}
#if((year %% 4)==3){letter="D"}
#if((year %% 4)==0){letter="A"}
#trans.layer=paste(area, "trans", letter, sep="_")
#trans.file=paste(trans.layer,".shp", sep="")
#trans=list("file"=trans.file, "layer"=trans.layer)
return(trans)
}
Densities=function(data, n.obs=1, trans.method="gis", trans.width=.2, area, output=TRUE) {
#data=read.csv(file=(system.file("external/YKG17_HMW_MAS.v7.5.17.csv", package="AKaerial")), header=TRUE)
#trans=TranSelect(year=data$yr[1], area=area)
#trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
#trans.layer=trans$layer
shp=LoadMap(area=area,type="proj")
#Save old warning settings to revert to before leaving function
oldw <- getOption("warn")
#Suppress spatial warnings inside function call
options(warn=-1)
#Attach strata ownership to data points (moved to DataSelect)
#data=PointsToStrata(data,area=area)
#Change col type from factor to character
#data$strat=as.character(data$strat)
#Compile the length (d), strata (type), area covered (des.area), and original trans name in data (original)
#Splits transects by strata if needed
#transects=TransectTable(trans.file=trans.file, trans.layer=trans.layer, method=trans.method, area=area, obs=n.obs)
#transects=transects[,-1]
#Moved NA function to DataSelect
#Find any NA for strata ownership (caused by late/early click on receiver in plane)
#for(a in 1:length(data$strat)){
#If NA found, replace with strata type shared by entries on that transect
#This assumes the click/logging was actually over the strata and not ACTUALLY early/late
# if(is.na(data$strat[a])){
# temp=data[data$tran==data$tran[a],]
# data$strat[a]=names(sort(table(temp$strat), decreasing=TRUE))[1]
# }
#}
#Compute the total/indicated total for the group sizes indicated in the data
#adj.counts=AdjustCounts(data)
#Sum the counts by combinations of species/transect
counts.t=CountsTable(data)
counts.t$area=0
for (i in 1:length(counts.t$area)){
counts.t$area[i]=data$area[data$yr==counts.t$yr[i] & data$obs==counts.t$obs[i] & data$ctran==counts.t$ctran[i]][1]
}
#Add a row with a 0 count for every species counted somewhere in the data but not on a given transect
#t3=MakeZeroes(data, counts.t)
t3=counts.t
#t4=merge(t3, transects, by.x=c("tran", "strat"), by.y=c("original", "type"))
#Remove transect start and end from species list
t3=t3[t3$sppn != "ENDPT",]
t3=t3[t3$sppn != "START",]
#t3=t3[t3$sppn != "end",]
#t3=t3[t3$sppn != "start",]
#t3=t3[t3$sppn != "ENDT",]
#t3=t3[t3$sppn != "BEGT",]
#Make sure totals are numeric
t3$total=as.numeric(t3$total)
t3$itotal=as.numeric(t3$itotal)
t3$ibb=as.numeric(t3$ibb)
#t3$des.area=0
#Sum the sampled areas for segments of the same transect
#trans.area=aggregate(des.area~type+original, transects, sum)
#des.area.sum=aggregate(des.area~type, transects,sum)
#for (a in 1:length(t3$sppn)){
# if(any(trans.area$des.area[trans.area$original==t3$tran[a] & trans.area$type==t3$strat[a]])){
# t3$des.area[a]=trans.area$des.area[trans.area$original==t3$tran[a] & trans.area$type==t3$strat[a]]
# }
#}
#Trim off nonsense pieces created by gis clipping
#t3=t3[t3$des.area>.5,]
#Sum the counts of each species by strata type
sp.strat.total=aggregate(t3$total~t3$yr+t3$obs+t3$sppn+t3$strata, FUN=sum)
colnames(sp.strat.total)=c("yr","obs", "sppn", "strata", "total")
sp.strat.itotal=aggregate(t3$itotal~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=sum)
colnames(sp.strat.itotal)=c("yr","obs", "sppn", "strata", "itotal")
sp.strat.ibb=aggregate(t3$ibb~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=sum)
colnames(sp.strat.ibb)=c("yr","obs", "sppn", "strata", "ibb")
#Variance of the counts within each strata
sp.strat.total.v=aggregate(t3$total~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=var)
colnames(sp.strat.total.v)=c("yr", "obs", "sppn", "strata", "total.v")
sp.strat.itotal.v=aggregate(t3$itotal~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=var)
colnames(sp.strat.itotal.v)=c("yr", "obs", "sppn", "strata", "itotal.v")
sp.strat.ibb.v=aggregate(t3$ibb~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=var)
colnames(sp.strat.ibb.v)=c("yr","obs", "sppn", "strata", "ibb.v")
sp.strat=merge(sp.strat.total, sp.strat.itotal)
sp.strat=merge(sp.strat, sp.strat.ibb)
sp.strat.v=merge(sp.strat.total.v, sp.strat.itotal.v)
sp.strat.v=merge(sp.strat.v, sp.strat.ibb.v)
#Put the totals together and leave placeholders for var and cov
sp.strat.final=merge(sp.strat, sp.strat.v)
sp.strat.final$total.cov=0
sp.strat.final$itotal.cov=0
sp.strat.final$var.N=0
sp.strat.final$var.Ni=0
sp.strat.final$ibb.cov=0
sp.strat.final$var.Nib=0
#Calculate covariance of total counts and area sampled
for (i in 1:length(sp.strat.final$strata)){
temp.t3=t3[t3$yr==sp.strat.final$yr[i] & t3$obs==sp.strat.final$obs[i] & t3$sppn==sp.strat.final$sppn[i] & t3$strata==sp.strat.final$strata[i],]
sp.strat.final$total.cov[i]=cov(temp.t3$total, temp.t3$area)
sp.strat.final$itotal.cov[i]=cov(temp.t3$itotal, temp.t3$area)
sp.strat.final$ibb.cov[i]=cov(temp.t3$ibb, temp.t3$area)
}
#Calculate the total area by type and the variance of the areas
area.strat=aggregate(t3$area~t3$yr+t3$obs+t3$strata+t3$sppn, FUN=sum)
area.strat.v=aggregate(t3$area~t3$yr+t3$obs+t3$strata+t3$sppn, FUN=var)
colnames(area.strat)=c("yr", "obs", "strata", "sppn","total.area")
colnames(area.strat.v)=c("yr", "obs", "strata", "sppn", "total.area.var")
area.strat=area.strat[!duplicated(area.strat[1:3]),-4]
area.strat.v=area.strat.v[!duplicated(area.strat.v[1:3]),-4]
#Put spatial summary together
area.summary=merge(area.strat, area.strat.v)
#print(area.summary)
#Merge the counts and spatial stats
counts.final=merge(sp.strat.final,area.summary, by=c("yr", "obs", "strata"))
#Calculate final densities for each strata layer
density.total=counts.final$total/counts.final$total.area
density.itotal=counts.final$itotal/counts.final$total.area
density.ibb=counts.final$ibb/counts.final$total.area
counts.final=cbind(counts.final, density.total, density.itotal, density.ibb)
#print(head(counts.final))
#Get actual areas from gis layers
strata.area=aggregate(shp@data$AREA~shp@data$STRATNAME, FUN=sum)
colnames(strata.area)=c("strata", "layer.area")
#Convert from m^2 to km^2
strata.area$layer.area=strata.area$layer.area / 1000000
#print(strata.area)
counts.final=merge(counts.final, strata.area, by="strata")
#Extrapolate density estimates across area calculation
total.est=counts.final$density.total * counts.final$layer.area
itotal.est=counts.final$density.itotal * counts.final$layer.area
ibbtotal.est=counts.final$density.ibb * counts.final$layer.area
counts.final=cbind(counts.final, total.est, itotal.est,ibbtotal.est)
#Summarize in table
estimates=aggregate(counts.final$total.est~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(estimates)=c("yr", "obs", "sppn", "total.est")
estimates.i=aggregate(counts.final$itotal.est~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(estimates.i)=c("yr", "obs", "sppn","itotal.est")
estimates.ibb=aggregate(counts.final$ibbtotal.est~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(estimates.ibb)=c("yr", "obs", "sppn","ibbtotal.est")
estimates=merge(estimates, estimates.i, by=c("yr", "obs", "sppn"))
estimates=merge(estimates, estimates.ibb, by=c("yr", "obs", "sppn"))
#adj.counts=merge(adj.counts, transects, by.x="tran", by.y="original")
### Var(N) ###
#Keep projection consistent
shp.proj <- spTransform(shp, "+proj=longlat +ellps=WGS84")
shp.proj@data$id = rownames(shp.proj@data)
shp.fort <- fortify(shp.proj, region="STRATNAME")
shp.df=join(shp.fort, shp.proj@data, by="id")
##extract min and max lat from shp.df, calc gcd and / by sampled width
min.lat=aggregate(shp.df$lat~shp.df$id, FUN=min)
max.lat=aggregate(shp.df$lat~shp.df$id, FUN=max)
piece.min.lat=aggregate(shp.df$lat~shp.df$piece+shp.df$id, FUN=min)
colnames(piece.min.lat)=c("piece", "id", "min")
piece.max.lat=aggregate(shp.df$lat~shp.df$piece+shp.df$id, FUN=max)
colnames(piece.max.lat)=c("piece", "id", "max")
pieces=data.frame("id"=piece.max.lat$id, "min"=piece.min.lat$min, "max"=piece.max.lat$max)
pieces=pieces[order(pieces$id, -pieces$max),]
#Find holes between shape polygons
voids=FindVoids(pieces=pieces)
#111.5 km in 1 deg lat
diff.lat=data.frame("strata"=min.lat[,1], "diff"=abs(max.lat[,2]-min.lat[,2])*111.5)
#If there are voids in strata, remove them from possible sample area
diff.lat$strata=as.character(diff.lat$strata)
for (i in 1:length(diff.lat$strata)){
if (diff.lat$strata[i] %in% voids$id){diff.lat$diff[i]=diff.lat$diff[i]-111.5*voids$d[voids$id==diff.lat$strata[i]]}
}
#Total possible transects available (M)
diff.lat$M=diff.lat$diff/(trans.width*n.obs)
#print(transects)
#Number of transects sampled (m of a possible M)
#reps=aggregate(transects$original~transects$type, FUN=length)
#reps=data.frame("strata"=transects$type, "original"=transects$original)
#reps=unique(reps)
#print(reps)
reps2=aggregate(t3$ctran~t3$yr+t3$obs+t3$strata+t3$sppn, FUN=length)
colnames(reps2)=c("yr", "obs", "strata", "sppn","m")
reps2=reps2[!duplicated(reps2[1:3]),-4]
diff.lat=merge(diff.lat, reps2, by="strata")
diff.lat=merge(diff.lat, area.summary, by=c("yr", "obs", "strata"))
print(diff.lat)
#print(diff.lat)
#See equation 12.9, p. 249 in "Analysis and Management of Animal Populations"
#Williams, Nichols, Conroy; 2002
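  #In the notation used below this is (assumed reading of eq. 12.9):
  #  var(N.hat) = M^2 * ((1 - m/M)/m) * (var(y) + D.hat^2 * var(x) - 2 * D.hat * cov(y, x))
  #where y are transect counts, x are transect areas, D.hat is the stratum density,
  #and m of the M possible transects were sampled.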
for (j in 1:length(counts.final$sppn)){
M=diff.lat$M[diff.lat$yr==counts.final$yr[j] & diff.lat$obs==counts.final$obs[j] & diff.lat$strata==counts.final$strata[j]]
m=diff.lat$m[diff.lat$yr==counts.final$yr[j] & diff.lat$obs==counts.final$obs[j] & diff.lat$strata==counts.final$strata[j]]
prop.m=((1-(m/M))/m)
#if(counts.final$sppn[j]=="SPEI"){print((counts.final$total.v[j]+(counts.final$density.total[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.total[j]*counts.final$total.cov[j])))}
counts.final$var.N[j]=(M^2)*prop.m*(counts.final$total.v[j]+(counts.final$density.total[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.total[j]*counts.final$total.cov[j]))
counts.final$var.Ni[j]=(M^2)*prop.m*(counts.final$itotal.v[j]+(counts.final$density.itotal[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.itotal[j]*counts.final$itotal.cov[j]))
counts.final$var.Nib[j]=(M^2)*prop.m*(counts.final$ibb.v[j]+(counts.final$density.ibb[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.ibb[j]*counts.final$ibb.cov[j]))
}
var.est=aggregate(counts.final$var.N~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(var.est)=c("yr", "obs", "sppn","var.N")
var.est.i=aggregate(counts.final$var.Ni~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(var.est.i)=c("yr", "obs", "sppn","var.Ni")
var.est.ibb=aggregate(counts.final$var.Nib~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(var.est.ibb)=c("yr", "obs", "sppn","var.Nib")
estimates=merge(estimates, var.est, by=c("yr", "obs", "sppn"), all=TRUE)
estimates=merge(estimates, var.est.i, by=c("yr", "obs", "sppn"), all=TRUE)
estimates=merge(estimates, var.est.ibb, by=c("yr", "obs", "sppn"), all=TRUE)
estimates$SE=sqrt(estimates$var.N)
estimates$SE.i=sqrt(estimates$var.Ni)
estimates$SE.ibb=sqrt(estimates$var.Nib)
estimates$total.est=as.integer(estimates$total.est)
estimates$itotal.est=as.integer(estimates$itotal.est)
estimates$ibbtotal.est=as.integer(estimates$ibbtotal.est)
options(warn = oldw)
#Output tables to txt files if requested
if(output==TRUE){
write.table(counts.final, file="finalcounts.txt", quote=FALSE, row.names=FALSE)
write.table(estimates, file="estimates.txt", quote=FALSE, row.names=FALSE)
}
return(estimates)
}
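#Illustrative pipeline (sketch only; arguments are placeholders and the data step assumes
#transect-level output from DataSelect):
# sel=DataSelect(area="ykd", year=2016, method="transect")
# est=Densities(data=sel, area="ykd", n.obs=2, output=FALSE)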
CombineEstimates=function(estimates){
yr.list=unique(estimates$yr)
sp.list=unique(estimates$sppn)
combined=data.frame(yr=rep(yr.list, each=length(unique(estimates$sppn))), sppn=rep(sp.list, length(yr.list)), total=0, total.var=0, total.se=0, itotal=0, itotal.var=0, itotal.se=0, ibb=0, ibb.var=0, ibb.se=0)
for(i in 1:length(combined$yr)){
combined$total[i]=mean(estimates$total.est[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])
combined$itotal[i]=mean(estimates$itotal.est[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])
combined$ibb[i]=mean(estimates$ibbtotal.est[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])
combined$total.var[i]=sum(estimates$var.N[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])/(length(estimates$var.N[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])^2)
combined$itotal.var[i]=sum(estimates$var.Ni[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])/(length(estimates$var.Ni[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])^2)
combined$ibb.var[i]=sum(estimates$var.Nib[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])/(length(estimates$var.Nib[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])^2)
}
combined$total.se=sqrt(combined$total.var)
combined$itotal.se=sqrt(combined$itotal.var)
combined$ibb.se=sqrt(combined$ibb.var)
return(combined)
}
MakeZeroes=function(full.data){
#Appends a count of 0 to transects that were flown but did not record any of a given species
#list of years
year.list=as.numeric(unique(full.data$yr))
#list of observers
obs.list=as.character(unique(full.data$obs))
#list of transects
tran.list=as.character(unique(full.data$ctran))
#list of species
sp.list=as.character(unique(full.data$sppn))
#cycle through, check each transect/observer combo for each species
for (h in 1:length(year.list)){
print(paste("Making zeroes for ", year.list[h]))
for (i in 1:length(sp.list)){
for (j in 1:length(tran.list)){
for (k in 1:length(obs.list)){
#skip if count exists
if(any(as.character(full.data$sppn)==sp.list[i] & as.character(full.data$ctran)==tran.list[j] & as.character(full.data$obs)==obs.list[k] & full.data$yr==year.list[h]))
{next}
#make sure that transect was flown
if(any(as.character(full.data$obs)==obs.list[k] & as.character(full.data$ctran)==tran.list[j] & full.data$yr==year.list[h]))
{
#add the 0 row
new.row=c(year.list[h], NA, NA, NA, obs.list[k], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, sp.list[i], 0, "open", NA, NA, tran.list[j], NA, 0)
full.data=rbind(full.data,new.row)
}
} #end obs
} #end tran
} #end sp
} #end year
return(full.data)
}
TransectTable <- function(trans.file, trans.layer, obs=1, method, area) {
if(method=="gis" & area != "acp"){
#split the design shp file into component segments (file has each transect cutting through
#multiple strata and keeping same number)
trans.points=SplitDesign(file.name=trans.file, layer.name=trans.layer, area=area)
id=trans.points$id
x=trans.points$long
y=trans.points$lat
}
if(method=="gis" & area == "acp"){
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
strata.proj=LoadMap(area, type="proj")
newlines = raster::intersect(trans.proj, strata.proj)
newlines@data$id=rownames(newlines@data)
newlines.fort=fortify(newlines, region="STRAT")
trans.points=join(newlines.fort, newlines@data, by="id")
id=trans.points$id
x=vector("numeric",length=2*length(unique(trans.points$id)))
y=vector("numeric",length=2*length(unique(trans.points$id)))
id=unique(trans.points$id)
original=vector("character",length=length(unique(trans.points$id)))
renum=vector("character",length=length(unique(trans.points$id)))
type=vector("character",length=length(unique(trans.points$id)))
for(i in 1:length(id)){
x[2*i-1]=trans.points$long[trans.points$id==id[i] & trans.points$order==1]
x[2*i]=trans.points$long[trans.points$order==which.max(trans.points$order[trans.points$id==id[i]]) & trans.points$id==id[i]]
y[2*i-1]=trans.points$lat[trans.points$id==id[i] & trans.points$order==1]
y[2*i]=trans.points$lat[trans.points$order==which.max(trans.points$order[trans.points$id==id[i]]) & trans.points$id==id[i]]
original[i]=trans.points$ORIGID[trans.points$id==id[i] & trans.points$order==1]
renum[i]=as.character(trans.points$OBJECTID[trans.points$id==id[i] & trans.points$order==1])
type[i]=trans.points$STRAT[trans.points$id==id[i] & trans.points$order==1]
}
}
#pulls in design file that is a list of start and end points
if(method=="text"){
trans.points=trans.file
code=as.character(trans.points$ident)
id=code
side=code
original=NULL
for (i in 1:length(unique(code))){
id[i]=as.numeric(substr(code[i],nchar(code[i])-2, nchar(code[i])-1))
side[i]=substr(code[i], nchar(code[i]), nchar(code[i]))
}
}
#either method above results in x,y and the following code calculates great circle distance
#for each set of coordinates
width=obs*.2
id=as.numeric(id)
d=vector("numeric",length=length(unique(id)))
#type=vector("character", length=length(unique(id)))
sp=cbind(x,y)
sp.which=seq(1,length(x), by=2)
sp.set=data.frame(x=((sp[sp.which,1]+sp[sp.which+1,1])/2), y=sp[sp.which,2])
sp.set=SpatialPoints(sp.set)
proj4string(sp.set)=CRS("+proj=longlat +ellps=WGS84")
if(method=="text"){type=over(sp.set,LoadMap(area, type="proj"))$STRATNAME}
#if(method=="gis"){type=trans.points$STRAT[seq(1,length(trans.points$STRAT), by=2)]}
#gcd.slc works best for a matrix of coords
for (i in 1:length(d)){
coords=matrix(c(x[2*i-1], x[2*i], y[2*i-1], y[2*i]), nrow=2, ncol=2)
d[i]=gcd.slc(coords[1,1], coords[1,2], coords[2,1], coords[2,2])
}
output=data.frame(oldid=original, newid=renum, d=d, type=type, des.area=width*d)
output=output[output$d>0.25,]
#if(method=="gis"){
# id=unique(id)
# for(i in seq(1,length(trans.points$lat),2)) {
# for(j in seq(1,length(trans.points$lat),2)){
# if(i != j){
# if(trans.points$STRAT[i]==trans.points$STRAT[j]){
# if(var(trans.points$lat[c(i,i+1,j,j+1)])*10000 < .2){
# trans.points$original[j]=trans.points$original[i]
# trans.points$original[j+1]=trans.points$original[i]
# }
# }
# }
# }}
# output=unique(merge(output, trans.points[,5:6], by="id"))
#}
if(method=="text"){output$original=output$id}
#output=output[output$d>0.5,]
print(output)
return(output)
}
#converts degrees to radians
deg2rad <- function(deg) return(deg*pi/180)
# Calculates the geodesic distance between two points specified by radian latitude/longitude using the
# Spherical Law of Cosines (slc)
gcd.slc <- function(long1, lat1, long2, lat2) {
long1=deg2rad(long1)
lat1=deg2rad(lat1)
long2=deg2rad(long2)
lat2=deg2rad(lat2)
R <- 6371 # Earth mean radius [km]
d <- acos(sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2) * cos(long2-long1)) * R
return(d) # Distance in km
}
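#Quick check of the formula above: gcd.slc(-165.4, 61.5, -165.0, 61.5) is roughly 21 km,
#i.e. 0.4 deg of longitude scaled by cos(61.5 deg) at the Earth radius used here.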
#necessary for the calculation of the maximum available transect calculation (M) component of
#the variance equation when strata are separate polygons in a shp file. Calculates the distance
#between discontinuous pieces and removes it from possible sampled area.
FindVoids= function(pieces) {
voids=data.frame("id"=0,"d"=0)
for (i in 1:(length(pieces$id)-1)){
#check sequential pieces (sorted by decreasing maximum northernmost values) for voids
if(pieces$id[i]==pieces$id[i+1]){
temp=data.frame("id"=pieces$id[i], "d"=pieces$min[i]-pieces$max[i+1])
voids=rbind(voids,temp)
}
}
#any positive values indicate actual voids in sampled area
voids=voids[voids$d>0, ]
if(sum(voids$d>0)){
voids=aggregate(voids$d~voids$id, FUN=sum)
colnames(voids)=c("id", "d")
}
return(voids)
}
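#Sketch: for a stratum split into two polygons with latitude ranges 61.0-61.4 and 60.2-60.8,
#FindVoids() reports a 0.2 degree void (61.0 - 60.8), which Densities() converts to km
#(x 111.5) and removes from the latitudinal span available for transects.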
TransectLevel=function(data, n.obs=2, trans.method="gis", trans.width=.2, area) {
trans=TranSelect(year=data$yr[1], area=area)
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
shp=LoadMap(area=area,type="proj")
#Save old warning settings to revert to before leaving function
oldw <- getOption("warn")
#Suppress spatial warnings inside function call
options(warn=-1)
data$strat=as.character(data$strat)
#Compile the length (d), strata (type), area covered (des.area), and original trans name in data (original)
#Splits transects by strata if needed
transects=TransectTable(trans.file=trans.file, trans.layer=trans.layer, method=trans.method, area=area, obs=n.obs)
#transects=transects[,-1]
#Compute the total/indicated total for the group sizes indicated in the data
adj.counts=AdjustCounts(data)
#Sum the counts by combinations of species/transect
counts.t=CountsTable(adj.counts)
return(counts.t)
}
CorrectTrans=function(full.data, area){
years=unique(full.data$yr)
coordinates(full.data)=~long+lat
proj4string(full.data)=CRS("+proj=longlat +ellps=WGS84")
if(area=="crd"){
full.data$ctran=full.data$tran
}
if(area=="acp"){
full.data$ctran=full.data$tran
full.data$closest=full.data$tran
full.data$dist=0
for (i in 1:length(years)){
print(years[i])
trans=TranSelect(year=years[i], area="acp")
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
temp.data=full.data[full.data$yr==years[i],]
for (j in seq_along(temp.data$closest)){
temp.data$closest[j]=as.numeric(as.character(trans.proj$OBJECTID[which.min(suppressWarnings(gDistance(temp.data[j,],trans.proj,byid=TRUE)))]))
temp.data$dist[j]=min(suppressWarnings(gDistance(temp.data[j,],trans.proj,byid=TRUE)))
}
full.data$closest[full.data$yr==years[i]]=temp.data$closest
full.data$dist[full.data$yr==years[i]]=temp.data$dist
#m=gDistance(full.data[full.data$yr==years[i],], trans.proj, byid=TRUE)
#full.data$closest[full.data$yr==years[i]]=apply(m, 2, function(X) order(X)[1])
#full.data$dist[full.data$yr==years[i]]=apply(m, 2, function(X) min(X)[1]) * 111
trans.proj@data$id = rownames(trans.proj@data)
trans.df=fortify(trans.proj, region=OBJECTID)
trans.df=join(trans.df,trans.proj@data, by="id")
trans.df$OBJECTID=as.numeric(as.character(trans.df$OBJECTID))
old=unique(trans.df$ORIGID)
new=array(NA,length(old))
for (j in 1:length(old)){
new[j]=trans.df$OBJECTID[trans.df$ORIGID==old[j]][1]
}
renum=data.frame(old=old, new=new)
if(years[i]>2011){
for (k in 1:length(full.data$ctran)){
if(full.data$yr[k]==years[i]){
full.data$ctran[k]=renum$new[renum$old==full.data$ctran[k]]
}
}
}
if(years[i]<=2011){
for (k in 1:length(full.data$ctran)){
if(full.data$yr[k]==years[i]){
full.data$ctran[k]=full.data$closest[k]
#if(any(renum$old==full.data$closest[k])){
#full.data$ctran[k]=renum$new[renum$old==full.data$closest[k]]
}
#else{full.data$ctran[k]==NA}
}
}
}
} #end acp
return(as.data.frame(full.data))
} #end CorrectTrans()
PlotObs=function(strata.plot, selected.data, multiyear=TRUE, labelyear=FALSE, box=FALSE, set.box=c(-9999,0,0,0)){
if(multiyear==TRUE){
strata.plot= strata.plot + geom_point(data=selected.data, aes(x=long, y=lat))
}
if(labelyear==TRUE){
strata.plot= strata.plot + geom_text(data=selected.data, aes(x=long, y=lat, label=yr), hjust=0, vjust=0)
}
if (box==TRUE){
coordinates(selected.data)=~long+lat
bound=bbox(selected.data)
strata.plot= strata.plot + coord_map(xlim=c(bound[1,1]-.5, bound[1,2]+.5), ylim=c(bound[2,1]-.25, bound[2,2]+.25))
}
if (set.box[1]!=-9999){
strata.plot= strata.plot + coord_map(xlim=c(set.box[1], set.box[2]), ylim=c(set.box[3], set.box[4]))
#Barrow set.box=c(-157.5,-155,70.75,71.4)
}
print(strata.plot)
return(strata.plot)
}
TransSummary=function(full.data, area){
observers=unique(as.character(full.data$obs))
years=unique(full.data$yr)
print(observers)
print(years)
#tsum=data.frame(yr=NULL,obs=NULL, orig=NULL, len=NULL, part.of=NULL)
tsum=NULL
for (i in 1:length(years)){
if(area=="acp"){
trans=TranSelect(year=years[i], area="acp")
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
GIS.obj = LoadMap(area, type="proj")
} #end acp
if(area=="crd"){
trans=TranSelect(year=years[i], area="crd")
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.obj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
GIS.obj = LoadMap(area, type="proj")
trans.obj=intersect(trans.obj, GIS.obj) #trim the excess lines
} #end crd
trans.obj@data$len=SpatialLinesLengths(trans.obj, longlat=TRUE)
tpoints=as(trans.obj, "SpatialPointsDataFrame")
tpoints=spTransform(tpoints, "+proj=longlat +ellps=WGS84")
tpoints$strata=over(tpoints,GIS.obj)$STRATNAME
names(sort(table(tpoints$name[tpoints$OBJECTID==1]),decreasing=TRUE)[1])
for (j in 1:length(observers)){
if(length(full.data$long[full.data$obs==observers[j] & full.data$yr==years[i]])>0){
obs.flown=full.data[!duplicated(full.data[c("yr","obs","tran","ctran")]),]
#if (years[i]<=2011){
# obs.flown=obs.flown[!duplicated(obs.flown["yr","ctran"]),]
#}
obs.flown=obs.flown[obs.flown$yr==years[i] & obs.flown$obs==observers[j],]
yr=obs.flown$yr
obs=obs.flown$obs
orig=as.numeric(as.character(obs.flown$tran))
len=array(0,length(orig))
strata=array(0,length(orig))
part.of=as.numeric(as.character(obs.flown$ctran))
for (k in 1:length(orig)){
if (years[i]>2011){
len[k]=sum(trans.obj@data$len[trans.obj@data$ORIGID==orig[k] & trans.obj@data$OBJECTID==part.of[k]])
}
if (years[i]<=2011){
len[k]=sum(trans.obj@data$len[trans.obj@data$OBJECTID==part.of[k]])
}
#len[k]=sum(trans.obj@data$len[trans.obj@data$ORIGID==orig[k]])
#part.of[k]=as.character(trans.obj@data$OBJECTID[trans.obj@data$ORIGID==orig[k]])
strata[k]=names(sort(table(tpoints$strata[tpoints$OBJECTID==part.of[k]]),decreasing=TRUE)[1])
} #end k
temp.frame=data.frame(yr=yr, obs=obs, orig=orig, len=len, part.of=part.of, strata=strata)
temp.frame=temp.frame[order(orig),]
if (years[i]<=2011 & area=="acp"){
temp.frame=temp.frame[!duplicated(temp.frame[c("obs","len","part.of")]),]
}
tsum=rbind(tsum, temp.frame)
} #end if any obs/yr
} #end j observers
} #end i years
tsum$sampled.area=.2*tsum$len
return(tsum)
}
TransData2=function(selected.data){
#groupings list
unit.list=c("single", "pair","open", "flkdrake")
#list of years
yr.list=as.character(unique(selected.data$flight$yr))
#list of species
sp.list=as.character(unique(selected.data$obs$sppn))
#cycle through, check each transect/observer combo for each species
for (h in 1:length(yr.list)){
sub.data=selected.data$obs[selected.data$obs$yr==yr.list[h],]
obs.list=unique(as.character(selected.data$flight$obs[selected.data$flight$yr==yr.list[h]]))
print(paste("Making zeroes for year ", yr.list[h]))
for (i in 1:length(sp.list)){
sub.data=sub.data[sub.data$sppn==sp.list[i],]
for (j in 1:length(obs.list)){
print(paste("Observer ", obs.list[j]))
tran.list=unique(selected.data$flight$part.of[selected.data$flight$yr==yr.list[h] & selected.data$flight$obs==obs.list[j]])
sub.data=sub.data[as.character(sub.data$obs)==obs.list[j],]
for (k in 1:length(tran.list)){
sub.data=sub.data[as.character(sub.data$ctran)==tran.list[k],]
for (m in 1:length(unit.list)){
#skip if count exists
if(any(as.character(selected.data$obs$sppn)==sp.list[i] & as.character(selected.data$obs$ctran)==tran.list[k] & as.character(selected.data$obs$obs)==obs.list[j] & selected.data$obs$yr==yr.list[h] & selected.data$obs$unit==unit.list[m]))
if(any(sub.data$unit==unit.list[m]))
{next}
#add the 0 row
new.row=c(yr.list[h], NA, NA, NA, obs.list[j], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, sp.list[i], 0, unit.list[m], NA, NA, tran.list[k], NA, 0)
selected.data$obs=rbind(selected.data$obs,new.row)
} #end unit
} #end tran
} #end obs
} #end sp
} #end yr
selected.data$obs$grp=as.numeric(selected.data$obs$grp)
agg=aggregate(grp~yr+obs+sppn+unit+ctran, data=selected.data$obs, FUN=sum)
colnames(agg)=c("yr", "obs", "sppn", "unit", "ctran", "grp")
agg$area=0
agg$strata="none"
for(g in 1:length(agg$area)){
agg$area[g]=sum(selected.data$flight$sampled.area[selected.data$flight$yr==agg$yr[g] & selected.data$flight$obs==agg$obs[g] & selected.data$flight$part.of==agg$ctran[g]])
agg$strata[g]=selected.data$flight$strata[selected.data$flight$yr==agg$yr[g] & selected.data$flight$obs==agg$obs[g] & selected.data$flight$part.of==agg$ctran[g]][1]
}
return(agg[order(agg$yr, agg$obs, agg$sppn, as.numeric(agg$ctran), agg$unit),])
}
/R/main.R (eosnas/AKaerial)
colnames(temp)=c("original", "strata", "segs")
temp$segs=temp$segs/2
temp=temp[order(temp$original, temp$strata),]
write.table(temp, file="segcheck.txt", quote=FALSE, row.names=FALSE)
}
colnames(newlines.df)[6]="original"
newlines.df$id=rep(1:(length(newlines.df$id)/2), each=2)
return(newlines.df)
}
LoadMap <- function(area, type="df") {
if(area=="acp"){
map = system.file("external/a483web7_polygon.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/a483web7 polygon.shp"
lay="a483web7_polygon"
}
if(area=="ykd"){
map = system.file("external/newaird3_polygon.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/newaird3_polygon.shp"
lay="newaird3_polygon"
}
if(area=="ykg"){
map = system.file("external/YKG__2018_MemoAnalysisStrata.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/StratificationForHodgesAnalysis2.shp"
lay="YKG__2018_MemoAnalysisStrata"
}
if(area=="crd"){
map = system.file("external/CRD_2018_AnalysisStrata.shp", package="AKaerial")
#map="D:/CharlesFrost/AKaerial/data/CRD_2018_AnalysisStrata.shp"
lay="CRD_2018_AnalysisStrata"
}
maptools::gpclibPermit()
strata <- readOGR(map, lay, verbose=FALSE)
strata.proj <- spTransform(strata, "+proj=longlat +ellps=WGS84")
strata.proj@data$id = rownames(strata.proj@data)
#ifelse(area=="acp", strata.fort <- fortify(strata.proj, region="STRATNAME"), strata.fort <- fortify(strata.proj, region="STRAT"))
strata.fort <- fortify(strata.proj, region="id")
strata.df=join(strata.fort, strata.proj@data, by="id")
if(type=="df") {return(strata.df)}
if(type=="proj") {return(strata.proj)}
}
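# Minimal usage sketch for LoadMap(): the same strata layer as a fortified data
# frame (for ggplot) or as the projected sp object (for spatial overlays).
if (FALSE) {
  ykd.df   <- LoadMap("ykd", type = "df")    # long/lat/group/id columns
  ykd.proj <- LoadMap("ykd", type = "proj")  # SpatialPolygonsDataFrame in WGS84
}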
ViewStrata <- function(area, year=NULL, ViewTrans=FALSE, strata="all", numbers=FALSE) {
GIS.obj = LoadMap(area)
if(strata=="all"){
if(area=="acp" || area=="crd"){
strata.plot <- ggplot() +
geom_path(data=GIS.obj, aes(long,lat,group=group) ) +
geom_path(color="black") +
coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
#scale_fill_manual(name="Strata", values=c("red","green","yellow","grey", "orange"))
}
if(area=="ykd"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
if(area=="ykg"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
}
if(strata!="all"){
data=GIS.obj[as.character(GIS.obj$STRATNAME) %in% strata,]
if(area=="acp" || area=="crd"){
strata.plot <- ggplot() +
geom_path(data=data, aes(long,lat,group=group) ) +
geom_path(color="black", lwd=1.5) +
coord_map(xlim=c(min(data$long), max(data$long)), ylim=c(min(data$lat), max(data$lat))) +
scale_x_continuous("Longitude (degrees)") + scale_y_continuous("Latitude (degrees)")
}
if(area=="ykd"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
if(area=="ykg"){
strata.plot <- ggplot() +
geom_polygon(data=GIS.obj, aes(long,lat,group=group,fill=id) ) +
geom_path(color="black") + coord_map(xlim=c(min(GIS.obj$long), max(GIS.obj$long)), ylim=c(min(GIS.obj$lat), max(GIS.obj$lat)))
}
}
if(ViewTrans){
trans=TranSelect(year=year, area=area)
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
trans.proj@data$id = rownames(trans.proj@data)
    trans.df=fortify(trans.proj, region="OBJECTID")
trans.df=join(trans.df,trans.proj@data, by="id")
trans.labels=trans.df[trans.df$order==1,]
strata.plot = strata.plot +
geom_path(data=trans.df, aes(x=long, y=lat, group=group))
if(numbers==TRUE){
strata.plot = strata.plot +
geom_text(data=trans.labels, aes(x=long, y=lat, label=OBJECTID))
}
}
print(strata.plot)
return(strata.plot)
}
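# Hedged examples for ViewStrata(); the ACP call assumes a transect design file
# for that year is bundled with the package.
if (FALSE) {
  ViewStrata("ykd")                                   # strata polygons only
  ViewStrata("acp", year = 2017, ViewTrans = TRUE,
             numbers = TRUE)                          # overlay and label transects
}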
AdjustCounts <- function(full.data){
full.data$grp=as.numeric(as.character(full.data$grp))
for (i in 1:length(full.data$grp)){
#0 out the coded start and end points (codes are logged in the species column)
if(full.data$sppn[i]=="START" | full.data$sppn[i]=="ENDPT" | full.data$sppn[i]=="start" | full.data$sppn[i]=="end" | full.data$sppn[i]=="ENDT" | full.data$sppn[i]=="BEGT") {
full.data$itotal[i]=0
full.data$total[i]=0
full.data$ibb[i]=0
}
#Double singles for indicated totals
else if(full.data$unit[i]=="single") {
full.data$itotal[i]=2*full.data$grp[i]
full.data$ibb[i]=2*full.data$grp[i]
full.data$total[i]=full.data$grp[i]
}
#Pairs are doubled for both total and indicated total
else if(full.data$unit[i]=="pair") {
full.data$itotal[i]=2*full.data$grp[i]
full.data$ibb[i]=2*full.data$grp[i]
full.data$total[i]=2*full.data$grp[i]
}
#Open indicates a flock, nothing doubled, zero for ibb
else if(full.data$unit[i]=="open"){
full.data$itotal[i]=full.data$grp[i]
full.data$total[i]=full.data$grp[i]
full.data$ibb[i]=0
}
#Flocked drakes are doubled for 1-4 seen for indicated bb/totals. Reference would be useful.
else if(full.data$unit[i]=="flkdrake" & full.data$grp[i]<5){
full.data$itotal[i]=2*full.data$grp[i]
full.data$total[i]=full.data$grp[i]
full.data$ibb[i]=2*full.data$grp[i]
}
#Flocked drakes 5 and above aren't doubled because of science stuff, 0 for ibb.
else if(full.data$unit[i]=="flkdrake" & full.data$grp[i]>4){
full.data$itotal[i]=full.data$grp[i]
full.data$total[i]=full.data$grp[i]
full.data$ibb[i]=0
}
}
sppn=unique(full.data$sppn)
# for (i in 1:length(sppn)){
# itot=sppntable$itot[as.character(sppntable$sppn)==as.character(sppn[i])]
# full.data$itotal[full.data$sppn==as.character(sppn[i])]=itot*full.data$itotal[full.data$sppn==as.character(sppn[i])]
# }
return(full.data)
}
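# Worked sketch of the AdjustCounts() doubling rules on a toy observation table
# (only the columns the function touches are filled in; the species code is arbitrary).
if (FALSE) {
  toy <- data.frame(sppn = "SPEI", unit = c("single", "pair", "open", "flkdrake"),
                    grp = c(1, 2, 10, 3), itotal = 0, total = 0, ibb = 0)
  AdjustCounts(toy)
  # single:        itotal 2,  total 1,  ibb 2
  # pair:          itotal 4,  total 4,  ibb 4
  # open:          itotal 10, total 10, ibb 0
  # flkdrake (<5): itotal 6,  total 3,  ibb 6
}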
CountsTable=function(adj.counts) {
t1=(aggregate(adj.counts$total~adj.counts$yr+adj.counts$obs+adj.counts$ctran+adj.counts$sppn+adj.counts$strata, FUN=sum))
t2=(aggregate(adj.counts$itotal~adj.counts$yr+adj.counts$obs+adj.counts$ctran+adj.counts$sppn+adj.counts$strata, FUN=sum))
t2b=aggregate(adj.counts$ibb~adj.counts$yr+adj.counts$obs+adj.counts$ctran+adj.counts$sppn+adj.counts$strata, FUN=sum)
t3=merge(t1,t2,by=c("adj.counts$yr", "adj.counts$obs","adj.counts$ctran", "adj.counts$sppn", "adj.counts$strata"))
t3=merge(t3,t2b,by=c("adj.counts$yr", "adj.counts$obs","adj.counts$ctran", "adj.counts$sppn", "adj.counts$strata"))
colnames(t3)=c("yr","obs","ctran", "sppn", "strata", "total", "itotal", "ibb")
return(t3[order(t3$yr, t3$obs, t3$sppn, as.numeric(t3$ctran)),])
} #end CountsTable
PointsToStrata=function(full.data, area){
#full.data=SpatialNA(full.data)
x=na.approx(full.data$long)
y=na.approx(full.data$lat)
sp=cbind(x,y)
sp=SpatialPoints(sp)
proj4string(sp)=CRS("+proj=longlat +ellps=WGS84")
  sp=spTransform(sp, "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0")
map=LoadMap(area, type="proj")
  map=spTransform(map, "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs +ellps=GRS80 +towgs84=0,0,0")
full.data$strat=over(sp,map)$STRAT
if(area != "crd"){
for(a in 1:length(full.data$strat)){
#print(a)
#If NA found, replace with strata type shared by entries on that transect
#This assumes the click/logging was actually over the strata and not ACTUALLY early/late
if(is.na(full.data$strat[a])){
temp=full.data[full.data$tran==full.data$tran[a],]
#full.data$strat[a]=names(sort(table(temp$strat), decreasing=TRUE))[1]
if(length(temp$strat)>0){
full.data$strat[a]=names(which.max(table(temp$strat)))
}
else{full.data$strat[a]="undefined"}
}
}
}
return(full.data)
}
TranSelect = function(year, area){
if(area=="ykg"){area="ykd"
trans=list("file"="YKG_2018_MemoTrans.shp", "layer"="YKG_2018_MemoTrans")
}
if(area=="crd"){
trans=list("file"="CRD_2018_Transects.shp", "layer"="CRD_2018_Transects")
return(trans)
}
#area=toupper(area)
#year=as.numeric(year)
#if((year %% 4)==1){letter="B"}
#if((year %% 4)==2){letter="C"}
#if((year %% 4)==3){letter="D"}
#if((year %% 4)==0){letter="A"}
#trans.layer=paste(area, "trans", letter, sep="_")
#trans.file=paste(trans.layer,".shp", sep="")
#trans=list("file"=trans.file, "layer"=trans.layer)
return(trans)
}
Densities=function(data, n.obs=1, trans.method="gis", trans.width=.2, area, output=TRUE) {
#data=read.csv(file=(system.file("external/YKG17_HMW_MAS.v7.5.17.csv", package="AKaerial")), header=TRUE)
#trans=TranSelect(year=data$yr[1], area=area)
#trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
#trans.layer=trans$layer
shp=LoadMap(area=area,type="proj")
#Save old warning settings to revert to before leaving function
oldw <- getOption("warn")
#Suppress spatial warnings inside function call
options(warn=-1)
#Attach strata ownership to data points (moved to DataSelect)
#data=PointsToStrata(data,area=area)
#Change col type from factor to character
#data$strat=as.character(data$strat)
#Compile the length (d), strata (type), area covered (des.area), and original trans name in data (original)
#Splits transects by strata if needed
#transects=TransectTable(trans.file=trans.file, trans.layer=trans.layer, method=trans.method, area=area, obs=n.obs)
#transects=transects[,-1]
#Moved NA function to DataSelect
#Find any NA for strata ownership (caused by late/early click on receiver in plane)
#for(a in 1:length(data$strat)){
#If NA found, replace with strata type shared by entries on that transect
#This assumes the click/logging was actually over the strata and not ACTUALLY early/late
# if(is.na(data$strat[a])){
# temp=data[data$tran==data$tran[a],]
# data$strat[a]=names(sort(table(temp$strat), decreasing=TRUE))[1]
# }
#}
#Compute the total/indicated total for the group sizes indicated in the data
#adj.counts=AdjustCounts(data)
#Sum the counts by combinations of species/transect
counts.t=CountsTable(data)
counts.t$area=0
for (i in 1:length(counts.t$area)){
counts.t$area[i]=data$area[data$yr==counts.t$yr[i] & data$obs==counts.t$obs[i] & data$ctran==counts.t$ctran[i]][1]
}
#Add a row with a 0 count for every species counted somewhere in the data but not on a given transect
#t3=MakeZeroes(data, counts.t)
t3=counts.t
#t4=merge(t3, transects, by.x=c("tran", "strat"), by.y=c("original", "type"))
#Remove transect start and end from species list
t3=t3[t3$sppn != "ENDPT",]
t3=t3[t3$sppn != "START",]
#t3=t3[t3$sppn != "end",]
#t3=t3[t3$sppn != "start",]
#t3=t3[t3$sppn != "ENDT",]
#t3=t3[t3$sppn != "BEGT",]
#Make sure totals are numeric
t3$total=as.numeric(t3$total)
t3$itotal=as.numeric(t3$itotal)
t3$ibb=as.numeric(t3$ibb)
#t3$des.area=0
#Sum the sampled areas for segments of the same transect
#trans.area=aggregate(des.area~type+original, transects, sum)
#des.area.sum=aggregate(des.area~type, transects,sum)
#for (a in 1:length(t3$sppn)){
# if(any(trans.area$des.area[trans.area$original==t3$tran[a] & trans.area$type==t3$strat[a]])){
# t3$des.area[a]=trans.area$des.area[trans.area$original==t3$tran[a] & trans.area$type==t3$strat[a]]
# }
#}
#Trim off nonsense pieces created by gis clipping
#t3=t3[t3$des.area>.5,]
#Sum the counts of each species by strata type
sp.strat.total=aggregate(t3$total~t3$yr+t3$obs+t3$sppn+t3$strata, FUN=sum)
colnames(sp.strat.total)=c("yr","obs", "sppn", "strata", "total")
sp.strat.itotal=aggregate(t3$itotal~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=sum)
colnames(sp.strat.itotal)=c("yr","obs", "sppn", "strata", "itotal")
sp.strat.ibb=aggregate(t3$ibb~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=sum)
colnames(sp.strat.ibb)=c("yr","obs", "sppn", "strata", "ibb")
#Variance of the counts within each strata
sp.strat.total.v=aggregate(t3$total~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=var)
colnames(sp.strat.total.v)=c("yr", "obs", "sppn", "strata", "total.v")
sp.strat.itotal.v=aggregate(t3$itotal~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=var)
colnames(sp.strat.itotal.v)=c("yr", "obs", "sppn", "strata", "itotal.v")
sp.strat.ibb.v=aggregate(t3$ibb~t3$yr+t3$obs+t3$sppn+t3$strat, FUN=var)
colnames(sp.strat.ibb.v)=c("yr","obs", "sppn", "strata", "ibb.v")
sp.strat=merge(sp.strat.total, sp.strat.itotal)
sp.strat=merge(sp.strat, sp.strat.ibb)
sp.strat.v=merge(sp.strat.total.v, sp.strat.itotal.v)
sp.strat.v=merge(sp.strat.v, sp.strat.ibb.v)
#Put the totals together and leave placeholders for var and cov
sp.strat.final=merge(sp.strat, sp.strat.v)
sp.strat.final$total.cov=0
sp.strat.final$itotal.cov=0
sp.strat.final$var.N=0
sp.strat.final$var.Ni=0
sp.strat.final$ibb.cov=0
sp.strat.final$var.Nib=0
#Calculate covariance of total counts and area sampled
for (i in 1:length(sp.strat.final$strata)){
temp.t3=t3[t3$yr==sp.strat.final$yr[i] & t3$obs==sp.strat.final$obs[i] & t3$sppn==sp.strat.final$sppn[i] & t3$strata==sp.strat.final$strata[i],]
sp.strat.final$total.cov[i]=cov(temp.t3$total, temp.t3$area)
sp.strat.final$itotal.cov[i]=cov(temp.t3$itotal, temp.t3$area)
sp.strat.final$ibb.cov[i]=cov(temp.t3$ibb, temp.t3$area)
}
#Calculate the total area by type and the variance of the areas
area.strat=aggregate(t3$area~t3$yr+t3$obs+t3$strata+t3$sppn, FUN=sum)
area.strat.v=aggregate(t3$area~t3$yr+t3$obs+t3$strata+t3$sppn, FUN=var)
colnames(area.strat)=c("yr", "obs", "strata", "sppn","total.area")
colnames(area.strat.v)=c("yr", "obs", "strata", "sppn", "total.area.var")
area.strat=area.strat[!duplicated(area.strat[1:3]),-4]
area.strat.v=area.strat.v[!duplicated(area.strat.v[1:3]),-4]
#Put spatial summary together
area.summary=merge(area.strat, area.strat.v)
#print(area.summary)
#Merge the counts and spatial stats
counts.final=merge(sp.strat.final,area.summary, by=c("yr", "obs", "strata"))
#Calculate final densities for each strata layer
density.total=counts.final$total/counts.final$total.area
density.itotal=counts.final$itotal/counts.final$total.area
density.ibb=counts.final$ibb/counts.final$total.area
counts.final=cbind(counts.final, density.total, density.itotal, density.ibb)
#print(head(counts.final))
#Get actual areas from gis layers
strata.area=aggregate(shp@data$AREA~shp@data$STRATNAME, FUN=sum)
colnames(strata.area)=c("strata", "layer.area")
#Convert from m^2 to km^2
strata.area$layer.area=strata.area$layer.area / 1000000
#print(strata.area)
counts.final=merge(counts.final, strata.area, by="strata")
#Extrapolate density estimates across area calculation
total.est=counts.final$density.total * counts.final$layer.area
itotal.est=counts.final$density.itotal * counts.final$layer.area
ibbtotal.est=counts.final$density.ibb * counts.final$layer.area
counts.final=cbind(counts.final, total.est, itotal.est,ibbtotal.est)
#Summarize in table
estimates=aggregate(counts.final$total.est~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(estimates)=c("yr", "obs", "sppn", "total.est")
estimates.i=aggregate(counts.final$itotal.est~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(estimates.i)=c("yr", "obs", "sppn","itotal.est")
estimates.ibb=aggregate(counts.final$ibbtotal.est~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(estimates.ibb)=c("yr", "obs", "sppn","ibbtotal.est")
estimates=merge(estimates, estimates.i, by=c("yr", "obs", "sppn"))
estimates=merge(estimates, estimates.ibb, by=c("yr", "obs", "sppn"))
#adj.counts=merge(adj.counts, transects, by.x="tran", by.y="original")
### Var(N) ###
#Keep projection consistent
shp.proj <- spTransform(shp, "+proj=longlat +ellps=WGS84")
shp.proj@data$id = rownames(shp.proj@data)
shp.fort <- fortify(shp.proj, region="STRATNAME")
shp.df=join(shp.fort, shp.proj@data, by="id")
##extract min and max lat from shp.df, calc gcd and / by sampled width
min.lat=aggregate(shp.df$lat~shp.df$id, FUN=min)
max.lat=aggregate(shp.df$lat~shp.df$id, FUN=max)
piece.min.lat=aggregate(shp.df$lat~shp.df$piece+shp.df$id, FUN=min)
colnames(piece.min.lat)=c("piece", "id", "min")
piece.max.lat=aggregate(shp.df$lat~shp.df$piece+shp.df$id, FUN=max)
colnames(piece.max.lat)=c("piece", "id", "max")
pieces=data.frame("id"=piece.max.lat$id, "min"=piece.min.lat$min, "max"=piece.max.lat$max)
pieces=pieces[order(pieces$id, -pieces$max),]
#Find holes between shape polygons
voids=FindVoids(pieces=pieces)
#111.5 km in 1 deg lat
diff.lat=data.frame("strata"=min.lat[,1], "diff"=abs(max.lat[,2]-min.lat[,2])*111.5)
#If there are voids in strata, remove them from possible sample area
diff.lat$strata=as.character(diff.lat$strata)
for (i in 1:length(diff.lat$strata)){
if (diff.lat$strata[i] %in% voids$id){diff.lat$diff[i]=diff.lat$diff[i]-111.5*voids$d[voids$id==diff.lat$strata[i]]}
}
#Total possible transects available (M)
diff.lat$M=diff.lat$diff/(trans.width*n.obs)
#print(transects)
#Number of transects sampled (m of a possible M)
#reps=aggregate(transects$original~transects$type, FUN=length)
#reps=data.frame("strata"=transects$type, "original"=transects$original)
#reps=unique(reps)
#print(reps)
reps2=aggregate(t3$ctran~t3$yr+t3$obs+t3$strata+t3$sppn, FUN=length)
colnames(reps2)=c("yr", "obs", "strata", "sppn","m")
reps2=reps2[!duplicated(reps2[1:3]),-4]
diff.lat=merge(diff.lat, reps2, by="strata")
diff.lat=merge(diff.lat, area.summary, by=c("yr", "obs", "strata"))
print(diff.lat)
#print(diff.lat)
#See equation 12.9, p. 249 in "Analysis and Management of Animal Populations"
#Williams, Nichols, Conroy; 2002
for (j in 1:length(counts.final$sppn)){
M=diff.lat$M[diff.lat$yr==counts.final$yr[j] & diff.lat$obs==counts.final$obs[j] & diff.lat$strata==counts.final$strata[j]]
m=diff.lat$m[diff.lat$yr==counts.final$yr[j] & diff.lat$obs==counts.final$obs[j] & diff.lat$strata==counts.final$strata[j]]
prop.m=((1-(m/M))/m)
#if(counts.final$sppn[j]=="SPEI"){print((counts.final$total.v[j]+(counts.final$density.total[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.total[j]*counts.final$total.cov[j])))}
counts.final$var.N[j]=(M^2)*prop.m*(counts.final$total.v[j]+(counts.final$density.total[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.total[j]*counts.final$total.cov[j]))
counts.final$var.Ni[j]=(M^2)*prop.m*(counts.final$itotal.v[j]+(counts.final$density.itotal[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.itotal[j]*counts.final$itotal.cov[j]))
counts.final$var.Nib[j]=(M^2)*prop.m*(counts.final$ibb.v[j]+(counts.final$density.ibb[j]^2)*(counts.final$total.area.var[j])-(2*counts.final$density.ibb[j]*counts.final$ibb.cov[j]))
}
var.est=aggregate(counts.final$var.N~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(var.est)=c("yr", "obs", "sppn","var.N")
var.est.i=aggregate(counts.final$var.Ni~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(var.est.i)=c("yr", "obs", "sppn","var.Ni")
var.est.ibb=aggregate(counts.final$var.Nib~counts.final$yr+counts.final$obs+counts.final$sppn, FUN=sum)
colnames(var.est.ibb)=c("yr", "obs", "sppn","var.Nib")
estimates=merge(estimates, var.est, by=c("yr", "obs", "sppn"), all=TRUE)
estimates=merge(estimates, var.est.i, by=c("yr", "obs", "sppn"), all=TRUE)
estimates=merge(estimates, var.est.ibb, by=c("yr", "obs", "sppn"), all=TRUE)
estimates$SE=sqrt(estimates$var.N)
estimates$SE.i=sqrt(estimates$var.Ni)
estimates$SE.ibb=sqrt(estimates$var.Nib)
estimates$total.est=as.integer(estimates$total.est)
estimates$itotal.est=as.integer(estimates$itotal.est)
estimates$ibbtotal.est=as.integer(estimates$ibbtotal.est)
options(warn = oldw)
#Output tables to txt files if requested
if(output==TRUE){
write.table(counts.final, file="finalcounts.txt", quote=FALSE, row.names=FALSE)
write.table(estimates, file="estimates.txt", quote=FALSE, row.names=FALSE)
}
return(estimates)
}
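# Illustrative workflow sketch (untested; assumes the transect-level frame from
# DataSelect(method = "transect") is the data argument Densities() expects).
if (FALSE) {
  sel <- DataSelect(area = "ykg", year = 2016, method = "transect")
  est <- Densities(sel, area = "ykg", output = FALSE)
  est[est$sppn == "SPEI", ]
}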
CombineEstimates=function(estimates){
yr.list=unique(estimates$yr)
sp.list=unique(estimates$sppn)
combined=data.frame(yr=rep(yr.list, each=length(unique(estimates$sppn))), sppn=rep(sp.list, length(yr.list)), total=0, total.var=0, total.se=0, itotal=0, itotal.var=0, itotal.se=0, ibb=0, ibb.var=0, ibb.se=0)
for(i in 1:length(combined$yr)){
combined$total[i]=mean(estimates$total.est[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])
combined$itotal[i]=mean(estimates$itotal.est[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])
combined$ibb[i]=mean(estimates$ibbtotal.est[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])
combined$total.var[i]=sum(estimates$var.N[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])/(length(estimates$var.N[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])^2)
combined$itotal.var[i]=sum(estimates$var.Ni[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])/(length(estimates$var.Ni[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])^2)
combined$ibb.var[i]=sum(estimates$var.Nib[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])/(length(estimates$var.Nib[estimates$yr==combined$yr[i] & estimates$sppn==combined$sppn[i]])^2)
}
combined$total.se=sqrt(combined$total.var)
combined$itotal.se=sqrt(combined$itotal.var)
combined$ibb.se=sqrt(combined$ibb.var)
return(combined)
}
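# Sketch of CombineEstimates() on a hand-built two-observer table: point
# estimates are averaged and variances combined as sum(var)/n^2.
if (FALSE) {
  est <- data.frame(yr = 2016, sppn = "SPEI",
                    total.est = c(1000, 1200),    var.N   = c(4e4, 6e4),
                    itotal.est = c(1800, 2100),   var.Ni  = c(9e4, 1.1e5),
                    ibbtotal.est = c(1500, 1700), var.Nib = c(7e4, 8e4))
  CombineEstimates(est)
  # total = 1100, total.var = (4e4 + 6e4) / 2^2 = 25000
}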
MakeZeroes=function(full.data){
#Appends a count of 0 to transects that were flown but did not record any of a given species
#list of years
year.list=as.numeric(unique(full.data$yr))
#list of observers
obs.list=as.character(unique(full.data$obs))
#list of transects
tran.list=as.character(unique(full.data$ctran))
#list of species
sp.list=as.character(unique(full.data$sppn))
#cycle through, check each transect/observer combo for each species
for (h in 1:length(year.list)){
print(paste("Making zeroes for ", year.list[h]))
for (i in 1:length(sp.list)){
for (j in 1:length(tran.list)){
for (k in 1:length(obs.list)){
#skip if count exists
if(any(as.character(full.data$sppn)==sp.list[i] & as.character(full.data$ctran)==tran.list[j] & as.character(full.data$obs)==obs.list[k] & full.data$yr==year.list[h]))
{next}
#make sure that transect was flown
if(any(as.character(full.data$obs)==obs.list[k] & as.character(full.data$ctran)==tran.list[j] & full.data$yr==year.list[h]))
{
#add the 0 row
new.row=c(year.list[h], NA, NA, NA, obs.list[k], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, sp.list[i], 0, "open", NA, NA, tran.list[j], NA, 0)
full.data=rbind(full.data,new.row)
}
} #end obs
} #end tran
} #end sp
} #end year
return(full.data)
}
TransectTable <- function(trans.file, trans.layer, obs=1, method, area) {
if(method=="gis" & area != "acp"){
#split the design shp file into component segments (file has each transect cutting through
#multiple strata and keeping same number)
trans.points=SplitDesign(file.name=trans.file, layer.name=trans.layer, area=area)
id=trans.points$id
x=trans.points$long
y=trans.points$lat
}
if(method=="gis" & area == "acp"){
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
strata.proj=LoadMap(area, type="proj")
newlines = raster::intersect(trans.proj, strata.proj)
newlines@data$id=rownames(newlines@data)
newlines.fort=fortify(newlines, region="STRAT")
trans.points=join(newlines.fort, newlines@data, by="id")
id=trans.points$id
x=vector("numeric",length=2*length(unique(trans.points$id)))
y=vector("numeric",length=2*length(unique(trans.points$id)))
id=unique(trans.points$id)
original=vector("character",length=length(unique(trans.points$id)))
renum=vector("character",length=length(unique(trans.points$id)))
type=vector("character",length=length(unique(trans.points$id)))
for(i in 1:length(id)){
x[2*i-1]=trans.points$long[trans.points$id==id[i] & trans.points$order==1]
x[2*i]=trans.points$long[trans.points$order==which.max(trans.points$order[trans.points$id==id[i]]) & trans.points$id==id[i]]
y[2*i-1]=trans.points$lat[trans.points$id==id[i] & trans.points$order==1]
y[2*i]=trans.points$lat[trans.points$order==which.max(trans.points$order[trans.points$id==id[i]]) & trans.points$id==id[i]]
original[i]=trans.points$ORIGID[trans.points$id==id[i] & trans.points$order==1]
renum[i]=as.character(trans.points$OBJECTID[trans.points$id==id[i] & trans.points$order==1])
type[i]=trans.points$STRAT[trans.points$id==id[i] & trans.points$order==1]
}
}
#pulls in design file that is a list of start and end points
if(method=="text"){
trans.points=trans.file
code=as.character(trans.points$ident)
id=code
side=code
original=NULL
for (i in 1:length(unique(code))){
id[i]=as.numeric(substr(code[i],nchar(code[i])-2, nchar(code[i])-1))
side[i]=substr(code[i], nchar(code[i]), nchar(code[i]))
}
}
#either method above results in x,y and the following code calculates great circle distance
#for each set of coordinates
width=obs*.2
id=as.numeric(id)
d=vector("numeric",length=length(unique(id)))
#type=vector("character", length=length(unique(id)))
sp=cbind(x,y)
sp.which=seq(1,length(x), by=2)
sp.set=data.frame(x=((sp[sp.which,1]+sp[sp.which+1,1])/2), y=sp[sp.which,2])
sp.set=SpatialPoints(sp.set)
proj4string(sp.set)=CRS("+proj=longlat +ellps=WGS84")
if(method=="text"){type=over(sp.set,LoadMap(area, type="proj"))$STRATNAME}
#if(method=="gis"){type=trans.points$STRAT[seq(1,length(trans.points$STRAT), by=2)]}
#gcd.slc works best for a matrix of coords
for (i in 1:length(d)){
coords=matrix(c(x[2*i-1], x[2*i], y[2*i-1], y[2*i]), nrow=2, ncol=2)
d[i]=gcd.slc(coords[1,1], coords[1,2], coords[2,1], coords[2,2])
}
output=data.frame(oldid=original, newid=renum, d=d, type=type, des.area=width*d)
output=output[output$d>0.25,]
#if(method=="gis"){
# id=unique(id)
# for(i in seq(1,length(trans.points$lat),2)) {
# for(j in seq(1,length(trans.points$lat),2)){
# if(i != j){
# if(trans.points$STRAT[i]==trans.points$STRAT[j]){
# if(var(trans.points$lat[c(i,i+1,j,j+1)])*10000 < .2){
# trans.points$original[j]=trans.points$original[i]
# trans.points$original[j+1]=trans.points$original[i]
# }
# }
# }
# }}
# output=unique(merge(output, trans.points[,5:6], by="id"))
#}
if(method=="text"){output$original=output$id}
#output=output[output$d>0.5,]
print(output)
return(output)
}
#converts degrees to radians
deg2rad <- function(deg) return(deg*pi/180)
# Calculates the geodesic distance between two points specified by radian latitude/longitude using the
# Spherical Law of Cosines (slc)
gcd.slc <- function(long1, lat1, long2, lat2) {
long1=deg2rad(long1)
lat1=deg2rad(lat1)
long2=deg2rad(long2)
lat2=deg2rad(lat2)
R <- 6371 # Earth mean radius [km]
d <- acos(sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2) * cos(long2-long1)) * R
return(d) # Distance in km
}
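# Quick sanity check for gcd.slc(): two points on the same meridian one degree
# of latitude apart should be roughly 111 km apart.
if (FALSE) {
  gcd.slc(-154, 60, -154, 61)  # ~111.2 km
}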
#necessary for the calculation of the maximum available transect calculation (M) component of
#the variance equation when strata are separate polygons in a shp file. Calculates the distance
#between discontinuous pieces and removes it from possible sampled area.
FindVoids= function(pieces) {
voids=data.frame("id"=0,"d"=0)
for (i in 1:(length(pieces$id)-1)){
#check sequential pieces (sorted by decreasing maximum northernmost values) for voids
if(pieces$id[i]==pieces$id[i+1]){
temp=data.frame("id"=pieces$id[i], "d"=pieces$min[i]-pieces$max[i+1])
voids=rbind(voids,temp)
}
}
#any positive values indicate actual voids in sampled area
voids=voids[voids$d>0, ]
if(sum(voids$d>0)){
voids=aggregate(voids$d~voids$id, FUN=sum)
colnames(voids)=c("id", "d")
}
return(voids)
}
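# Small illustration of FindVoids(): two polygons of the same stratum with a
# latitude gap between them yield one void of that size (in degrees of latitude).
if (FALSE) {
  pieces <- data.frame(id = c("low", "low"), min = c(62.0, 60.0), max = c(63.0, 61.5))
  FindVoids(pieces)  # id = "low", d = 0.5
}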
TransectLevel=function(data, n.obs=2, trans.method="gis", trans.width=.2, area) {
trans=TranSelect(year=data$yr[1], area=area)
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
shp=LoadMap(area=area,type="proj")
#Save old warning settings to revert to before leaving function
oldw <- getOption("warn")
#Suppress spatial warnings inside function call
options(warn=-1)
data$strat=as.character(data$strat)
#Compile the length (d), strata (type), area covered (des.area), and original trans name in data (original)
#Splits transects by strata if needed
transects=TransectTable(trans.file=trans.file, trans.layer=trans.layer, method=trans.method, area=area, obs=n.obs)
#transects=transects[,-1]
#Compute the total/indicated total for the group sizes indicated in the data
adj.counts=AdjustCounts(data)
#Sum the counts by combinations of species/transect
counts.t=CountsTable(adj.counts)
return(counts.t)
}
CorrectTrans=function(full.data, area){
years=unique(full.data$yr)
coordinates(full.data)=~long+lat
proj4string(full.data)=CRS("+proj=longlat +ellps=WGS84")
if(area=="crd"){
full.data$ctran=full.data$tran
}
if(area=="acp"){
full.data$ctran=full.data$tran
full.data$closest=full.data$tran
full.data$dist=0
for (i in 1:length(years)){
print(years[i])
trans=TranSelect(year=years[i], area="acp")
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
temp.data=full.data[full.data$yr==years[i],]
for (j in seq_along(temp.data$closest)){
temp.data$closest[j]=as.numeric(as.character(trans.proj$OBJECTID[which.min(suppressWarnings(gDistance(temp.data[j,],trans.proj,byid=TRUE)))]))
temp.data$dist[j]=min(suppressWarnings(gDistance(temp.data[j,],trans.proj,byid=TRUE)))
}
full.data$closest[full.data$yr==years[i]]=temp.data$closest
full.data$dist[full.data$yr==years[i]]=temp.data$dist
#m=gDistance(full.data[full.data$yr==years[i],], trans.proj, byid=TRUE)
#full.data$closest[full.data$yr==years[i]]=apply(m, 2, function(X) order(X)[1])
#full.data$dist[full.data$yr==years[i]]=apply(m, 2, function(X) min(X)[1]) * 111
trans.proj@data$id = rownames(trans.proj@data)
      trans.df=fortify(trans.proj, region="OBJECTID")
trans.df=join(trans.df,trans.proj@data, by="id")
trans.df$OBJECTID=as.numeric(as.character(trans.df$OBJECTID))
old=unique(trans.df$ORIGID)
new=array(NA,length(old))
for (j in 1:length(old)){
new[j]=trans.df$OBJECTID[trans.df$ORIGID==old[j]][1]
}
renum=data.frame(old=old, new=new)
if(years[i]>2011){
for (k in 1:length(full.data$ctran)){
if(full.data$yr[k]==years[i]){
full.data$ctran[k]=renum$new[renum$old==full.data$ctran[k]]
}
}
}
if(years[i]<=2011){
for (k in 1:length(full.data$ctran)){
if(full.data$yr[k]==years[i]){
full.data$ctran[k]=full.data$closest[k]
#if(any(renum$old==full.data$closest[k])){
#full.data$ctran[k]=renum$new[renum$old==full.data$closest[k]]
}
#else{full.data$ctran[k]==NA}
}
}
}
} #end acp
return(as.data.frame(full.data))
} #end CorrectTrans()
PlotObs=function(strata.plot, selected.data, multiyear=TRUE, labelyear=FALSE, box=FALSE, set.box=c(-9999,0,0,0)){
if(multiyear==TRUE){
strata.plot= strata.plot + geom_point(data=selected.data, aes(x=long, y=lat))
}
if(labelyear==TRUE){
strata.plot= strata.plot + geom_text(data=selected.data, aes(x=long, y=lat, label=yr), hjust=0, vjust=0)
}
if (box==TRUE){
coordinates(selected.data)=~long+lat
bound=bbox(selected.data)
strata.plot= strata.plot + coord_map(xlim=c(bound[1,1]-.5, bound[1,2]+.5), ylim=c(bound[2,1]-.25, bound[2,2]+.25))
}
if (set.box[1]!=-9999){
strata.plot= strata.plot + coord_map(xlim=c(set.box[1], set.box[2]), ylim=c(set.box[3], set.box[4]))
#Barrow set.box=c(-157.5,-155,70.75,71.4)
}
print(strata.plot)
return(strata.plot)
}
TransSummary=function(full.data, area){
observers=unique(as.character(full.data$obs))
years=unique(full.data$yr)
print(observers)
print(years)
#tsum=data.frame(yr=NULL,obs=NULL, orig=NULL, len=NULL, part.of=NULL)
tsum=NULL
for (i in 1:length(years)){
if(area=="acp"){
trans=TranSelect(year=years[i], area="acp")
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.proj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
GIS.obj = LoadMap(area, type="proj")
} #end acp
if(area=="crd"){
trans=TranSelect(year=years[i], area="crd")
trans.file=system.file(paste("external/", trans$file, sep=""), package="AKaerial")
trans.layer=trans$layer
trans.obj=readOGR(trans.file, trans.layer, verbose=FALSE)
trans.obj <- spTransform(trans.obj, "+proj=longlat +ellps=WGS84")
GIS.obj = LoadMap(area, type="proj")
trans.obj=intersect(trans.obj, GIS.obj) #trim the excess lines
} #end crd
trans.obj@data$len=SpatialLinesLengths(trans.obj, longlat=TRUE)
tpoints=as(trans.obj, "SpatialPointsDataFrame")
tpoints=spTransform(tpoints, "+proj=longlat +ellps=WGS84")
tpoints$strata=over(tpoints,GIS.obj)$STRATNAME
    # leftover development check (result unused):
    # names(sort(table(tpoints$name[tpoints$OBJECTID==1]),decreasing=TRUE)[1])
for (j in 1:length(observers)){
if(length(full.data$long[full.data$obs==observers[j] & full.data$yr==years[i]])>0){
obs.flown=full.data[!duplicated(full.data[c("yr","obs","tran","ctran")]),]
#if (years[i]<=2011){
# obs.flown=obs.flown[!duplicated(obs.flown["yr","ctran"]),]
#}
obs.flown=obs.flown[obs.flown$yr==years[i] & obs.flown$obs==observers[j],]
yr=obs.flown$yr
obs=obs.flown$obs
orig=as.numeric(as.character(obs.flown$tran))
len=array(0,length(orig))
strata=array(0,length(orig))
part.of=as.numeric(as.character(obs.flown$ctran))
for (k in 1:length(orig)){
if (years[i]>2011){
len[k]=sum(trans.obj@data$len[trans.obj@data$ORIGID==orig[k] & trans.obj@data$OBJECTID==part.of[k]])
}
if (years[i]<=2011){
len[k]=sum(trans.obj@data$len[trans.obj@data$OBJECTID==part.of[k]])
}
#len[k]=sum(trans.obj@data$len[trans.obj@data$ORIGID==orig[k]])
#part.of[k]=as.character(trans.obj@data$OBJECTID[trans.obj@data$ORIGID==orig[k]])
strata[k]=names(sort(table(tpoints$strata[tpoints$OBJECTID==part.of[k]]),decreasing=TRUE)[1])
} #end k
temp.frame=data.frame(yr=yr, obs=obs, orig=orig, len=len, part.of=part.of, strata=strata)
temp.frame=temp.frame[order(orig),]
if (years[i]<=2011 & area=="acp"){
temp.frame=temp.frame[!duplicated(temp.frame[c("obs","len","part.of")]),]
}
tsum=rbind(tsum, temp.frame)
} #end if any obs/yr
} #end j observers
} #end i years
tsum$sampled.area=.2*tsum$len
return(tsum)
}
TransData2=function(selected.data){
#groupings list
unit.list=c("single", "pair","open", "flkdrake")
#list of years
yr.list=as.character(unique(selected.data$flight$yr))
#list of species
sp.list=as.character(unique(selected.data$obs$sppn))
#cycle through, check each transect/observer combo for each species
for (h in 1:length(yr.list)){
sub.data=selected.data$obs[selected.data$obs$yr==yr.list[h],]
obs.list=unique(as.character(selected.data$flight$obs[selected.data$flight$yr==yr.list[h]]))
print(paste("Making zeroes for year ", yr.list[h]))
for (i in 1:length(sp.list)){
sub.data=sub.data[sub.data$sppn==sp.list[i],]
for (j in 1:length(obs.list)){
print(paste("Observer ", obs.list[j]))
tran.list=unique(selected.data$flight$part.of[selected.data$flight$yr==yr.list[h] & selected.data$flight$obs==obs.list[j]])
sub.data=sub.data[as.character(sub.data$obs)==obs.list[j],]
for (k in 1:length(tran.list)){
sub.data=sub.data[as.character(sub.data$ctran)==tran.list[k],]
for (m in 1:length(unit.list)){
#skip if count exists
if(any(as.character(selected.data$obs$sppn)==sp.list[i] & as.character(selected.data$obs$ctran)==tran.list[k] & as.character(selected.data$obs$obs)==obs.list[j] & selected.data$obs$yr==yr.list[h] & selected.data$obs$unit==unit.list[m]))
if(any(sub.data$unit==unit.list[m]))
{next}
#add the 0 row
new.row=c(yr.list[h], NA, NA, NA, obs.list[j], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, sp.list[i], 0, unit.list[m], NA, NA, tran.list[k], NA, 0)
selected.data$obs=rbind(selected.data$obs,new.row)
} #end unit
} #end tran
} #end obs
} #end sp
} #end yr
selected.data$obs$grp=as.numeric(selected.data$obs$grp)
agg=aggregate(grp~yr+obs+sppn+unit+ctran, data=selected.data$obs, FUN=sum)
colnames(agg)=c("yr", "obs", "sppn", "unit", "ctran", "grp")
agg$area=0
agg$strata="none"
for(g in 1:length(agg$area)){
agg$area[g]=sum(selected.data$flight$sampled.area[selected.data$flight$yr==agg$yr[g] & selected.data$flight$obs==agg$obs[g] & selected.data$flight$part.of==agg$ctran[g]])
agg$strata[g]=selected.data$flight$strata[selected.data$flight$yr==agg$yr[g] & selected.data$flight$obs==agg$obs[g] & selected.data$flight$part.of==agg$ctran[g]][1]
}
return(agg[order(agg$yr, agg$obs, agg$sppn, as.numeric(agg$ctran), agg$unit),])
}
|
#' Get the number of available pages
#'
#' @param a_page any XML object
#'
#' @return number of pages available
#'
#' @importFrom xml2 xml_double xml_contents xml_find_all
get_n_pages <- function(a_page) {
max(xml_double(xml_contents(xml_find_all(a_page, xpath = ".//select/option"))))
}
|
/R/utils.R
|
no_license
|
Techneaux/gdqr
|
R
| false | false | 288 |
r
|
#' Get the number of available pages
#'
#' @param a_page any XML object
#'
#' @return number of pages available
#'
#' @importFrom xml2 xml_double xml_contents xml_find_all
get_n_pages <- function(a_page) {
max(xml_double(xml_contents(xml_find_all(a_page, xpath = ".//select/option"))))
}
|
gghdr <- function(group, y, probs= c(90, 50, 25), data) {
  require(hdrcde)
  require(plyr)     # ddply()
  require(ggplot2)
group <- data[,group]
y <- data[, y]
data <- data.frame(group, y)
hdr.df <- ddply(data, .(group), function(x) {
res <- hdr(x$y, prob=probs)
m <- res$hdr
k <- dim(m)[2]/2
out <- data.frame(x1=m[,1], x2=m[,2])
if (k > 1)
for (i in 2:k)
out <- rbind(out, data.frame(x1=m[,2*i-1], x2=m[,2*i]))
out$probs <- probs
out$group <- x$group[1]
out$mode <- res$mode
out
})
hdr.df <- na.omit(hdr.df)
outliers <- ddply(data, .(group), function(x) {
outsub <- subset(hdr.df, group==x$group[1])
res <- x[x$y > max(outsub$x2) | x$y < min(outsub$x1),]
res
})
p <- ggplot(aes(fill=factor(group)), data=hdr.df) +
geom_rect(aes(xmin=group-0.4, #*sqrt(probs/100),
xmax=group+0.4, #*sqrt(probs/100),
ymin=x1, ymax=x2), alpha=0.5) +
geom_segment(aes(x=group-0.45,
xend=group+0.45,
y=mode, yend=mode,
colour=factor(group))) +
geom_point(aes(x=group, y=y), data=outliers) +
scale_x_continuous(breaks=unique(group)) +
theme(legend.position="none") + xlab("") + ylab("")
p
}
#
#frame <- simstudy(type=3, pars=list(n=96, mu=4, n1=48, group=2))
#gghdr(y="vals", group="group", data=frame)
#ggsave(file="images/hdr-xpl.pdf", width=2, height=2)
|
/code/gghdr.R
|
no_license
|
heike/boxplot-variations
|
R
| false | false | 1,427 |
r
|
gghdr <- function(group, y, probs= c(90, 50, 25), data) {
  require(hdrcde)
  require(plyr)     # ddply()
  require(ggplot2)
group <- data[,group]
y <- data[, y]
data <- data.frame(group, y)
hdr.df <- ddply(data, .(group), function(x) {
res <- hdr(x$y, prob=probs)
m <- res$hdr
k <- dim(m)[2]/2
out <- data.frame(x1=m[,1], x2=m[,2])
if (k > 1)
for (i in 2:k)
out <- rbind(out, data.frame(x1=m[,2*i-1], x2=m[,2*i]))
out$probs <- probs
out$group <- x$group[1]
out$mode <- res$mode
out
})
hdr.df <- na.omit(hdr.df)
outliers <- ddply(data, .(group), function(x) {
outsub <- subset(hdr.df, group==x$group[1])
res <- x[x$y > max(outsub$x2) | x$y < min(outsub$x1),]
res
})
p <- ggplot(aes(fill=factor(group)), data=hdr.df) +
geom_rect(aes(xmin=group-0.4, #*sqrt(probs/100),
xmax=group+0.4, #*sqrt(probs/100),
ymin=x1, ymax=x2), alpha=0.5) +
geom_segment(aes(x=group-0.45,
xend=group+0.45,
y=mode, yend=mode,
colour=factor(group))) +
geom_point(aes(x=group, y=y), data=outliers) +
scale_x_continuous(breaks=unique(group)) +
theme(legend.position="none") + xlab("") + ylab("")
p
}
#
#frame <- simstudy(type=3, pars=list(n=96, mu=4, n1=48, group=2))
#gghdr(y="vals", group="group", data=frame)
#ggsave(file="images/hdr-xpl.pdf", width=2, height=2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_node_attr_w_fcn.R
\name{set_node_attr_w_fcn}
\alias{set_node_attr_w_fcn}
\title{Set node attribute values with a graph function}
\usage{
set_node_attr_w_fcn(graph, node_attr_fcn, ..., column_name = NULL)
}
\arguments{
\item{graph}{A graph object of class \code{dgr_graph}.}
\item{node_attr_fcn}{The name of the function to use for creating a column of
node attribute values. Valid functions are: \code{\link[=get_alpha_centrality]{get_alpha_centrality()}},
\code{\link[=get_authority_centrality]{get_authority_centrality()}}, \code{\link[=get_betweenness]{get_betweenness()}}, \code{\link[=get_bridging]{get_bridging()}},
\code{\link[=get_closeness]{get_closeness()}}, \code{\link[=get_cmty_edge_btwns]{get_cmty_edge_btwns()}}, \code{\link[=get_cmty_fast_greedy]{get_cmty_fast_greedy()}},
\code{\link[=get_cmty_l_eigenvec]{get_cmty_l_eigenvec()}}, \code{\link[=get_cmty_louvain]{get_cmty_louvain()}}, \code{\link[=get_cmty_walktrap]{get_cmty_walktrap()}},
\code{\link[=get_constraint]{get_constraint()}}, \code{\link[=get_degree_distribution]{get_degree_distribution()}}, \code{\link[=get_degree_histogram]{get_degree_histogram()}},
\code{\link[=get_degree_in]{get_degree_in()}}, \code{\link[=get_degree_out]{get_degree_out()}}, \code{\link[=get_degree_total]{get_degree_total()}},
\code{\link[=get_eccentricity]{get_eccentricity()}}, \code{\link[=get_eigen_centrality]{get_eigen_centrality()}}, \code{\link[=get_pagerank]{get_pagerank()}},
\code{\link[=get_s_connected_cmpts]{get_s_connected_cmpts()}}, and \code{\link[=get_w_connected_cmpts]{get_w_connected_cmpts()}}.}
\item{...}{Arguments and values to pass to the named function in
\code{node_attr_fcn}, if necessary.}
\item{column_name}{An option to supply a column name for the new node
attribute column. If \code{NULL} then the column name supplied by the function
will be used along with a \verb{__A} suffix.}
}
\value{
A graph object of class \code{dgr_graph}.
}
\description{
From a graph object of class \code{dgr_graph} or a node data frame, set node
attribute properties for all nodes in the graph using one of several
whole-graph functions.
}
\examples{
# Create a random graph using the
# `add_gnm_graph()` function
graph <-
create_graph() \%>\%
add_gnm_graph(
n = 10,
m = 22,
set_seed = 23) \%>\%
set_node_attrs(
node_attr = value,
values = rnorm(
n = count_nodes(.),
mean = 5,
sd = 1) \%>\% round(1))
# Get the betweenness values for
# each of the graph's nodes as a
# node attribute
graph_1 <-
graph \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_betweenness")
# Inspect the graph's internal
# node data frame
graph_1 \%>\% get_node_df()
# If a specified function takes argument
# values, these can be supplied as well
graph_2 <-
graph \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_alpha_centrality",
alpha = 2,
exo = 2)
# Inspect the graph's internal
# node data frame
graph_2 \%>\% get_node_df()
# The new column name can be provided
graph_3 <-
graph \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_pagerank",
column_name = "pagerank")
# Inspect the graph's internal
# node data frame
graph_3 \%>\% get_node_df()
# If `graph_3` is modified by
# adding a new node then the column
# `pagerank` will have stale data; we
# can run the function again and re-use
# the existing column name to provide
# updated values
graph_3 <-
graph_3 \%>\%
add_node(
from = 1,
to = 3) \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_pagerank",
column_name = "pagerank")
# Inspect the graph's internal
# node data frame
graph_3 \%>\% get_node_df()
}
|
/man/set_node_attr_w_fcn.Rd
|
permissive
|
nograpes/DiagrammeR
|
R
| false | true | 3,696 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_node_attr_w_fcn.R
\name{set_node_attr_w_fcn}
\alias{set_node_attr_w_fcn}
\title{Set node attribute values with a graph function}
\usage{
set_node_attr_w_fcn(graph, node_attr_fcn, ..., column_name = NULL)
}
\arguments{
\item{graph}{A graph object of class \code{dgr_graph}.}
\item{node_attr_fcn}{The name of the function to use for creating a column of
node attribute values. Valid functions are: \code{\link[=get_alpha_centrality]{get_alpha_centrality()}},
\code{\link[=get_authority_centrality]{get_authority_centrality()}}, \code{\link[=get_betweenness]{get_betweenness()}}, \code{\link[=get_bridging]{get_bridging()}},
\code{\link[=get_closeness]{get_closeness()}}, \code{\link[=get_cmty_edge_btwns]{get_cmty_edge_btwns()}}, \code{\link[=get_cmty_fast_greedy]{get_cmty_fast_greedy()}},
\code{\link[=get_cmty_l_eigenvec]{get_cmty_l_eigenvec()}}, \code{\link[=get_cmty_louvain]{get_cmty_louvain()}}, \code{\link[=get_cmty_walktrap]{get_cmty_walktrap()}},
\code{\link[=get_constraint]{get_constraint()}}, \code{\link[=get_degree_distribution]{get_degree_distribution()}}, \code{\link[=get_degree_histogram]{get_degree_histogram()}},
\code{\link[=get_degree_in]{get_degree_in()}}, \code{\link[=get_degree_out]{get_degree_out()}}, \code{\link[=get_degree_total]{get_degree_total()}},
\code{\link[=get_eccentricity]{get_eccentricity()}}, \code{\link[=get_eigen_centrality]{get_eigen_centrality()}}, \code{\link[=get_pagerank]{get_pagerank()}},
\code{\link[=get_s_connected_cmpts]{get_s_connected_cmpts()}}, and \code{\link[=get_w_connected_cmpts]{get_w_connected_cmpts()}}.}
\item{...}{Arguments and values to pass to the named function in
\code{node_attr_fcn}, if necessary.}
\item{column_name}{An option to supply a column name for the new node
attribute column. If \code{NULL} then the column name supplied by the function
will be used along with a \verb{__A} suffix.}
}
\value{
A graph object of class \code{dgr_graph}.
}
\description{
From a graph object of class \code{dgr_graph} or a node data frame, set node
attribute properties for all nodes in the graph using one of several
whole-graph functions.
}
\examples{
# Create a random graph using the
# `add_gnm_graph()` function
graph <-
create_graph() \%>\%
add_gnm_graph(
n = 10,
m = 22,
set_seed = 23) \%>\%
set_node_attrs(
node_attr = value,
values = rnorm(
n = count_nodes(.),
mean = 5,
sd = 1) \%>\% round(1))
# Get the betweenness values for
# each of the graph's nodes as a
# node attribute
graph_1 <-
graph \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_betweenness")
# Inspect the graph's internal
# node data frame
graph_1 \%>\% get_node_df()
# If a specified function takes argument
# values, these can be supplied as well
graph_2 <-
graph \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_alpha_centrality",
alpha = 2,
exo = 2)
# Inspect the graph's internal
# node data frame
graph_2 \%>\% get_node_df()
# The new column name can be provided
graph_3 <-
graph \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_pagerank",
column_name = "pagerank")
# Inspect the graph's internal
# node data frame
graph_3 \%>\% get_node_df()
# If `graph_3` is modified by
# adding a new node then the column
# `pagerank` will have stale data; we
# can run the function again and re-use
# the existing column name to provide
# updated values
graph_3 <-
graph_3 \%>\%
add_node(
from = 1,
to = 3) \%>\%
set_node_attr_w_fcn(
node_attr_fcn = "get_pagerank",
column_name = "pagerank")
# Inspect the graph's internal
# node data frame
graph_3 \%>\% get_node_df()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abs-cat-functions.R
\name{abs_cat_tables}
\alias{abs_cat_tables}
\title{Return ABS catalogue tables}
\usage{
abs_cat_tables(
cat_no,
releases = "Latest",
types = c("tss", "css"),
include_urls = FALSE
)
}
\arguments{
\item{cat_no}{ABS catalogue numbers.}
\item{releases}{Date or character string object specifying the month and year denoting which
release to download. Default is "Latest", which downloads the latest available data. See
examples for further details.}
\item{types}{ABS publication types to return. Permissible options include one or more of: 'tss'
-- ABS Time Series Spreadsheets, 'css' -- ABS Data Cubes and 'pub' -- ABS Publications. The
default returns all Time Series Spreadsheets and Data Cubes.}
\item{include_urls}{Include full URLs to returned ABS data files. Default (FALSE) does not
include data file URLs.}
}
\value{
Returns a data frame listing the data collection tables and URLs for Excel (column:
\code{path_xls}) and, if available, Zip (column: \code{path_zip}) files.
}
\description{
Return list of data tables available from specified ABS catalogue number.
}
\examples{
\donttest{
## List latest available quarterly National Accounts tables
ana_tables <- abs_cat_tables("5206.0", releases="Latest");
ana_tables_url <- abs_cat_tables("5206.0", releases="Latest", include_urls=TRUE);
## List latest available CPI Time Series Spreadsheet tables only
cpi_tables <- abs_cat_tables("6401.0", releases="Latest", types="tss");
cpi_tables_url <- abs_cat_tables("5206.0", releases="Latest", types="tss", include_urls=TRUE);
## List latest available ASGS Volume 3 Data Cubes
asgs_vol3_tables <- abs_cat_tables("1270.0.55.003", releases="Latest", types="css");
asgs_vol3_tables_url <- abs_cat_tables("1270.0.55.003", releases="Latest",
types="css", include_urls=TRUE);
## List latest available ASGS ANZSIC publications (PDF) files
anzsic_2006 <- abs_cat_tables("1292.0", releases="Latest", types="pub", include_urls=TRUE);
}
}
\author{
David Mitchell <david.pk.mitchell@gmail.com>
}
|
/man/abs_cat_tables.Rd
|
no_license
|
RoelVerbelen/raustats
|
R
| false | true | 2,192 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abs-cat-functions.R
\name{abs_cat_tables}
\alias{abs_cat_tables}
\title{Return ABS catalogue tables}
\usage{
abs_cat_tables(
cat_no,
releases = "Latest",
types = c("tss", "css"),
include_urls = FALSE
)
}
\arguments{
\item{cat_no}{ABS catalogue numbers.}
\item{releases}{Date or character string object specifying the month and year denoting which
release to download. Default is "Latest", which downloads the latest available data. See
examples for further details.}
\item{types}{ABS publication types to return. Permissible options include one or more of: 'tss'
-- ABS Time Series Spreadsheets, 'css' -- ABS Data Cubes and 'pub' -- ABS Publications. The
default returns all Time Series Spreadsheets and Data Cubes.}
\item{include_urls}{Include full URLs to returned ABS data files. Default (FALSE) does not
include data file URLs.}
}
\value{
Returns a data frame listing the data collection tables and URLs for Excel (column:
\code{path_xls}) and, if available, Zip (column: \code{path_zip}) files.
}
\description{
Return list of data tables available from specified ABS catalogue number.
}
\examples{
\donttest{
## List latest available quarterly National Accounts tables
ana_tables <- abs_cat_tables("5206.0", releases="Latest");
ana_tables_url <- abs_cat_tables("5206.0", releases="Latest", include_urls=TRUE);
## List latest available CPI Time Series Spreadsheet tables only
cpi_tables <- abs_cat_tables("6401.0", releases="Latest", types="tss");
cpi_tables_url <- abs_cat_tables("5206.0", releases="Latest", types="tss", include_urls=TRUE);
## List latest available ASGS Volume 3 Data Cubes
asgs_vol3_tables <- abs_cat_tables("1270.0.55.003", releases="Latest", types="css");
asgs_vol3_tables_url <- abs_cat_tables("1270.0.55.003", releases="Latest",
types="css", include_urls=TRUE);
## List latest available ASGS ANZSIC publications (PDF) files
anzsic_2006 <- abs_cat_tables("1292.0", releases="Latest", types="pub", include_urls=TRUE);
}
}
\author{
David Mitchell <david.pk.mitchell@gmail.com>
}
|
#' Corrected Z-test of Looney and Jones
#' @description Perform two sample z test of Looney and Jones on vectors of data.
#' @usage lj.z.test(x, y, alternative = c("two.sided", "less", "greater"))
#' @param x a (non-empty) numeric vector of data values
#' @param y a (non-empty) numeric vector of data values.
#' @param alternative a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less". You can specify just the initial letter.
#' @details
#' --------------Needs to be filled...------------------
#' @return p.value p value of the test
#' @return statistic the value of the z-statistic
#'
#' @examples
#' vec1 <- c(2,3,NA,4,3,4,NA,5,4,2)
#' vec2 <- c(3,NA,4,4,3,NA,NA,4,NA,2)
#' lj.z.test(vec1, vec2, alternative = "greater")
#'
lj.z.test <- function(x, y, alternative = "two.sided") {
# get the paired sample, assign it to t.paired and n.paired
t.paired <- x[(!is.na(x)) & (!is.na(y))]
n.paired <- y[(!is.na(x)) & (!is.na(y))]
n1 <- length(t.paired) # number of paired sample
# t.rest is the independent sample from x without NAs
t.rest <- x[(is.na(y)) & (!is.na(x))]
n2 <- length(t.rest)
# n.rest is the independent sample from y without NAs
n.rest <- y[(is.na(x)) & (!is.na(y))]
n3 <- length(n.rest)
#calculate mean.star and var.star
t.mu.star <- mean(c(t.paired,t.rest))
n.mu.star <- mean(c(n.paired,n.rest))
t.var.star <- var(c(t.paired,t.rest))
n.var.star <- var(c(n.paired,n.rest))
paired.cov <- cov(t.paired,n.paired)
sd <- sqrt(t.var.star/(n1+n2) + n.var.star/(n1+n3) - 2*n1*paired.cov/((n1+n2)*(n1+n3)))
z.corr = (t.mu.star - n.mu.star)/sd
p.value <- 0
if(alternative == "two.sided") {
p.value <- 2*(1- pnorm(abs(z.corr)))
}else if(alternative == "greater") {
p.value <- 1 - pnorm(z.corr)
}else if(alternative == "less") {
p.value <- pnorm(z.corr)
} else{
stop("arg should be one of \"two.sided\", \"greater\", \"less\"")
}
cat(" Corrected Z-test of Looney and Jones\n P value is:", p.value, "\n statistic:", z.corr)
}
|
/R/lj.z.test.R
|
no_license
|
AMSProject/pmtest
|
R
| false | false | 2,098 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2650
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2650
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query33_query54_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1135
c no.of clauses 2650
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2650
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query33_query54_1344.qdimacs 1135 2650 E1 [] 0 16 1119 2650 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query33_query54_1344/query33_query54_1344.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 711 |
r
|
# arithmetic calculations
2 + 3
# assigning variable
x <- 3
y <- 4 # perks of using '<-' is that you can specify the direction it's working in
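# the assignment arrow can also point to the right (plain base R; y2 is just an example name)
4 -> y2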
# plotting in R
x <- c(1,2,3)
y <- c(2,3,1)
plot(x,y)
# class of variables
class(x)
class(y)
length(x)
length(y)
# strings
str <- "This is my first script"
class(str)
length(str)
# clear my history
rm(list = ls())
ls() # this will now return character(0) because the workspace was just cleared
# functions
?mean
?sd
?max
# total function
# name
# function statement
# some argument
# body
# return statement
my_sum <- function(input_1,input_2) {
tot = input_1 + input_2
return(tot) # return is optional
}
# assigning value
input_1 = 3
input_2 = 4
input_1 + input_2
# calling a function
my_sum(input_1, input_2)
my_sum(input_1 = 4, input_2 = 5)
# Native function in R
sum(input_1+input_2)
# how about converting fahrenheit to kelvin
fah_to_kelvin <- function(temp) {
kelvin <- ((temp - 32) * (5 / 9) + 273.15)
return(kelvin)
}
# what about kelvin to celsius?
kel_to_celsius <- function(temp_k) {
celsius <- (temp_k - 273.15)
return(celsius)
}
# alternative "fah_to_celsius": compose the two conversions defined above
fah_to_celsius <- function(temp_f) {
  temp_k <- fah_to_kelvin(temp = temp_f)
  celsius <- kel_to_celsius(temp_k)
  return(celsius)
}
fah_to_kelvin(temp=32)
# write a function to convert "fah_to_celsius"
fah_to_celsius <- function(temp) {
celsius <- ((temp - 32) * (5 / 9))
return(celsius)
}
fah_to_celsius(temp=61)
# Clear functions fah_to_cel
rm(fah_to_celsius)
rm(kel_to_celsius)
rm(fah_to_kelvin)
# call the fah_to_cel_functions.R script
source("fah_to_cel_functions.R")
fah_to_kelvin(temp=32)
kel_to_celsius(0)
fah_to_celsius(0)
# Exercise 2
best_practice <- c("write", "programs", "for", "people", "not", "computers")
asterisk <- "***"
fence <- function(input_1, input_2) {
result = c(input_2, input_1, input_2)
return(result)
}
input_1 <- best_practice
input_2 <- asterisk
fence(input_1 = best_practice, input_2 = asterisk)
# Now doing something with real dataset
dir.create("data") # create a directory
download.file("https://raw.githubusercontent.com/swcarpentry/r-novice-gapminder/gh-pages/_episodes_rmd/data/gapminder-FiveYearData.csv", destfile = "data/gapminder.csv")
dat <- read.csv("data/gapminder.csv", header=TRUE)
head(dat)
summary(dat)
str(dat)
# write function called analyse that takes country as argument and
# displays mean, min, and max lifeExp of that country
# subsetting data frame (define a country name first, then try it outside a function)
thecountry <- "Albania"
country_name <- subset(dat, dat$country == thecountry)
head(country_name)
summary(country_name)
# calculate mean, min, and max of lifeExp
mean(country_name$lifeExp)
min(country_name$lifeExp)
max(country_name$lifeExp)
# function now
analyse <- function(thecountry) {
country_name <- subset(dat, dat$country == thecountry)
print(mean(country_name$lifeExp))
print(min(country_name$lifeExp))
print(max(country_name$lifeExp))
out <- c("Mean_LE" = mean(country_name$lifeExp), "Min_LE" = min(country_name$lifeExp), "Max_LE" = max(country_name$lifeExp))
# modify the analyse function to generate a plot with "year" on
# x-axis and "lifeExp" on y-axis
plot (country_name$year, country_name$lifeExp, xlab = "Year", ylab = "Life Expectancy", main=thecountry)
print(out)
}
analyse(thecountry = "Albania")
# loop function - the tedious way
best_practice
best_practice_fun <- function(some_argument) {
print(some_argument[1])
print(some_argument[2])
print(some_argument[3])
print(some_argument[4])
print(some_argument[5])
print(some_argument[6])
}
best_practice_fun(best_practice)
#loop function - the for loop
best_practice
for (w in best_practice) {
print(w)
}
# alternatively
best_practice_fun2 <- function(some_argument) {
for (w in some_argument) {
print(w)
}
}
best_practice_fun2(best_practice)
# Generate a file that contains years "1952" and "1997" and name it
# as gapminder_52_97
# and another file that contains years "1967" and "2007" and name it
# as gapminder_67_07
#hint: use subset
summary(dat)
# subset(dat, dat$country == "Uganda")
gapminder_52_97 <- subset(dat, dat$year == 1952 | dat$year == 1997)
head(gapminder_52_97)
gapminder_67_07 <- subset(dat, dat$year == 1967 | dat$year == 2007)
head(gapminder_67_07)
# writing a dataset
write.csv(file = "data/gapminder_52_97.csv", gapminder_52_97, row.names = FALSE, quote = FALSE)
write.csv(file = "data/gapminder_67_07.csv", gapminder_67_07, row.names = FALSE, quote = FALSE)
# listing files with a pattern in a directory
list.files(path = "data", pattern = ".csv", full.names = TRUE)
# write a for loop to print each filename on a different line for
# the above output
filenames <- list.files(path = "data", pattern = ".csv", full.names = TRUE)
for (f in filenames) {
print(f)
}
# using the function you wrote this morning to print out the results
# from multiple data-sets
analyse_data <- function(file, new) {
file_out <- read.csv(file, header = TRUE)
country_name <- subset(file_out, file_out$country == new)
out2 <- c("Mean_LE" = mean(country_name$lifeExp),
"Min_LE" = min(country_name$lifeExp),
"Max_LE" = max(country_name$lifeExp))
print(file)
print(new)
print(out2)
plot(country_name$year, country_name$lifeExp,
xlab = "Year", ylab = "Life Expectancy", main=new)
}
analyse_all <- function(pattern, new) {
filenames <- list.files(path = "data", pattern = pattern, full.names = TRUE)
for (f in filenames) {
print(f)
    analyse_data(f, new)
}
}
analyse_all(".csv","Uganda")
|
/Day2magic.R
|
no_license
|
laurenkoh/R-magic
|
R
| false | false | 5,454 |
r
|
\name{hdv2003}
\docType{data}
\alias{hdv2003}
\title{Histoire de Vie 2003}
\description{
Sample of 2000 individuals and 20 variables drawn from the \emph{Histoire de Vie}
survey conducted by INSEE in 2003.
}
\usage{data(hdv2003)}
\format{A data frame with 2000 rows and 20 columns}
\source{INSEE detail files: \url{http://www.insee.fr/fr/themes/detail.asp?ref_id=fd-HDV03}}
\keyword{datasets}
|
/man/hdv2003.Rd
|
no_license
|
juba/rgrs
|
R
| false | false | 418 |
rd
|
#load markers and data
datafile = "D:/UPitts/Research/cytof/data/"
setwd(datafile)
library(flowCore)
dt<-read.FCS("downsample.fcs",transformation = FALSE)
load("ref_label.Rdata")
markers<- attributes(ref_label)$markers$name
dt2<-dt[,markers]
#transformation
asinhTrans=arcsinhTransform(a=0, b=0.2)
translist<-transformList(markers, asinhTrans)
dt2.transform<-transform(dt2, translist)
dt2.transform<-exprs(dt2.transform)
desc = attributes(ref_label)$markers$desc
desc2 = sub("[0-9]*[A-Za-z]*_(.*)", "\\1", desc)
dt2.transform = as.data.frame(dt2.transform)
colnames(dt2.transform) = desc2
# standardize = function(gene.i){
# x = as.numeric(gene.i)
# names(x) = names(gene.i)
# s.x = (x-mean(x))/sd(x)
# s.x[which(s.x>2)] = 2
# s.x[which(s.x<(-2))] = -2
# return(s.x)
# }
#ACCENSE
#load cluster labels
label = read.csv("D:/UPitts/Research/cytof/accense/accense_output.csv")
#label = read.csv("D:/CyTof_review_Xiangning/flowgrid/out.csv")
label$population = as.factor(label$population)
cluster_label = label$population
# #rearrange: put samples in the same cluster together
# order_label = order(cluster_label$cluster_label)
# cluster_label_ordered = as.data.frame(cluster_label[order_label, ])
# colnames(cluster_label_ordered) = "cluster_label"
# dt2.ordered = dt2.transform[order_label, ]
#calculate the mean expression for each cluster
dt.split = split(dt2.transform, cluster_label)
dt.split.mean = lapply(dt.split, function(x){apply(x, 2, mean, na.rm = TRUE)})
dt.mean = plyr::ldply(dt.split.mean, rbind)[, -1]
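# added sanity check (sketch): the same per-cluster means can be obtained in one
# call with aggregate(); dt.mean_check should match dt.mean up to row order
dt.mean_check = aggregate(dt2.transform, by = list(cluster = cluster_label), FUN = mean, na.rm = TRUE)[, -1]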
#test subset
# sample = sample(1:nrow(dt2.ordered), 200)
# sample_order = sample[order(sample)]
# dt2.sub = dt2.ordered[sample_order, ]
# label.sub = as.data.frame(cluster_label_ordered[sample_order, ])
# colnames(label.sub) = "cluster_label"
# nlevels_sub = nlevels(label.sub$cluster_label)
#
# colors = distinctColorPalette(k = nlevels_sub, altCol = FALSE, runTsne = FALSE)
# x = aheatmap(t(dt2.sub), annCol = label.sub, annColors = list(colors),
# Rowv = NA, Colv = NA, main = "Heatmap of Stardarized log2CPM of \n 50 top Differentially Expressed Genes in DLPFC",
# color = "YlGnBu", #breaks = c(-2, seq(-1, 1, length.out = 9), 2),
# cexRow = 1.2)
library(NMF)
#install.packages("randomcoloR")
library(randomcoloR)
cluster_label = as.data.frame(list(cluster_label = c(1:nrow(dt.mean))))
pdf("ACCENSE_heatmap2.pdf", width = 10, height = 10, onefile = FALSE)
colors = distinctColorPalette(k = nrow(dt.mean), altCol = FALSE, runTsne = FALSE)
aheatmap(as.matrix(t(dt.mean)),annCol = cluster_label, annColors = list(colors),annLegend = FALSE,
Rowv = NA, Colv = NA, main = paste0("Heatmap of clusters generated by ACCENSE \n Number of clusters = ", nrow(dt.mean)),
color = "YlGnBu",
cexRow = 1.2)
dev.off()
#DensVM
load("D:/UPitts/Research/cytof/cytofkit/densVMresult1.rData")
cluster_label<-cluster_DensVM[["clusters"]]$cluster
dt.split = split(dt2.transform, cluster_label)
dt.split.mean = lapply(dt.split, function(x){apply(x, 2, mean, na.rm = TRUE)})
dt.mean = plyr::ldply(dt.split.mean, rbind)[, -1]
cluster_label = as.data.frame(list(cluster_label = c(1:nrow(dt.mean))))
pdf("DensVM_heatmap2.pdf", width = 10, height = 10, onefile = FALSE)
colors = distinctColorPalette(k = nrow(dt.mean), altCol = FALSE, runTsne = FALSE)
aheatmap(as.matrix(t(dt.mean)),annCol = cluster_label, annColors = list(colors),annLegend = FALSE,
Rowv = NA, Colv = NA, main = paste0("Heatmap of clusters generated by DensVM \n Number of clusters = ", nrow(dt.mean)),
color = "YlGnBu",
cexRow = 1.2)
dev.off()
|
/code/tsne/cytof_heatmap2.R
|
no_license
|
liupeng2117/CyTOF-review-paper-data-and-code-
|
R
| false | false | 3,616 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aaa.R
\name{install_axidraw}
\alias{install_axidraw}
\title{Install the AxiDraw python API}
\usage{
install_axidraw(path = NULL, ...)
}
\arguments{
\item{path}{The path to the AxiDraw API. If NULL it will be downloaded
automatically.}
\item{...}{
Arguments passed on to \code{\link[reticulate:py_install]{reticulate::py_install}}
\describe{
\item{\code{envname}}{The name, or full path, of the environment in which Python
packages are to be installed. When \code{NULL} (the default), the active
environment as set by the \code{RETICULATE_PYTHON_ENV} variable will be used;
if that is unset, then the \code{r-reticulate} environment will be used.}
\item{\code{method}}{Installation method. By default, "auto" automatically finds a
method that will work in the local environment. Change the default to force
a specific installation method. Note that the "virtualenv" method is not
available on Windows.}
\item{\code{conda}}{The path to a \code{conda} executable. Use \code{"auto"} to allow \code{reticulate} to
automatically find an appropriate \code{conda} binary. See \strong{Finding Conda} for more details.}
\item{\code{python_version}}{The requested Python version. Ignored when attempting
to install with a Python virtual environment.}
\item{\code{pip}}{Boolean; use \code{pip} for package installation? This is only relevant
when Conda environments are used, as otherwise packages will be installed
from the Conda repositories.}
}}
}
\description{
This function will download and install the AxiDraw python API for use with
reticulate. It is necessary to have this installed in order to use any of the
functionality in the fawkes package.
}
|
/man/install_axidraw.Rd
|
permissive
|
thomasp85/fawkes
|
R
| false | true | 1,751 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IMS_parallel.R
\name{IMS_parallel}
\alias{IMS_parallel}
\title{IMS_parallel.}
\usage{
IMS_parallel(
spectra = NULL,
ncores = 8,
precursor = NULL,
correct_peak = NULL,
...
)
}
\arguments{
\item{spectra}{List of spectra.}
\item{ncores}{Number of cores available.}
\item{precursor}{vector of precursor masses of length(spectra).}
\item{correct_peak}{Potentially a vector of correct Peaks, see \code{InterpretMSSpectrum} for details.}
\item{...}{Further parameters passed directly to \code{InterpretMSSpectrum}.}
}
\value{
A list of \code{InterpretMSSpectrum} result objects which can be systematically evaluated.
However, note that plotting is unfortunately not enabled for parallel processing.
}
\description{
\code{IMS_parallel} is a parallel implementation of \code{\link{InterpretMSSpectrum}}.
}
\details{
For mass processing and testing it may be sufficient to use \code{InterpretMSSpectrum}
without plotting functionality. However, this function is likely to be deprecated or integrated
as an option into the main function in the future.
}
\seealso{
\code{\link{InterpretMSSpectrum}}
}
|
/man/IMS_parallel.Rd
|
no_license
|
cran/InterpretMSSpectrum
|
R
| false | true | 1,231 |
rd
|
testlist <- list(type = 151L, z = 3.95035078382263e-317)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609889271-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 110 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/3_by_Ignas_Ciplys.R
\name{save_rds_unique}
\alias{save_rds_unique}
\title{[~!~] Save object as `.RDS` with unique name (i.e., without overwriting)}
\usage{
save_rds_unique(what, path)
}
\arguments{
\item{what}{Object to save.}
\item{path}{File name with path to save to.}
}
\description{
Saves an object as an \code{.RDS} file under a unique file name, so that an
existing file with the same name is never overwritten.
}
\examples{
#NO EXAMPLES YET
}
\author{
Ignas Ciplys
}
\seealso{
Other functions by Ignas Ciplys: \code{\link{plot_multiDim}}
}
|
/man/save_rds_unique.Rd
|
no_license
|
Ignnn/spHelper
|
R
| false | true | 521 |
rd
|
#' Check and format the data to be used by PPBstats functions
#'
#' @description
#' \code{format_data_PPBstats} checks and formats the data to be used by PPBstats functions
#'
#' @param data The data frame to format
#'
#' @param type type of format :
#' \itemize{
#' \item data_network
#' \item data_agro
#' \item data_agro_SR
#' \item data_agro_HA
#' \item data_agro_LF
#' \item data_organo_napping
#' \item data_organo_hedonic
#' }
#'
#' @param threshold For type = data_organo, descriptors whose number of occurrences is <= threshold are kept
#'
#' @param network_part For type = "data_network", element of the network, it can be "unipart" or "bipart"
#'
#' @param vertex_type For type = "data_network",
#' \itemize{
#' \item for unipart network : "seed_lots" or "location"
#' \item for bipart network : c("germplasm", "location")
#' }
#'
#' @param network_split For type = "data_network" and network_part = "unipart" and
#' vertex_type = "location", split of the data that can be "germplasm" or "relation_year_start"
#'
#'
#' @details
#' See for more details :
#' \itemize{
#' \item \code{\link{format_data_PPBstats.data_network}}
#' \item \code{\link{format_data_PPBstats.data_agro}}
#' \item \code{\link{format_data_PPBstats.data_agro_SR}}
#' \item \code{\link{format_data_PPBstats.data_agro_HA}}
#' \item \code{\link{format_data_PPBstats.data_agro_LF}}
#' \item \code{\link{format_data_PPBstats.data_organo_napping}}
#' \item \code{\link{format_data_PPBstats.data_organo_hedonic}}
#' }
#'
#' @author Pierre Riviere
#'
#' @export
#'
format_data_PPBstats = function(
data,
type,
threshold,
network_part = c("unipart", "bipart"),
network_split = c("germplasm", "relation_year_start"),
vertex_type = NULL
)
{
# 0. Error messages ----------
match.arg(type, c("data_network", "data_agro",
"data_agro_SR", "data_agro_HA", "data_agro_LF",
"data_organo_napping", "data_organo_hedonic")
)
# 1.Network ----------
if(type == "data_network"){
d = format_data_PPBstats.data_network(data, network_part, network_split, vertex_type)
}
# 2.Agro ----------
if(type == "data_agro"){
d = format_data_PPBstats.data_agro(data)
}
if(type == "data_agro_SR"){
d = format_data_PPBstats.data_agro_SR(data)
}
if(type == "data_agro_HA"){
d = format_data_PPBstats.data_agro_HA(data)
}
if(type == "data_agro_LF"){
d = format_data_PPBstats.data_agro_LF(data)
}
# 3.Organo ----------
if(type == "data_organo_napping"){
d = format_data_PPBstats.data_organo_napping(data, threshold)
}
if(type == "data_organo_hedonic"){
d = format_data_PPBstats.data_organo_hedonic(data, threshold)
}
# 4.Return results ----------
return(d)
}
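## Minimal usage sketch (added; `my_agro_df` is a hypothetical data frame laid out
## as required by format_data_PPBstats.data_agro):
# d_agro <- format_data_PPBstats(data = my_agro_df, type = "data_agro")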
|
/R/format_data_PPBstats.R
|
no_license
|
gaelleVF/PPBstats-PPBmelange
|
R
| false | false | 2,795 |
r
|
# load database -----------------------------------------------------------
# variables in the data set
# Date: Date in format dd/mm/yyyy
# Time: time in format hh:mm:ss
# Global_active_power: household global minute-averaged active power (in kilowatt)
# Global_reactive_power: household global minute-averaged reactive power (in kilowatt)
# Voltage: minute-averaged voltage (in volt)
# Global_intensity: household global minute-averaged current intensity (in ampere)
# Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy). It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave (hot plates are not electric but gas powered).
# Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy). It corresponds to the laundry room, containing a washing-machine, a tumble-drier, a refrigerator and a light.
# Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy). It corresponds to an electric water-heater and an air-conditioner.
#
arquivo <- file.path("./data/household_power_consumption.txt")
dados <- read.table(
file = arquivo,
header = TRUE,
sep = ";",
na.strings = "?", # Note that in this dataset missing values are coded as '?'.
colClasses = c(rep("character",2), rep("numeric", 7))
)
head(dados)
dim(dados)
str(dados)
dados$Time <- as.POSIXct(paste(dados$Date, dados$Time), format="%d/%m/%Y %H:%M:%S")
# dados$Time <- strptime(paste(dados$Date, dados$Time), format="%d/%m/%Y %H:%M:%S")
head(dados$Time)
dados$Date <- as.Date(dados$Date, format = "%d/%m/%Y")
head(dados$Date)
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
dados_r <- subset(dados, Date %in% c(as.Date("2007-02-01"), as.Date("2007-02-02")))
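# quick sanity check (added): two full days at one-minute resolution should give
# about 2 * 24 * 60 = 2880 rows, unless some minutes are missing from the raw file
nrow(dados_r)
table(dados_r$Date)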
# Plot 2 ------------------------------------------------------------------
par(mfrow = c(1,1))
titulo <- ""
xlabel <- ""
ylabel <- "Global Active Power (kilowatts)"
with(dados_r, {
plot(Time, Global_active_power, type = "n", main = titulo , ylab = ylabel, xlab = xlabel )
lines(Time, Global_active_power, lwd = 1, lty = 1, col = 1)
})
dev.copy(png, file = "plot2.png", width = 480, height = 480) ## Copy my plot to a PNG file
dev.off()
|
/plot2.R
|
no_license
|
motabe/ExData_Plotting1
|
R
| false | false | 2,189 |
r
|
# Copyright 2018 Christian Diener <mail[at]cdiener.com>
#
# Apache license 2.0. See LICENSE for more information.
#' Build a configuration for the long read alignment workflow.
#'
#' This can be saved and passed on to others to ensure reproducibility.
#'
#' @param ... Any arguments are used to update the default configuration. See
#' the example below. Optional.
#' @return A list with the parameters used in the long read alignment
#' workflow.
#' @export
#' @examples
#' config <- config_align_long(reference = "refs/mouse")
config_align_long <- function(...) {
config <- list(
reference = NULL,
threads = 1,
alignment_dir = "alignments",
max_hits = 100,
progress = TRUE
)
args <- list(...)
for (arg in names(args)) {
config[[arg]] <- args[[arg]]
}
return(config)
}
#' Align long reads (for instance from nanopore sequencing) to a reference
#' database.
#'
#' @param object An artifact or list of files.
#' @param config A configuration as obtained by \code{\link{config_align_long}}.
#' @return A list with the generated alignments and some general diagnostics.
#'
#' @export
align_long_reads <- function(object, config) {
files <- get_files(object)
if (is.null(config$reference)) {
stop("must specify a reference genome in configuration :/")
}
if (!dir.exists(config$alignment_dir)) {
flog.info("Creating output directory %s.", config$alignment_dir)
dir.create(config$alignment_dir, recursive = TRUE)
}
paired <- "reverse" %in% names(files)
flog.info(paste("Aligning %d samples on %d threads.",
"Keeping up to %d secondary alignments."),
nrow(files), config$threads, config$max_hits)
alns <- apply(files, 1, function(file) {
file <- as.list(file)
flog.info("Aligning %s...", file$id)
reads <- file$forward
if (paired) {
reads <- c(reads, file$reverse)
}
out_path <- file.path(config$alignment_dir, paste0(file$id, ".bam"))
log_file <- file.path(config$alignment_dir, paste0(file$id, ".log"))
args <- c("-acx", "map-ont", "-t", config$threads, "-N",
config$max_hits, config$reference, reads)
args <- append(args, c(paste0("2>", log_file), "|", "samtools",
"view", "-bS", "-", ">", out_path))
success <- system2("minimap2", args = args)
return(data.table(id = file$id, alignment = out_path,
success = success == 0))
})
logs <- lapply(files$id, function(id) {
log_file <- file.path(config$alignment_dir, paste0(id, ".log"))
content <- readChar(log_file, min(file.info(log_file)$size, 1e8))
file.remove(log_file)
return(content)
})
alns <- rbindlist(alns)
if (alns[, any(!success)]) {
flog.error("%d alignments failed!", alns[, sum(!success)])
}
artifact <- list(
alignments = alns,
logs = logs,
disk_size = sum(sapply(alns$alignment,
function(f) file.info(f)$size)),
steps = c(object[["steps"]], "align_long_reads")
)
return(artifact)
}
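## Usage sketch (added; not from the package docs -- the paths and the shape of the
## `object` argument are assumptions, see get_files() for the expected input):
# config <- config_align_long(reference = "refs/mouse.mmi", threads = 4)
# alignments <- align_long_reads(my_file_artifact, config)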
|
/R/nanopore.R
|
permissive
|
diegoibt/mbtools
|
R
| false | false | 3,221 |
r
|
#' @title visualize probabilistic mutation signature for the independent model
#' @description Generate visualization of mutation signatures for the model with
#' substitution patterns and flanking bases represented by the
#' independent representation.
#'
#' @param vF a matrix for mutation signature
#' @param numBases the number of flanking bases
#' @param baseCol the colour of the bases (A, C, G, T, plus/minus strand)
#' @param trDir the index whether the strand direction is plotted or not
#' @param charSize the size of the character
#' @param isScale the index whether the height of the flanking base is changed
#' or not
#' @param alpha the parameter for the Renyi entropy (applicable only if the
#' isScale is TRUE)
#' @param charLimit the limit of char size
#' @return a plot of the input mutational signature
#'
#' @import ggplot2
#'
#' @examples
#'
#' load(system.file("extdata/sample.rdata", package="HiLDA"))
#' Param <- pmgetSignature(G, K = 3)
#'
#' sig <- slot(Param, "signatureFeatureDistribution")[1,,]
#' visPMS(sig, numBases = 5, isScale = TRUE)
#'
#'
#' @export
visPMS <- function(vF, numBases, baseCol=NA, trDir=FALSE, charSize=5,
isScale=FALSE, alpha=2, charLimit=0.25) {
if(ncol(vF) != 6) {
stop("The size of the matrix is wrong.")
}
if((numBases %% 2) != 1) {
stop("The number of bases should include the central base.")
}
  if (any(is.na(baseCol))) {
gg_color_hue6 <- hcl(h=seq(15, 375, length=7), l=65, c=100)[seq_len(6)]
    baseCol <- gg_color_hue6[c(3, 5, 2, 1, 6, 4)]
}
centerBase <- (1 + numBases) / 2
v1 <- vF[1,seq_len(6)]
V2 <- vF[2:(numBases),seq_len(4)]
A <- matrix(0, numBases, 4)
B <- matrix(0, 4, 4)
if (trDir == TRUE) {
v3 <- vF[(numBases + 1),seq_len(2)]
}
for (l in seq_len(numBases)) {
if (l < centerBase) {
A[l, ] <- V2[l, ]
} else if (l > centerBase) {
A[l, ] <- V2[l - 1, ]
}
}
A[centerBase,2] <- sum(v1[seq_len(3)])
A[centerBase,4] <- sum(v1[4:6])
B[2, c(1, 3, 4)] <- v1[seq_len(3)] / sum(v1[seq_len(3)])
B[4, c(1, 2, 3)] <- v1[4:6] / sum(v1[4:6])
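  # Rényi entropy of order `alpha`: log(sum(p^alpha)) / (1 - alpha); as implemented
  # below it falls back to the base-2 Shannon entropy -sum(p * log2(p)) when alpha == 1.
  # It is used (when isScale = TRUE) to scale the height of each flanking-base column.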
renyi <- function(p, tAlpha=alpha) {
if (tAlpha == 1) {
return(- sum(p * log2(p), na.rm=TRUE))
} else {
return( log(sum(p^tAlpha)) / (1 - tAlpha))
}
}
if (isScale == FALSE) {
fheight <- rep(1, numBases)
} else {
fheight <- 0.5 * (2 - apply(A, MARGIN=1, FUN=renyi))
}
## collecting data for ggplot
x_start <- c()
x_end <- c()
y_start <- c()
y_end <- c()
text_x <- c()
text_y <- c()
text_lab <- c()
text_col <- c()
rectType <- c()
num2base <- c("A", "C", "G", "T")
# flanking bases
tempStartX <- 0
for (i in seq_len(numBases)) {
x_start <- c(x_start, tempStartX + c(0, cumsum(A[i,seq_len(3)])))
x_end <- c(x_end, tempStartX + cumsum(A[i,seq_len(4)]))
y_start <- c(y_start, rep(0, 4))
y_end <- c(y_end, rep(fheight[i], 4))
rectType <- c(rectType, c("A", "C", "G", "T"))
for (j in seq_len(4)) {
tempPos <- c(0, cumsum(A[i,seq_len(4)]))
if (A[i,j] > charLimit && fheight[i] > charLimit) {
text_x <- c(text_x, tempStartX + 0.5 * (tempPos[j] + tempPos[j + 1]))
text_y <- c(text_y, 0.5 * (0 + fheight[i]))
text_lab <- c(text_lab, num2base[j])
text_col <- c(text_col, "w")
}
}
tempStartX <- tempStartX + 1.25
}
## alternative bases from C
tempStartX <- (centerBase - 1) * 1.25
x_start <- c(x_start, rep(tempStartX, 4))
x_end <- c(x_end, rep(tempStartX + A[centerBase, 2], 4))
y_start <- c(y_start, 2 + c(0, cumsum(B[2,seq_len(3)])))
y_end <- c(y_end, 2 + cumsum(B[2,seq_len(4)]))
rectType <- c(rectType, c("A", "C", "G", "T"))
tempPos <- c(0, cumsum(B[2,seq_len(4)]))
for (j in seq_len(4)) {
if (A[centerBase, 2] > charLimit && B[2,j] > charLimit) {
text_x <- c(text_x, tempStartX + 0.5 * A[centerBase, 2])
text_y <- c(text_y, 2 + 0.5 * (tempPos[j] + tempPos[j + 1]))
text_lab <- c(text_lab, num2base[j])
text_col <- c(text_col, "w")
}
}
## alternative bases from T
tempStartX <- tempStartX + A[centerBase, 2]
x_start <- c(x_start, rep(tempStartX, 4))
x_end <- c(x_end, rep(tempStartX + A[centerBase, 4], 4))
y_start <- c(y_start, 2 + c(0, cumsum(B[4,seq_len(3)])))
y_end <- c(y_end, 2 + cumsum(B[4,seq_len(4)]))
rectType <- c(rectType, c("A", "C", "G", "T"))
tempPos <- c(0, cumsum(B[4,seq_len(4)]))
for (j in seq_len(4)) {
if (A[centerBase, 4] > charLimit && B[4,j] > charLimit) {
text_x <- c(text_x, tempStartX + 0.5 * A[centerBase, 4])
text_y <- c(text_y, 2 + 0.5 * (tempPos[j] + tempPos[j + 1]))
text_lab <- c(text_lab, num2base[j])
text_col <- c(text_col, "w")
}
}
if (trDir == TRUE) {
# draw direction bias
x_start <- c(x_start, (numBases - 1) * 1.25 + 0.24)
x_end <- c(x_end, (numBases - 1) * 1.25 + 0.49)
y_start <- c(y_start, 2)
y_end <- c(y_end, 2 + v3[1])
rectType <- c(rectType, c("+"))
if (v3[1] > 0.125) {
text_x <- c(text_x, (numBases - 1) * 1.25 + 0.5 * (0.24 + 0.49))
text_y <- c(text_y, 2 + 0.5 * v3[1])
text_lab <- c(text_lab, "+")
text_col <- c(text_col, "w")
}
x_start <- c(x_start, (numBases - 1) * 1.25 + 0.51)
x_end <- c(x_end, (numBases - 1) * 1.25 + 0.76)
y_start <- c(y_start, 2)
y_end <- c(y_end, 2 + v3[2])
rectType <- c(rectType, c("-"))
if (v3[2] > 0.125) {
text_x <- c(text_x, (numBases - 1) * 1.25 + 0.5 * (0.51 + 0.76))
text_y <- c(text_y, 2 + 0.5 * v3[2])
text_lab <- c(text_lab, "-")
text_col <- c(text_col, "w")
}
}
## arrow
xs <- c(1 / 3, 2 / 3, 2 / 3, 5 / 6, 1 / 2, 1 / 6, 1 / 3, 1 / 3) +
(centerBase - 1) * 1.25
ys <- c(1 / 4, 1 / 4, 1 / 2, 1 / 2, 3 / 4, 1 / 2, 1 / 2, 1 / 4) + 1
vs <- rep("arrow", length(xs))
arrow_poly <- data.frame(x=xs, y=ys, v=vs)
rect_data <- data.frame(x_start=x_start, x_end=x_end,
y_start=y_start, y_end=y_end, rectType=rectType)
text_data <- data.frame(x=text_x, y=text_y,
label=text_lab, text_col=text_col)
ggplot() +
geom_rect(data=rect_data, aes(xmin=.data$x_start, xmax=.data$x_end,
ymin=.data$y_start, ymax=.data$y_end,
fill=.data$rectType)) +
geom_polygon(data=arrow_poly, aes(x=.data$x, y=.data$y, fill=.data$v)) +
geom_text(data=text_data, aes(label=.data$label, x=.data$x, y=.data$y,
colour=.data$text_col), size=charSize) +
scale_colour_manual(values=c("#FFFFFF")) +
scale_fill_manual(
values=c("A"=baseCol[1], "C"=baseCol[2], "G"=baseCol[3],
"T"=baseCol[4], "+"=baseCol[5], "-"=baseCol[6],
arrow="#A8A8A8")) +
guides(fill="none") +
guides(colour="none") +
guides(size="none") +
theme(axis.text=element_blank(),
axis.ticks=element_blank(),
panel.background=element_blank(),
panel.grid=element_blank(),
axis.title=element_blank(),
line = element_blank())
}
|
/R/vis_signature.R
|
no_license
|
USCbiostats/HiLDA
|
R
| false | false | 7,719 |
r
|
## path <- "P:/Cluster/LVMproject/article-multipleComparisons/"
## setwd(path)
## source("BATCH_simulation-modelsearch-power.R")
## * seed
iter_sim <- as.numeric(Sys.getenv("SGE_TASK_ID"))
n.iter_sim <- as.numeric(Sys.getenv("SGE_TASK_LAST"))
if(is.na(iter_sim)){iter_sim <- 1}
if(is.na(n.iter_sim)){n.iter_sim <- 1}
cat("iteration ",iter_sim," over ",n.iter_sim,"\n", sep = "")
set.seed(1)
seqSeed <- sample(1:max(1e5,n.iter_sim),size=n.iter_sim,replace=FALSE)
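## one pre-drawn seed per array task: every SGE task gets a distinct but reproducible seed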
iSeed <- seqSeed[iter_sim]
set.seed(iSeed)
cat("seed: ",iSeed,"\n")
## * path
path <- "."
path.res <- file.path(path,"Results","simulation-modelsearch-power")
if(dir.exists(path.res)==FALSE){
dir.create(path.res)
}
path.output <- file.path(path,"output","simulation-modelsearch-power")
if(dir.exists(path.output)==FALSE){
dir.create(path.output)
}
## * libraries
library(lava)
library(data.table)
library(lavaSearch2)
lava.options(search.calc.quantile.int = FALSE)
## * settings
seqN <- c(30,50,75,100,150,200,300,500)
seqCor <- c(0,0.75,1,1.5,2.5,5)
n.rep <- 50
p <- 15
## * prepare
n.N <- length(seqN)
n.Cor <- length(seqCor)
X.all <- paste0("X",1:p)
link.all <- c("eta~Z",paste0("eta~",X.all))
m <- lvm(c(Y1,Y2,Y3,Y4,Y5)~eta, eta~Treatment)
covariance(m) <- Y1 ~ Y2
covariance(m) <- Y3 ~ Y4
covariance(m) <- Y3 ~ Y5
latent(m) <- ~eta
m.sim <- m
for(iConfounder in X.all){
regression(m.sim) <- as.formula(paste0(iConfounder," ~ a * confounder"))
}
regression(m.sim) <- eta ~ 0.25 * Z
latent(m.sim) <- ~confounder
## plot(m.sim)
## * loop
dt <- NULL
pb <- txtProgressBar(max = n.Cor)
dt.res <- NULL
for(iN in 1:n.N){ # iN <- 8
for(iCor in 1:n.Cor){ # iCor <- 6
cat("sample size=",seqN[iN],", correlation=",seqCor[iCor],": ", sep = "")
n.tempo <- seqN[iN]
a.tempo <- seqCor[iCor]
for(iRep in 1:n.rep){ # iRep <- 1
cat(iRep," ")
ls.max <- list()
## ** Simulate data
dt.data <- lava::sim(m.sim, n = n.tempo, p = c("a" = a.tempo))
## ** lvm
e.lvm <- estimate(m, data = dt.data)
## ** run score test
res.Wald <- try(modelsearch2(e.lvm,
data = dt.data,
nStep = 1,
link = link.all,
method.p.adjust = "max",
method.maxdist = "approximate",
trace = 0),
silent = TRUE)
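            ## method.p.adjust = "max" adjusts the score tests using the distribution of the
            ## maximum test statistic over the candidate links (accounting for their correlation);
            ## the Bonferroni/Hochberg/Hommel p-values added below are kept for comparison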
if(inherits(res.Wald,"try-error")){
print(res.Wald)
next
} ## res.Wald$sequenceModel
iTable <- getStep(res.Wald, slot = "sequenceTest")
iTable$bonf.p.value <- p.adjust(iTable$p.value, method = "bonferroni")
iTable$hoch.p.value <- p.adjust(iTable$p.value, method = "hochberg")
iTable$hommel.p.value <- p.adjust(iTable$p.value, method = "hommel")
ls.max$Wald <- iTable[iTable$link=="eta~Z","p.value"]
ls.max$Wald.bonf <- iTable[iTable$link=="eta~Z","bonf.p.value"]
ls.max$Wald.max <- iTable[iTable$link=="eta~Z","adjusted.p.value"]
ls.max$selected <- which(iTable$link=="eta~Z") %in% which.max(abs(iTable$statistic))
cor.test <- getStep(res.Wald, slot = "sequenceSigma")
medianCor.test <- median(abs(cor.test[lower.tri(cor.test, diag = FALSE)]))
## ** merge
dt.tempo <- cbind(n = n.tempo,
a = a.tempo,
rep = iRep,
seed = iSeed,
medianCor = medianCor.test,
as.data.table(ls.max))
dt.res <- rbind(dt.res, dt.tempo)
}
cat("\n")
## ** export (tempo)
filename <- paste0("type1error-S",iter_sim,"(tempo).rds")
saveRDS(dt.res, file = file.path(path.res,filename))
}
}
## * export
filename <- paste0("type1error-S",iter_sim,".rds")
saveRDS(dt.res, file = file.path(path.res,filename))
## * display
print(sessionInfo())
|
/BATCH_simulation-modelsearch-power.R
|
no_license
|
bozenne/Article-lvm-multiple-comparisons
|
R
| false | false | 4,275 |
r
|
## path <- "P:/Cluster/LVMproject/article-multipleComparisons/"
## setwd(path)
## source("BATCH_simulation-modelsearch-power.R")
## * seed
iter_sim <- as.numeric(Sys.getenv("SGE_TASK_ID"))
n.iter_sim <- as.numeric(Sys.getenv("SGE_TASK_LAST"))
if(is.na(iter_sim)){iter_sim <- 1}
if(is.na(n.iter_sim)){n.iter_sim <- 1}
cat("iteration ",iter_sim," over ",n.iter_sim,"\n", sep = "")
set.seed(1)
seqSeed <- sample(1:max(1e5,n.iter_sim),size=n.iter_sim,replace=FALSE)
iSeed <- seqSeed[iter_sim]
set.seed(iSeed)
cat("seed: ",iSeed,"\n")
## * path
path <- "."
path.res <- file.path(path,"Results","simulation-modelsearch-power")
if(dir.exists(path.res)==FALSE){
dir.create(path.res)
}
path.output <- file.path(path,"output","simulation-modelsearch-power")
if(dir.exists(path.output)==FALSE){
dir.create(path.output)
}
## * libraries
library(lava)
library(data.table)
library(lavaSearch2)
lava.options(search.calc.quantile.int = FALSE)
## * settings
seqN <- c(30,50,75,100,150,200,300,500)
seqCor <- c(0,0.75,1,1.5,2.5,5)
n.rep <- 50
p <- 15
## * prepare
n.N <- length(seqN)
n.Cor <- length(seqCor)
X.all <- paste0("X",1:p)
link.all <- c("eta~Z",paste0("eta~",X.all))
m <- lvm(c(Y1,Y2,Y3,Y4,Y5)~eta, eta~Treatment)
covariance(m) <- Y1 ~ Y2
covariance(m) <- Y3 ~ Y4
covariance(m) <- Y3 ~ Y5
latent(m) <- ~eta
m.sim <- m
for(iConfounder in X.all){
regression(m.sim) <- as.formula(paste0(iConfounder," ~ a * confounder"))
}
regression(m.sim) <- eta ~ 0.25 * Z
latent(m.sim) <- ~confounder
## plot(m.sim)
## * loop
dt <- NULL
pb <- txtProgressBar(max = n.Cor)
dt.res <- NULL
for(iN in 1:n.N){ # iN <- 8
for(iCor in 1:n.Cor){ # iCor <- 6
cat("sample size=",seqN[iN],", correlation=",seqCor[iCor],": ", sep = "")
n.tempo <- seqN[iN]
a.tempo <- seqCor[iCor]
for(iRep in 1:n.rep){ # iRep <- 1
cat(iRep," ")
ls.max <- list()
## ** Simulate data
dt.data <- lava::sim(m.sim, n = n.tempo, p = c("a" = a.tempo))
## ** lvm
e.lvm <- estimate(m, data = dt.data)
## ** run score test
res.Wald <- try(modelsearch2(e.lvm,
data = dt.data,
nStep = 1,
link = link.all,
method.p.adjust = "max",
method.maxdist = "approximate",
trace = 0),
silent = TRUE)
if(inherits(res.Wald,"try-error")){
print(res.Wald)
next
} ## res.Wald$sequenceModel
iTable <- getStep(res.Wald, slot = "sequenceTest")
iTable$bonf.p.value <- p.adjust(iTable$p.value, method = "bonferroni")
iTable$hoch.p.value <- p.adjust(iTable$p.value, method = "hochberg")
iTable$hommel.p.value <- p.adjust(iTable$p.value, method = "hommel")
ls.max$Wald <- iTable[iTable$link=="eta~Z","p.value"]
ls.max$Wald.bonf <- iTable[iTable$link=="eta~Z","bonf.p.value"]
ls.max$Wald.max <- iTable[iTable$link=="eta~Z","adjusted.p.value"]
ls.max$selected <- which(iTable$link=="eta~Z") %in% which.max(abs(iTable$statistic))
cor.test <- getStep(res.Wald, slot = "sequenceSigma")
medianCor.test <- median(abs(cor.test[lower.tri(cor.test, diag = FALSE)]))
## ** merge
dt.tempo <- cbind(n = n.tempo,
a = a.tempo,
rep = iRep,
seed = iSeed,
medianCor = medianCor.test,
as.data.table(ls.max))
dt.res <- rbind(dt.res, dt.tempo)
}
cat("\n")
## ** export (tempo)
filename <- paste0("type1error-S",iter_sim,"(tempo).rds")
saveRDS(dt.res, file = file.path(path.res,filename))
}
}
## * export
filename <- paste0("type1error-S",iter_sim,".rds")
saveRDS(dt.res, file = file.path(path.res,filename))
## * display
print(sessionInfo())
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dca.r
\name{dca}
\alias{dca}
\title{Decision curve analysis}
\usage{
dca(
data,
outcome,
predictors,
xstart = 0.01,
xstop = 0.99,
xby = 0.01,
ymin = -0.05,
probability = NULL,
harm = NULL,
graph = TRUE,
intervention = FALSE,
interventionper = 100,
smooth = FALSE,
loess.span = 0.1
)
}
\description{
Decision curve analysis
}
|
/man/dca.Rd
|
permissive
|
awconway/capnopred
|
R
| false | true | 431 |
rd
|
context("seas")
test_that("unaggregated", {
method <- mgr_init("seas")
expect_equal(3, length(method))
rnvl <- list(rep(seq(12), 11), rep(seq(12) + 12, 11))
repr <- lapply(rnvl, represent.seas, method = method)
expected <- seq(12)
expect_equal(expected, repr[[1]])
expect_equal(expected + 12, repr[[2]])
di <- distance.seas(method, repr[[1]], repr[[2]])
expect_equal(sqrt(144 * 132), di)
})
test_that("aggregated", {
method <- mgr_init("seas")
method$w <- 3
rnvl <- list(rep(seq(12), 11), rep(seq(12) + 12, 11))
repr <- lapply(rnvl, represent.seas, method = method)
expected <- c(sum(1:4), sum(5:8), sum(9:12)) / 4
expect_equal(expected, repr[[1]])
expect_equal(expected + 12, repr[[2]])
di <- distance.seas(method, repr[[1]], repr[[2]])
expect_equal(sqrt(144 * 132), di)
})
|
/tests/testthat/test_seas.R
|
no_license
|
lkegel/idxrepr
|
R
| false | false | 821 |
r
|
context("seas")
test_that("unaggregated", {
method <- mgr_init("seas")
expect_equal(3, length(method))
rnvl <- list(rep(seq(12), 11), rep(seq(12) + 12, 11))
repr <- lapply(rnvl, represent.seas, method = method)
expected <- seq(12)
expect_equal(expected, repr[[1]])
expect_equal(expected + 12, repr[[2]])
di <- distance.seas(method, repr[[1]], repr[[2]])
expect_equal(sqrt(144 * 132), di)
})
test_that("aggregated", {
method <- mgr_init("seas")
method$w <- 3
rnvl <- list(rep(seq(12), 11), rep(seq(12) + 12, 11))
repr <- lapply(rnvl, represent.seas, method = method)
expected <- c(sum(1:4), sum(5:8), sum(9:12)) / 4
expect_equal(expected, repr[[1]])
expect_equal(expected + 12, repr[[2]])
di <- distance.seas(method, repr[[1]], repr[[2]])
expect_equal(sqrt(144 * 132), di)
})
|
Bfkay<-function(sd, obtained, uniform, lower=0, upper=1, meanoftheory=0, sdtheory=1, tail=2)
{
area <- 0
if(identical(uniform, 1)){
theta <- lower
range <- upper - lower
incr <- range / 2000
for (A in -1000:1000){
theta <- theta + incr
dist_theta <- 1 / range
height <- dist_theta * dnorm(obtained, theta, sd)
area <- area + height * incr
}
}else{
theta <- meanoftheory - 5 * sdtheory
incr <- sdtheory / 200
for (A in -1000:1000){
theta <- theta + incr
dist_theta <- dnorm(theta, meanoftheory, sdtheory)
if(identical(tail, 1)){
if (theta <= 0){
dist_theta <- 0
} else {
dist_theta <- dist_theta * 2
}
}
height <- dist_theta * dnorm(obtained, theta, sd)
area <- area + height * incr
}
}
LikelihoodTheory <- area
Likelihoodnull <- dnorm(obtained, 0, sd)
BayesFactor <- LikelihoodTheory / Likelihoodnull
ret <- list("LikelihoodTheory" = LikelihoodTheory, "Likelihoodnull" = Likelihoodnull, "BayesFactor" = BayesFactor)
ret
}
Bayes <- function(sd, obtained, meanoftheory, uniform, sdtheory, tail)
{
  # pass arguments by name: Bfkay()'s positional order differs from this wrapper's
  g <- Bfkay(sd = sd, obtained = obtained, uniform = uniform,
             meanoftheory = meanoftheory, sdtheory = sdtheory, tail = tail)
  h <- c("The Likelihood of Theory is", g$LikelihoodTheory,
         "The Likelihood of Null is", g$Likelihoodnull,
         "The Bayes Factor is", g$BayesFactor)
  return(h)
}
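## Illustrative usage sketch -- the numbers below are made up and only show the
## calling convention (uniform = 0 selects the (half-)normal prior branch):
if (FALSE) {
  ## BF for an observed effect of 0.3 (SE 0.1) against a half-normal theory
  ## prior with SD 0.2 (tail = 1 makes the prediction directional)
  Bfkay(sd = 0.1, obtained = 0.3, uniform = 0,
        meanoftheory = 0, sdtheory = 0.2, tail = 1)
}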
|
/R/hello.R
|
no_license
|
anupamsingh81/BayesDienes
|
R
| false | false | 1,370 |
r
|
#' @title Launch an RStudio addin to build ShinyApp using Electron Framework
#'
#' @export
#'
photon_rstudioaddin <- function(RscriptRepository = NULL) {
requireNamespace("shiny")
requireNamespace("miniUI")
requireNamespace("shinyFiles")
ui <- miniUI::miniPage(
# Shiny fileinput resethandler
miniUI::gadgetTitleBar("Use Photon to Build Standalone Shiny Apps",
left = NULL, right = NULL),
miniUI::miniTabstripPanel(
#miniUI::miniTabPanel(
#title = 'Build standalone Shiny App for first time',
#icon = shiny::icon("cloud-upload"),
miniUI::miniContentPanel(
shiny::h4("Shiny App Directory:"),
                 shiny::fillRow(flex = c(1, 3),
shinyFiles::shinyDirButton('dirSelect', label = 'Select directory',
title = 'Choose your Shiny App directory'),
shiny::verbatimTextOutput('currentdirselected')
),
shiny::br(),
shiny::br(),
shiny::h4("CRAN Packages:"),
shiny::textInput('cran_packages',
label = ("ex: mgcv,matrixStats"),
value = "NULL"),
shiny::br(),
shinyBS::bsCollapse(id = "adv", open = NULL,
shinyBS::bsCollapsePanel(
shiny::tags$b("> Click for Advanced Options"), NULL,
shiny::textInput('github_packages',
label = "GitHub packages (ex. thomasp85/patchwork):",
value = "NULL", width = "100%"),
shiny::textInput('bioc_packages',
label = "Bioconductor packages (ex. SummarizedExperiment,VariantAnnotation):",
value = "NULL", width = "100%")
# shiny::textInput('rscript_args',
# label = "Additional arguments to Rscript",
# value = ""),
# shiny::textInput('rscript_repository',
# label = "R script repository path: launch & log location",
# value =NULL, width = "100%")))
)))),
miniUI::miniButtonBlock(
actionButton("create", "Build", icon("play-circle"),
style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
)
)
# Server code for the gadget.
server <- function(input, output, session) {
volumes <- c(
'Current working dir' = getwd(),
'HOME' = Sys.getenv('HOME'),
'R Installation' = R.home(),
'Root' = "/"
)
shinyFiles::shinyDirChoose(input,
id = 'dirSelect',
roots = volumes,
session = session)
output$dirSelect <-
shiny::renderUI({
shinyFiles::parseDirPath(volumes, input$dirSelect)
})
output$currentdirselected <-
shiny::renderText({
          shiny::req(input$dirSelect)
print(paste("Selected directory:",
as.character(shinyFiles::parseDirPath(volumes, input$dirSelect))))
})
    shiny::observeEvent(input$coll, ({
shinyBS::updateCollapse(session, "adv", open = "Advanced Options")
}))
###########################
# CREATE / OVERWRITE
###########################
shiny::observeEvent(input$create, {
rscript_args <- input$rscript_args
photon::startFun(
as.character(shinyFiles::parseDirPath(volumes, input$dirSelect)),
cran_packages = input$cran_packages,
bioc_packages = input$bioc_packages,
github_packages = input$github_packages
)
})
# Listen for the 'done' event. This event will be fired when a user
# is finished interacting with your application, and clicks the 'done'
# button.
shiny::observeEvent(input$done, {
      # Here is where your Shiny application might now go and affect the
# contents of a document open in RStudio, using the `rstudioapi` package.
# At the end, your application should call 'stopApp()' here, to ensure that
# the gadget is closed after 'done' is clicked.
shiny::stopApp()
})
}
  # Use a modal dialog as a viewer.
viewer <-
shiny::dialogViewer("Photon Shiny App Builder",
width = 700,
height = 800)
#viewer <- shiny::paneViewer()
shiny::runGadget(ui, server, viewer = viewer)
}
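## Usage sketch (illustrative): once the package is installed, the gadget can be
## launched from the RStudio Addins menu or from the console via
## photon_rstudioaddin(); note that the RscriptRepository argument is currently
## unused by the function body.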
|
/R/photon_rstudioaddin.R
|
no_license
|
Brownsey/photon
|
R
| false | false | 4,564 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MAR_MethodB.R
\name{MAR_MethodB}
\alias{MAR_MethodB}
\title{MAR_MethodB}
\usage{
MAR_MethodB(n, NSIM, missRate, trueValue, cores = 1)
}
\arguments{
\item{n}{sample size in each simulated dataset}
\item{NSIM}{the number of simulation runs}
\item{missRate}{the missing rate}
\item{trueValue}{the true value of the parameter}
\item{cores}{the number of cores for parallelization, default = 1}
}
\value{
the beta, betaSE, meanBias, meanError, cover for methodB
}
\description{
This function calls the \code{generateData} and \code{MAR} functions to generate datasets under MAR with a given missing rate, uses method B for analysis, repeats this for \code{NSIM} iterations, and returns the beta, betaSE, meanBias, meanError, and cover for each iteration
}
|
/MIsimulation/man/MAR_MethodB.Rd
|
no_license
|
sheep720/MIsimulation
|
R
| false | true | 819 |
rd
|
###################################################
### code chunk number 3: Cs02_plotwolfmoosedata
###################################################
x <- isleRoyal[, "Year"]
y <- log(isleRoyal[, c("Wolf", "Moose")])
graphics::matplot(x, y,
ylab = "Log count", xlab = "Year", type = "l",
lwd = 3, bty = "L", col = "black"
)
legend("topright", c("Wolf", "Moose"), lty = c(1, 2), bty = "n")
|
/inst/userguide/figures/CS7--Cs02_plotwolfmoosedata.R
|
permissive
|
nwfsc-timeseries/MARSS
|
R
| false | false | 397 |
r
|
#' Add a BibEntry Reference to an Existing Collection
#'
#' @description Merge one or more BibEntries together without
#' duplicating entries
#'
#' @details The entries in \code{bib} are merged with the references already stored in \code{file}, and the combined collection is written back to \code{file} without duplicating entries.
#'
#' @importFrom RefManageR as.BibEntry ReadBib WriteBib
#' @importFrom tools file_ext
#'
#' @param bib One or more \code{BibEntry}-class objects as defined by \code{RefManageR::is.BibEntry()}
#' @param file The name or path (as a \code{character} string) of a .bib file containing \code{BibEntry}-class objects
#'
#' @examples
#' \dontrun{
#' require(RefManageR)
#'
#' # Create First BibEntry
#' ref <- as.BibEntry(citation(package = 'RefManageR'))
#'
#' # Write BibEntry to file new_bib.bib
#' WriteBib(bib = ref, file = 'new_bib.bib')
#'
#' # Create Second BibEntry
#' surv <- as.BibEntry(citation(package = 'survival'))
#'
#' # Add second BibEntry to File
#' add_reference(bib = surv, file = 'new_bib.bib')
#' }
#' @export
add_reference <- function(bib, file) {
  if(tools::file_ext(file) != 'bib') stop('file is not a .bib file')
  Bib <- as.BibEntry(bib)            # coerce the new reference(s) to BibEntry
  bib_file <- ReadBib(file = file)   # existing collection on disk
  BIB <- bib_file + Bib              # merge; duplicate entries are not repeated
  WriteBib(bib = BIB, file = file, append = FALSE)
}
|
/R/add_reference.R
|
no_license
|
Auburngrads/DODschools
|
R
| false | false | 1,170 |
r
|
library(KRIG)
### Name: KRIG-package
### Title: Spatial Statistics with Kriging
### Aliases: KRIG KRIG-package
### ** Examples
library( KRIG )
vignette( topic = 'copper_mining_2d', package = 'KRIG' )
|
/data/genthat_extracted_code/KRIG/examples/KRIG-package.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 208 |
r
|
#
#
# octavian
# Git
# /OneDrive/metis2/metic_select_retrieve/octavian
# - resegment_observe branch
# SERVER
#
#
# IMPORTANT NOTE - If plyr is loaded after dplyr (in tidyverse()), then group_by statement will fail
# with group factor not carried across
#
#
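# Illustrative (not run): with plyr attached after dplyr,
#   df %>% group_by(g) %>% summarise(n = n())
# picks up plyr::summarise and collapses to a single ungrouped row;
# calling dplyr::summarise() explicitly, or attaching plyr first, avoids this.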
library(shiny)
library(shinydashboard)
Sys.setlocale('LC_ALL','C')
library(shiny)
library(pool)
library(tidyverse)
library(DBI)
library(zoo)
library(shinysky)
library(reshape2)
library(shinythemes)
library(shinyWidgets)
library(MASS)
library(plotly)
library(ggpubr)
library(ggbiplot)
library(ggrepel)
dashboardPage(
skin = "red",
dashboardHeader(title = "Media Sentiment Analysis", titleWidth = 450),
dashboardSidebar(
dateRangeInput(inputId= 'dateRange',
label = "Date range",
start = Sys.Date() - 14,
format = "yyyy_mm_dd"),
numericInput(inputId = "dateGrouping",
"Rolling average ",
value = 5,
min = 1,
max = 90),
sidebarMenu(
menuItem("Comparison", tabName = "comparison", icon = icon("chart-line")),
menuItem("Individual", tabName = "individual", icon = icon("dashboard")),
menuItem("Sentiment", tabName = "sentiment", icon = icon("dashboard")),
menuItem("Correlation", tabname = "correlation", icon = icon("dashboard")),
menuItem("Source", tabName = "source", icon = icon("dashboard"))
)
),
# print("ui 1 - Start of page"),
######
dashboardBody(
##### Dropdown selection items
fluidRow(column(width = 2,
dropdown(
# tooltip = TRUE,
label = "Selection 1",
tags$h3("Selection 1"),
# selectizeInput("isource",
# "Source 1",
# choices = c("One", "Two", "Three") #rssSources.names,
# multiple = TRUE),
selectizeInput("isourcetype",
"Source Type 1",
choices = rss.SourceTypes,
multiple = TRUE),
selectizeInput("icountry",
"Country 1",
choices = rss.Countries,
multiple = TRUE),
selectizeInput("iregion",
'Region 1', choices = rss.Regions,
multiple = TRUE),
selectizeInput("iorientation",
"Orientation 1",
choices = rss.Orientation,
multiple = TRUE),
selectizeInput("iSentimentFactor",
"Sentiment factor 1",
c("Syuzhet" = 'syuzhet', "Afinn" = "afinn","Bing" = "bing", "Anger - nrc" = "nrc_anger","Anticipation - nrc" = "nrc_anticipation","Disgust - nrc" = "nrc_disgust",
"Fear - nrc" = "nrc_fear", "Joy - nrc" = "nrc_joy","Positive- nrc"= "nrc_positive","Negative - nrc" = "nrc_negative","Sadness - nrc" = "nrc_sadness",
"Surprise - nrc" = "nrc_surprise", "Trust - nrc" = "nrc_trust","Constraining - Lo" = "loughran_constraining","Litigous - Lo" = "loughran_litigious",
"Uncertain- Lo" = "loughran_uncertain", "Negative - Lo" = "loughran_negative","Positive - Lo" = "loughran_positive", "Ensemble +/-" = "ensemble_posneg") ,
multiple = TRUE,
selected = "ensemble_posneg"),
textInput("itextinput",
"Text selection 1",
value = " ")
)
),
column(width = 2,
dropdown(
tooltip = TRUE,
label = "Selection 2",
tags$h3("Selection 2"),
# selectizeInput("isource2",
# "Source 2",
# choices = rssSources.names,
# multiple = TRUE),
selectizeInput("isourcetype2",
"Source Type 2",
choices = rss.SourceTypes,
multiple = TRUE),
selectizeInput("icountry2",
"Country 2",
choices = rss.Countries,
multiple = TRUE),
selectizeInput("iregion2",
'Region 2', choices = rss.Regions,
multiple = TRUE),
selectizeInput("iorientation2",
"Orientation 2",
choices = rss.Orientation,
multiple = TRUE),
selectizeInput("iSentimentFactor2",
"Sentiment factor 2",
c("Syuzhet" = 'syuzhet', "Afinn" = "afinn","Bing" = "bing", "Anger - nrc" = "nrc_anger","Anticipation - nrc" = "nrc_anticipation","Disgust - nrc" = "nrc_disgust",
"Fear - nrc" = "nrc_fear", "Joy - nrc" = "nrc_joy","Positive- nrc"= "nrc_positive","Negative - nrc" = "nrc_negative","Sadness - nrc" = "nrc_sadness",
"Surprise - nrc" = "nrc_surprise", "Trust - nrc" = "nrc_trust","Constraining - Lo" = "loughran_constraining","Litigous - Lo" = "loughran_litigious",
"Uncertain- Lo" = "loughran_uncertain", "Negative - Lo" = "loughran_negative","Positive - Lo" = "loughran_positive", "Ensemble +/-" = "ensemble_posneg") ,
multiple = TRUE,
selected = "ensemble_posneg"),
textInput("itextinput2",
"Text selection 2",
value = " ")
)
),
column(width = 2,
dropdown(
                        tooltip = TRUE,
label = "Smooth/Corr",
tags$h3("Smoothing"),
radioButtons("ismooth", "Method",
c("None"= "", "loess" = "loess", "lm" = "lm","gam" = "gam", "glm" = "glm", "MASS:rlm" = "MASS:rlm" )),
numericInput("iconfidenceLevel", label = "Confidence value", value = 0.95, min = 0, max = 1, width = "30%" ),
checkboxInput("iconfidence", label = "On", FALSE),
tags$h3("Correlation"),
selectizeInput("icorrelate", label = "Method", c("pearson", "kendall", "spearman"), multiple = FALSE),
selectizeInput("icorr.alternate", label = "Alternative", c("two.sided", "greater", "less"))
)),
column(width = 2,
dropdown(
tooltip = TRUE,
label = "Normalize",
fluidRow(
tags$h3("Normalise"),
checkboxInput("iPosNegNorm", "Pos/neg"),
checkboxInput("iLRCNorm", "Orientation"),
checkboxInput("iCountryNorm", "Countries"))
)),
column(width = 2,
dropdown(
tooltip = TRUE,
label = "Format",
fluidRow(
tags$h3("Chart format"),
tags$h5("Time Series"),
checkboxInput("aColumn", "Column", FALSE),
checkboxInput("aLine", "Line", TRUE),
checkboxInput("aPoint", "Points", FALSE),
tags$h5("Correlation"),
checkboxInput("aStar", "Star", FALSE)
)
))),
#################### Chart section
fluidRow(
h4("Comparative content"),
column(width = 12, plotlyOutput("SA_by_date_line_comp"))
),
# End of tab 1
h4("Selection 1"),
fluidRow(
column(width = 6, plotlyOutput("SA_by_date_line")),
column(width = 6, plotlyOutput("SA_summary_by_period"))),
h4("Selection 2"),
fluidRow(
column(width = 6, plotlyOutput("SA_by_date_line2")),
column(width = 6, plotlyOutput("SA_summary_by_period2"))),
# End of tab 2
fluidRow(
h4("Autocorrelation"),
column(width = 6, plotOutput("ACF1_large")),
column(width = 6, plotOutput("ACF2_large"))
),
h4("Statistics"),
fluidRow(
column(width = 8, plotlyOutput("SA_correlation")),
column(width = 4, DT::dataTableOutput("corrStats"))
),
fluidRow(
h4("Cluster"),
column(width = 4, plotOutput("PCA")),
column(width = 2, plotOutput("PCA_scree")),
column(width = 6, DT::dataTableOutput("PCA_tab"))
),
##################### Source
h4("Sources"),
DT::dataTableOutput("tbl")
)
# )
)
|
/ui.R
|
no_license
|
charlesgwbrewer1953/octavian
|
R
| false | false | 9,473 |
r
|
setwd("E:/phd.project.main/")
source("rotation1scripts_v4/scripts/r/functions.R")
gff1 = read.table("rotation1scripts_v4/original_data/IWGSC/iwgsc.genes.onlyv2.gff3", sep = "\t", stringsAsFactors = F)
gff2 = split(gff1, gff1$V1)
centromeres = read.table("rotation1scripts_v4/original_data/IWGSC/centromere.positions.txt", sep = " ", stringsAsFactors = F)
#taken from S11 of supplementary data from 2018 IWGSC paper in Science. Where more than one region has been defined,
#the region that overlaps the highest centromeric TE family (either Cereba or Quinta; figure S7) has been taken.
centromere.positions = sapply(centromeres$V2, function(x){
g = strsplit(x, "-")
g = as.numeric(g[[1]])
mean(g)
})
centromere.positions.bp = centromere.positions * 1000000
centromeres$pos = centromere.positions
centromeres$pos.bp = centromere.positions.bp
hist(gff2[[1]]$V4, breaks = 1000)
abline(v = centromeres$pos.bp[[1]])
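#repeat the same gene-position histogram, with the centromere midpoint overlaid,
#for every chromosome in gff2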
Map(function(x, y){
hist(x$V4, breaks = 1000)
abline(v = y)
}, gff2, centromeres$pos.bp)
|
/scripts/r/recombination/centromere.positions.R
|
no_license
|
alexcoulton/phd
|
R
| false | false | 1,060 |
r
|
setwd("E:/phd.project.main/")
source("rotation1scripts_v4/scripts/r/functions.R")
gff1 = read.table("rotation1scripts_v4/original_data/IWGSC/iwgsc.genes.onlyv2.gff3", sep = "\t", stringsAsFactors = F)
gff2 = split(gff1, gff1$V1)
centromeres = read.table("rotation1scripts_v4/original_data/IWGSC/centromere.positions.txt", sep = " ", stringsAsFactors = F)
#taken from S11 of supplementary data from 2018 IWGSC paper in Science. Where more than one region has been defined,
#the region that overlaps the highest centromeric TE family (either Cereba or Quinta; figure S7) has been taken.
centromere.positions = sapply(centromeres$V2, function(x){
g = strsplit(x, "-")
g = as.numeric(g[[1]])
mean(g)
})
centromere.positions.bp = centromere.positions * 1000000
centromeres$pos = centromere.positions
centromeres$pos.bp = centromere.positions.bp
hist(gff2[[1]]$V4, breaks = 1000)
abline(v = centromeres$pos.bp[[1]])
Map(function(x, y){
hist(x$V4, breaks = 1000)
abline(v = y)
}, gff2, centromeres$pos.bp)
|
#############################################################################################################################
rm(list=ls())
#############################################################################################################################
test <- TRUE # set test to FALSE to run the real simulation
scenarioName <- "3xELF"
ELFAge <- 18
big <- "UK"
outDir <- file.path("micro_v0.3_sensitivity", "MoreNE_SGUK", "outData")
#############################################################################################################################
source("micro_scenarios_v0.3/util/microsimulation_preparation.R")
source("micro_scenarios_v0.3/util/read_big_mat_transition.R")
init <- micro_prep()
eTrans <- readEcigTrans(big)
startAge <- 11
endAge <- 80
startYear <- 2017
endYear <- 2067
numAge <- endAge - startAge + 1
numYear <- endYear - startYear + 1
if(test){
n.i <- 10
v.M_1 <- rep(c("C", "N", "E", "Q", "Q"), n.i/5)
v.age <- rep(c(19, 4, 35, 180, 20), n.i/5)
n.t <- 10
} else {
n.i <- length(init$sin.ages) # number of simulated individuals
v.M_1 <- init$sin.states # beginning states
v.age <- init$sin.ages # initialize age
n.t <- 50 # time horizon
}
v.n <- c("N", "C", "Q", "D", "E", "X") # the model states: Never Smoker(N), Smoker(C), Quitter(Q), Dual(D), E-cig Only(E)
n.s <- length(v.n) # the number of states
d.e <- 0.03 # equal discounting of costs and QALYs by 3%
d.x <- 0.03 # mortality rate decreases by 3% annually
# v.Trt <- c("No Treatment", "Treatment") # store the strategy names
# Cost and utility inputs
u.N <- 1 # utility when not smoking
u.bTbase <- c(rep(0.98, 19), rep(0.96, 10), rep(0.97, 5), rep(0.96, 5), rep(0.97, 15), rep(0.98, 16))
u.Cbase <- c(rep(0.91,19), rep(c(0.88,0.86,0.83,0.81,0.78,0.76,0.74), each=5), rep(0.71, 16)) # utility when smoking
u.Qbase <- 1 - (1-u.Cbase) * 0.05
if(big == "JP"){
u.Ebase <- 1 - (1-u.Cbase) * 0.05
} else {
u.Ebase <- 1 - (1-u.Cbase) * 0.10
}
u.Dbase <- sqrt(u.Cbase * u.Ebase)
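# the hand-coded utility vectors above (and the relative-risk vectors below) are
# age-indexed: one entry per year of age from startAge (11) to endAge (80), i.e. length numAge = 70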
v.NXbase <- init$deathRateN
v.bTbase <- c(rep(0.92, 5), rep(0.93, 5), rep(0.94, 14), rep(0.95, 21), rep(0.96, 25))
v.RRCbase <- c(rep(2.8, 49), rep(2.5, 10), rep(2.0, 11))
v.RRQbase <- 1 + (v.RRCbase - 1) * 0.05
if(big == "JP"){
v.RREbase <- 1 + (v.RRCbase - 1) * 0.05
} else {
v.RREbase <- 1 + (v.RRCbase - 1) * 0.10
}
v.RRDbase <- sqrt(v.RRCbase * v.RREbase)
# Transition rates transformation
fELF = 1/2
fInitEcig = 3
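# scenario multipliers (applied below): fInitEcig triples never-smoker e-cigarette
# initiation, while fELF halves the e-cig/dual uptake transitions for ages 18 through
# ELFAge -- presumably an e-cigarette minimum-age ("ELF") restriction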
v.NC.primer <- init$NCprimer
v.CQ.primer <- init$CQprimer
v.QC.primer <- init$QCprimer
v.NE.primer=eTrans$NE; v.ND.primer=eTrans$ND; v.CD.primer=eTrans$CD; v.CE.primer=eTrans$CE;
v.QD.primer=eTrans$QD; v.QE.primer=eTrans$QE; v.DC.primer=eTrans$DC; v.DQ.primer=eTrans$DQ;
v.DE.primer=eTrans$DE; v.EC.primer=eTrans$EC; v.ED.primer=eTrans$ED; v.EN.primer=eTrans$EN
v.NE.primer <- v.NE.primer * fInitEcig
row18 <- 18 - startAge + 1
rowAgeELF <- ELFAge - startAge + 1
v.NE.primer[row18:rowAgeELF] <- v.NE.primer[row18:rowAgeELF] * fELF
v.CD.primer[row18:rowAgeELF] <- v.CD.primer[row18:rowAgeELF] * fELF
v.CE.primer[row18:rowAgeELF] <- v.CE.primer[row18:rowAgeELF] * fELF
v.QE.primer[row18:rowAgeELF] <- v.QE.primer[row18:rowAgeELF] * fELF
v.ND.primer[row18:rowAgeELF] <- v.ND.primer[row18:rowAgeELF] * fELF
v.QD.primer[row18:rowAgeELF] <- v.QD.primer[row18:rowAgeELF] * fELF
##################################### Functions ###########################################
##################################### Helper functions ####################################
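# getNiVec(): index an age-based parameter vector by each individual's current age index;
# out-of-range indices give NA, which is replaced by 0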
getNiVec <- function(v.base, v.index){
v.ni <- v.base[v.index]
v.ni[is.na(v.ni)] <- 0
v.ni
}
##################################### Main functions ######################################
# THE NEW samplev() FUNCTION
# efficient implementation of the rMultinom() function of the Hmisc package ####
samplev <- function (probs, m) {
d <- dim(probs)
n <- d[1]
k <- d[2]
lev <- dimnames(probs)[[2]]
if (!length(lev))
lev <- 1:k
ran <- matrix(lev[1], ncol = m, nrow = n)
U <- t(probs)
for(i in 2:k) {
U[i, ] <- U[i, ] + U[i - 1, ]
}
if (any((U[k, ] - 1) > 1e-05))
stop("error in multinom: probabilities do not sum to 1")
for (j in 1:m) {
un <- rep(runif(n), rep(k, n))
ran[, j] <- lev[1 + colSums(un > U)]
}
ran
}
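# Minimal sketch of what samplev() returns -- the probabilities here are made up
# and are not part of the model:
if (FALSE) {
  p.demo <- matrix(c(0.7, 0.2, 0.1,
                     0.1, 0.1, 0.8),
                   nrow = 2, byrow = TRUE,
                   dimnames = list(NULL, c("N", "C", "X")))
  samplev(p.demo, m = 1)  # one sampled state name per row, e.g. c("N", "X")
}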
# The MicroSim function for the simple microsimulation of the 'Sick-Sicker' model keeps track of what happens to each individual during each cycle.
MicroSim <- function(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c, d.e, TR.out = TRUE, TS.out = FALSE, Trt = FALSE, seed = 1) {
# Arguments:
# v.M_1: vector of initial states for individuals
# n.i: number of individuals
# n.t: total number of cycles to run the model
# v.n: vector of health state names
# X: vector or matrix of individual characteristics
# d.c: discount rate for costs
# d.e: discount rate for health outcome (QALYs)
# TR.out: should the output include a Microsimulation trace? (default is TRUE)
  # TS.out: should the output include a matrix of transitions between states? (default is FALSE)
# Trt: are the n.i individuals receiving treatment? (scalar with a Boolean value, default is FALSE)
# seed: starting seed number for random number generator (default is 1)
# Makes use of:
# Probs: function for the estimation of transition probabilities
# Costs: function for the estimation of cost state values
# Effs: function for the estimation of state specific health outcomes (QALYs)
v.index <- v.age - startAge + 1
v.dwe <- 1 / (1 + d.e) ^ (0:n.t)
# Create the matrix capturing the state name/costs/health outcomes for all individuals at each time point
m.M <- m.E <- matrix(nrow = n.i, ncol = n.t + 1,
dimnames = list(paste("ind", 1:n.i, sep = " "),
paste("cycle", 0:n.t, sep = " ")))
if(TR.out == TRUE) {
TR = matrix(NA, n.s, n.t)
}
m.M[, 1] <- v.M_1 # indicate the initial health state
v.RR <- getInitRR(v.M_1, v.index)
u <- getInitU(v.M_1, v.index)
m.E[, 1] <- Effs (u, cl=1)
set.seed(seed) # set the seed for every individual for the random number generator
for (t in 1:n.t) { # t <- 3
# print(v.index)
if (TR.out == TRUE) {
TR[,t] <- table(factor((m.M[,t])[v.age>=12 & v.age<=80], levels=v.n, ordered=TRUE))
}
if(t>1){
v.RR <- getRR(v.RR, m.M[,t], v.index)
}
# print(t)
# print(v.RR)
m.p <- Probs(m.M[, t], v.index, v.RR) # calculate the transition probabilities at cycle t
m.M[, t + 1] <- samplev(prob = m.p, m = 1) # sample the next health state and store that state in matrix m.M
cat('\r', paste(round(t/n.t * 100), "% done", sep = " ")) # display the progress of the simulation
v.age <- v.age + 1
v.index <- v.index + 1
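      # note: '<<-' below rewrites the global v.NXbase, so background mortality is
      # permanently reduced by d.x (3%) for every simulated year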
v.NXbase <<- v.NXbase * (1-d.x)
u <- getU(u, m.M[,t+1], v.index)
m.E[,t + 1] <- Effs(u, cl=1)
} # close the loop for the time points
if (TS.out == TRUE) { # create a matrix of transitions across states
TS <- paste(m.M, cbind(m.M[, -1], NA), sep = "->") # transitions from one state to the other
TS <- matrix(TS, nrow = n.i)
rownames(TS) <- paste("Ind", 1:n.i, sep = " ") # name the rows
colnames(TS) <- paste("Cycle", 0:n.t, sep = " ") # name the columns
} else {
TS <- NULL
}
if(TR.out==TRUE){
TR <- prop.table(t(TR), margin = 1)
} else {
TR <- NULL
}
te <- m.E %*% v.dwe # total (discounted) QALYs per individual
te_hat <- mean(te) # average (discounted) QALYs
colSumME <- colSums(m.E)
results <- list(m.M = m.M, TS = TS, TR = TR, m.E = m.E, te = te, te_hat = te_hat, colSumME = colSumME) # store the results from the simulation in a list
return(results) # return the results
} # end of the MicroSim function
#### Probability function
getInitRR <- function(M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RRD.ni <- getNiVec(v.RRDbase, v.index)
v.RRE.ni <- getNiVec(v.RREbase, v.index)
v.RR <- rep(1, n.i)
v.RR[M_it=="N"] <- 1
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- v.RRQ.ni[M_it=="Q"]
v.RR[M_it=="D"] <- v.RRD.ni[M_it=="D"]
v.RR[M_it=="E"] <- v.RRE.ni[M_it=="E"]
v.RR
}
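# getRR(): after a transition, the excess relative risk over the new state's target is
# multiplied by the age-specific factor v.bTbase each year, so e.g. a quitter's RR
# decays gradually toward 1 rather than dropping immediately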
getRR <- function(v.RRold, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
v.bT.ni <- getNiVec(v.bTbase, v.index)
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RRD.ni <- getNiVec(v.RRDbase, v.index)
v.RRE.ni <- getNiVec(v.RREbase, v.index)
v.RR <- getInitRR(M_it, v.index)
v.worseToN <- M_it=="N" & v.RRold>1
v.RR[v.worseToN] <- 1 + (v.RRold[v.worseToN] - 1) * v.bT.ni[v.worseToN]
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- 1 + (v.RRold[M_it=="Q"] - 1) * v.bT.ni[M_it=="Q"]
v.worseToD <- M_it=="D" & v.RRold>v.RRD.ni
v.RR[v.worseToD] <- v.RRD.ni[v.worseToD] + (v.RRold[v.worseToD] - v.RRD.ni[v.worseToD]) * v.bT.ni[v.worseToD]
v.worseToE <- M_it=="E" & v.RRold>v.RRE.ni
v.RR[v.worseToE] <- v.RRE.ni[v.worseToE] + (v.RRold[v.worseToE] - v.RRE.ni[v.worseToE]) * v.bT.ni[v.worseToE]
v.RR
}
# The Probs function that updates the transition probabilities of every cycle is shown below.
Probs <- function(M_it, v.index, v.RR) {
# M_it: health state occupied by individual i at cycle t (character variable)
# dur: the duration of being sick (sick/sicker)
v.index[v.index<=0] <- length(v.NXbase) + 10
m.p.it <- matrix(NA, n.s, n.i) # create vector of state transition probabilities
rownames(m.p.it) <- v.n # assign names to the vector
# Update base transition rates
v.NX.ni <- getNiVec(v.NXbase, v.index)
v.toX.ni <- v.RR * v.NX.ni
v.NC.primer.ni <- getNiVec(v.NC.primer, v.index)
v.NC.ni <- v.NC.primer.ni
v.CQ.primer.ni <- getNiVec(v.CQ.primer, v.index)
v.CQ.ni <- v.CQ.primer.ni
v.QC.primer.ni <- getNiVec(v.QC.primer, v.index)
v.QC.ni <- v.QC.primer.ni
# Ecig transitions
v.NE.ni <- getNiVec(v.NE.primer, v.index); v.ND.ni <- getNiVec(v.ND.primer, v.index)
v.NE.ni[v.index>15] <- 0; v.ND.ni[v.index>15] <- 0;
v.CD.ni <- getNiVec(v.CD.primer, v.index); v.CE.ni <- getNiVec(v.CE.primer, v.index)
v.QD.ni <- getNiVec(v.QD.primer, v.index); v.QE.ni <- getNiVec(v.QE.primer, v.index)
v.DC.ni <- getNiVec(v.DC.primer, v.index); v.DQ.ni <- getNiVec(v.DQ.primer, v.index)
v.DE.ni <- getNiVec(v.DE.primer, v.index); v.EC.ni <- getNiVec(v.EC.primer, v.index)
v.ED.ni <- getNiVec(v.ED.primer, v.index); v.EN.ni <- getNiVec(v.EN.primer, v.index)
v.NC.ni <- v.NC.ni * (1-v.NE.ni-v.ND.ni)
v.CQ.ni <- v.CQ.ni * (1-v.CD.ni-v.CE.ni)
v.QC.ni <- v.QC.ni * (1-v.QE.ni-v.QD.ni)
v.NC.ni <- v.NC.ni * (1-v.toX.ni); v.NE.ni <- v.NE.ni * (1-v.toX.ni); v.ND.ni <- v.ND.ni * (1-v.toX.ni)
v.CQ.ni <- v.CQ.ni * (1-v.toX.ni); v.CD.ni <- v.CD.ni * (1-v.toX.ni); v.CE.ni <- v.CE.ni * (1-v.toX.ni)
v.QC.ni <- v.QC.ni * (1-v.toX.ni); v.QE.ni <- v.QE.ni * (1-v.toX.ni); v.QD.ni <- v.QD.ni * (1-v.toX.ni)
v.DC.ni <- v.DC.ni * (1-v.toX.ni); v.DQ.ni <- v.DQ.ni * (1-v.toX.ni); v.DE.ni <- v.DE.ni * (1-v.toX.ni)
v.EC.ni <- v.EC.ni * (1-v.toX.ni); v.EN.ni <- v.EN.ni * (1-v.toX.ni); v.ED.ni <- v.ED.ni * (1-v.toX.ni)
v.NN.ni <- 1 - v.NC.ni - v.NE.ni - v.ND.ni - v.toX.ni
v.CC.ni <- 1 - v.CQ.ni - v.CD.ni - v.CE.ni - v.toX.ni
v.QQ.ni <- 1 - v.QC.ni - v.QE.ni - v.QD.ni - v.toX.ni
v.DD.ni <- 1 - v.DC.ni - v.DQ.ni - v.DE.ni - v.toX.ni
v.EE.ni <- 1 - v.EC.ni - v.EN.ni - v.ED.ni - v.toX.ni
youngN <- M_it=="N"&v.index<=15
m.p.it[,youngN] <- rbind(v.NN.ni[youngN], v.NC.ni[youngN], 0, v.ND.ni[youngN], v.NE.ni[youngN], v.toX.ni[youngN]) # transition probabilities when never smoke
youngE <- M_it=="E"&v.index<=15
m.p.it[,youngE] <- rbind(v.EN.ni[youngE], v.EC.ni[youngE], 0, v.ED.ni[youngE], v.EE.ni[youngE], v.toX.ni[youngE])
oldN <- M_it=="N"&v.index>15
m.p.it[,oldN] <- rbind(v.NN.ni[oldN], v.NC.ni[oldN], 0, 0, 0, v.toX.ni[oldN]) # transition probabilities when never smoke
oldE <- M_it=="E"&v.index>15
m.p.it[,oldE] <- rbind(0, v.EC.ni[oldE], v.EN.ni[oldE], v.ED.ni[oldE], v.EE.ni[oldE], v.toX.ni[oldE])
m.p.it[,M_it == "C"] <- rbind(0, v.CC.ni[M_it=="C"], v.CQ.ni[M_it=="C"], v.CD.ni[M_it=="C"], v.CE.ni[M_it=="C"], v.toX.ni[M_it=="C"]) # transition probabilities when current smoke
m.p.it[,M_it == "Q"] <- rbind(0, v.QC.ni[M_it=="Q"], v.QQ.ni[M_it=="Q"], v.QD.ni[M_it=="Q"], v.QE.ni[M_it=="Q"], v.toX.ni[M_it=="Q"]) # transition probabilities when quit smoke
m.p.it[,M_it == "D"] <- rbind(0, v.DC.ni[M_it=="D"], v.DQ.ni[M_it=="D"], v.DD.ni[M_it=="D"], v.DE.ni[M_it=="D"], v.toX.ni[M_it=="D"])
m.p.it[,M_it == "X"] <- c(0, 0, 0, 0, 0, 1) # transition probabilities when dead
# cat("\n")
# print(m.p.it)
# cat("\n")
ifelse(colSums(m.p.it) == 1, return(t(m.p.it)), print("Probabilities do not sum to 1")) # return the transition probabilities or produce an error
}
### Costs function
# The Costs function estimates the costs at every cycle.
getInitU <- function(M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u.D.ni <- getNiVec(u.Dbase, v.index)
u.E.ni <- getNiVec(u.Ebase, v.index)
u <- rep(0, n.i)
u[M_it=="N"] <- 1
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- u.Q.ni[M_it=="Q"]
u[M_it=="D"] <- u.D.ni[M_it=="D"]
u[M_it=="E"] <- u.E.ni[M_it=="E"]
u[v.index<1 | v.index>70] <- 0
u
}
getU <- function(u.old, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
u.bT.ni <- getNiVec(u.bTbase, v.index)
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u.D.ni <- getNiVec(u.Dbase, v.index)
u.E.ni <- getNiVec(u.Ebase, v.index)
u <- getInitU(M_it, v.index)
u.worseToN <- M_it=="N" & u.old!=0 & u.old<1
u[u.worseToN] <- 1 - (1 - u.old[u.worseToN]) * u.bT.ni[u.worseToN]
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- 1 - (1 - u.old[M_it=="Q"]) * u.bT.ni[M_it=="Q"]
u.worseToD <- M_it=="D" & u.old<u.D.ni
u[u.worseToD] <- u.D.ni[u.worseToD] - (u.D.ni[u.worseToD] - u.old[u.worseToD]) * u.bT.ni[u.worseToD]
u.worseToE <- M_it=="E" & u.old<u.E.ni
u[u.worseToE] <- u.E.ni[u.worseToE] - (u.E.ni[u.worseToE] - u.old[u.worseToE]) * u.bT.ni[u.worseToE]
u[M_it == "X"] <- 0 # update the utility if dead
u[v.index<1 | v.index>70] <- 0
u
}
Effs <- function (u, cl = 1) {
# cl: cycle length (default is 1)
QALYs <- u * cl # calculate the QALYs during cycle t
return(QALYs) # return the QALYs
}
##################################### Run the simulation ##################################
# START SIMULATION
p = Sys.time()
sim_no_trt <- MicroSim(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.e = d.e, Trt = FALSE, seed = 200)  # X and d.c are never used inside MicroSim, so only defined objects are passed
comp.time = Sys.time() - p
comp.time
# PRINT DATA
sim_no_trt$TR
# sim_no_trt$m.M
# SAVE DATA
saveRDS(sim_no_trt$TR, file.path(outDir, paste0(scenarioName,"_","SG",big,"_TR.rds")))
saveRDS(sim_no_trt$colSumME, file.path(outDir, paste0(scenarioName,"_","SG",big,"_colSumME.rds")))
# saveRDS(sim_no_trt, file.path(outDir, paste0(scenarioName,"_","SG",big,"_sim_no_trt.rds")))
|
/code/scenarios_micro/micro_v0.3_sensitivity/MoreNE_SGUK/3xELF_micro.R
|
no_license
|
KateDoan/gice
|
R
| false | false | 15,475 |
r
|
#############################################################################################################################
rm(list=ls())
#############################################################################################################################
test <- TRUE # set test to FALSE to run the real simulation
scenarioName <- "3xELF"
ELFAge <- 18
big <- "UK"
outDir <- file.path("micro_v0.3_sensitivity", "MoreNE_SGUK", "outData")
#############################################################################################################################
source("micro_scenarios_v0.3/util/microsimulation_preparation.R")
source("micro_scenarios_v0.3/util/read_big_mat_transition.R")
init <- micro_prep()
eTrans <- readEcigTrans(big)
startAge <- 11
endAge <- 80
startYear <- 2017
endYear <- 2067
numAge <- endAge - startAge + 1
numYear <- endYear - startYear + 1
if(test){
n.i <- 10
v.M_1 <- rep(c("C", "N", "E", "Q", "Q"), n.i/5)
v.age <- rep(c(19, 4, 35, 180, 20), n.i/5)
n.t <- 10
} else {
n.i <- length(init$sin.ages) # number of simulated individuals
v.M_1 <- init$sin.states # beginning states
v.age <- init$sin.ages # initialize age
n.t <- 50 # time horizon
}
v.n <- c("N", "C", "Q", "D", "E", "X") # the model states: Never Smoker(N), Smoker(C), Quitter(Q), Dual(D), E-cig Only(E)
n.s <- length(v.n) # the number of states
d.e <- 0.03 # equal discounting of costs and QALYs by 3%
d.x <- 0.03 # mortality rate decreases by 3% annually
# v.Trt <- c("No Treatment", "Treatment") # store the strategy names
# Cost and utility inputs
u.N <- 1 # utility when not smoking
u.bTbase <- c(rep(0.98, 19), rep(0.96, 10), rep(0.97, 5), rep(0.96, 5), rep(0.97, 15), rep(0.98, 16))
u.Cbase <- c(rep(0.91,19), rep(c(0.88,0.86,0.83,0.81,0.78,0.76,0.74), each=5), rep(0.71, 16)) # utility when smoking
u.Qbase <- 1 - (1-u.Cbase) * 0.05
if(big == "JP"){
u.Ebase <- 1 - (1-u.Cbase) * 0.05
} else {
u.Ebase <- 1 - (1-u.Cbase) * 0.10
}
u.Dbase <- sqrt(u.Cbase * u.Ebase)
v.NXbase <- init$deathRateN
v.bTbase <- c(rep(0.92, 5), rep(0.93, 5), rep(0.94, 14), rep(0.95, 21), rep(0.96, 25))
v.RRCbase <- c(rep(2.8, 49), rep(2.5, 10), rep(2.0, 11))
v.RRQbase <- 1 + (v.RRCbase - 1) * 0.05
if(big == "JP"){
v.RREbase <- 1 + (v.RRCbase - 1) * 0.05
} else {
v.RREbase <- 1 + (v.RRCbase - 1) * 0.10
}
v.RRDbase <- sqrt(v.RRCbase * v.RREbase)
# Transition rates transformation
fELF = 1/2
fInitEcig = 3
v.NC.primer <- init$NCprimer
v.CQ.primer <- init$CQprimer
v.QC.primer <- init$QCprimer
v.NE.primer=eTrans$NE; v.ND.primer=eTrans$ND; v.CD.primer=eTrans$CD; v.CE.primer=eTrans$CE;
v.QD.primer=eTrans$QD; v.QE.primer=eTrans$QE; v.DC.primer=eTrans$DC; v.DQ.primer=eTrans$DQ;
v.DE.primer=eTrans$DE; v.EC.primer=eTrans$EC; v.ED.primer=eTrans$ED; v.EN.primer=eTrans$EN
v.NE.primer <- v.NE.primer * fInitEcig
row18 <- 18 - startAge + 1
rowAgeELF <- ELFAge - startAge + 1
v.NE.primer[row18:rowAgeELF] <- v.NE.primer[row18:rowAgeELF] * fELF
v.CD.primer[row18:rowAgeELF] <- v.CD.primer[row18:rowAgeELF] * fELF
v.CE.primer[row18:rowAgeELF] <- v.CE.primer[row18:rowAgeELF] * fELF
v.QE.primer[row18:rowAgeELF] <- v.QE.primer[row18:rowAgeELF] * fELF
v.ND.primer[row18:rowAgeELF] <- v.ND.primer[row18:rowAgeELF] * fELF
v.QD.primer[row18:rowAgeELF] <- v.QD.primer[row18:rowAgeELF] * fELF
##################################### Functions ###########################################
##################################### Helper functions ####################################
getNiVec <- function(v.base, v.index){
v.ni <- v.base[v.index]
v.ni[is.na(v.ni)] <- 0
v.ni
}
##################################### Main functions ######################################
# THE NEW samplev() FUNCTION
# efficient implementation of the rMultinom() function of the Hmisc package ####
samplev <- function (probs, m) {
d <- dim(probs)
n <- d[1]
k <- d[2]
lev <- dimnames(probs)[[2]]
if (!length(lev))
lev <- 1:k
ran <- matrix(lev[1], ncol = m, nrow = n)
U <- t(probs)
for(i in 2:k) {
U[i, ] <- U[i, ] + U[i - 1, ]
}
if (any((U[k, ] - 1) > 1e-05))
stop("error in multinom: probabilities do not sum to 1")
for (j in 1:m) {
un <- rep(runif(n), rep(k, n))
ran[, j] <- lev[1 + colSums(un > U)]
}
ran
}
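# Minimal usage sketch for samplev() (hypothetical numbers, not part of the model run):
# each row of `probs` holds one individual's transition probabilities across the named
# states (rows must sum to 1); m = 1 draws one next state per individual.
if (FALSE) {
  probs_demo <- matrix(c(0.7, 0.2, 0.1,
                         0.1, 0.8, 0.1),
                       nrow = 2, byrow = TRUE,
                       dimnames = list(NULL, c("N", "C", "X")))
  samplev(probs_demo, m = 1)  # returns a 2 x 1 character matrix of sampled state names
}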
# The MicroSim function runs the microsimulation of the smoking/e-cigarette state-transition model (adapted from the 'Sick-Sicker' template) and keeps track of what happens to each individual during each cycle.
MicroSim <- function(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c, d.e, TR.out = TRUE, TS.out = FALSE, Trt = FALSE, seed = 1) {
# Arguments:
# v.M_1: vector of initial states for individuals
# n.i: number of individuals
# n.t: total number of cycles to run the model
# v.n: vector of health state names
# X: vector or matrix of individual characteristics
# d.c: discount rate for costs
# d.e: discount rate for health outcome (QALYs)
# TR.out: should the output include a Microsimulation trace? (default is TRUE)
# TS.out: should the output include a matrix of transitions between states? (default is FALSE)
# Trt: are the n.i individuals receiving treatment? (scalar with a Boolean value, default is FALSE)
# seed: starting seed number for random number generator (default is 1)
# Makes use of:
# Probs: function for the estimation of transition probabilities
# Costs: function for the estimation of cost state values
# Effs: function for the estimation of state specific health outcomes (QALYs)
v.index <- v.age - startAge + 1
v.dwe <- 1 / (1 + d.e) ^ (0:n.t)
# Create the matrix capturing the state name/costs/health outcomes for all individuals at each time point
m.M <- m.E <- matrix(nrow = n.i, ncol = n.t + 1,
dimnames = list(paste("ind", 1:n.i, sep = " "),
paste("cycle", 0:n.t, sep = " ")))
if(TR.out == TRUE) {
TR = matrix(NA, n.s, n.t)
}
m.M[, 1] <- v.M_1 # indicate the initial health state
v.RR <- getInitRR(v.M_1, v.index)
u <- getInitU(v.M_1, v.index)
m.E[, 1] <- Effs (u, cl=1)
set.seed(seed) # set the seed for every individual for the random number generator
for (t in 1:n.t) { # t <- 3
# print(v.index)
if (TR.out == TRUE) {
TR[,t] <- table(factor((m.M[,t])[v.age>=12 & v.age<=80], levels=v.n, ordered=TRUE))
}
if(t>1){
v.RR <- getRR(v.RR, m.M[,t], v.index)
}
# print(t)
# print(v.RR)
m.p <- Probs(m.M[, t], v.index, v.RR) # calculate the transition probabilities at cycle t
m.M[, t + 1] <- samplev(prob = m.p, m = 1) # sample the next health state and store that state in matrix m.M
cat('\r', paste(round(t/n.t * 100), "% done", sep = " ")) # display the progress of the simulation
v.age <- v.age + 1
v.index <- v.index + 1
v.NXbase <<- v.NXbase * (1-d.x)
u <- getU(u, m.M[,t+1], v.index)
m.E[,t + 1] <- Effs(u, cl=1)
} # close the loop for the time points
if (TS.out == TRUE) { # create a matrix of transitions across states
TS <- paste(m.M, cbind(m.M[, -1], NA), sep = "->") # transitions from one state to the other
TS <- matrix(TS, nrow = n.i)
rownames(TS) <- paste("Ind", 1:n.i, sep = " ") # name the rows
colnames(TS) <- paste("Cycle", 0:n.t, sep = " ") # name the columns
} else {
TS <- NULL
}
if(TR.out==TRUE){
TR <- prop.table(t(TR), margin = 1)
} else {
TR <- NULL
}
te <- m.E %*% v.dwe # total (discounted) QALYs per individual
te_hat <- mean(te) # average (discounted) QALYs
colSumME <- colSums(m.E)
results <- list(m.M = m.M, TS = TS, TR = TR, m.E = m.E, te = te, te_hat = te_hat, colSumME = colSumME) # store the results from the simulation in a list
return(results) # return the results
} # end of the MicroSim function
#### Probability function
getInitRR <- function(M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RRD.ni <- getNiVec(v.RRDbase, v.index)
v.RRE.ni <- getNiVec(v.RREbase, v.index)
v.RR <- rep(1, n.i)
v.RR[M_it=="N"] <- 1
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- v.RRQ.ni[M_it=="Q"]
v.RR[M_it=="D"] <- v.RRD.ni[M_it=="D"]
v.RR[M_it=="E"] <- v.RRE.ni[M_it=="E"]
v.RR
}
getRR <- function(v.RRold, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
v.bT.ni <- getNiVec(v.bTbase, v.index)
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RRD.ni <- getNiVec(v.RRDbase, v.index)
v.RRE.ni <- getNiVec(v.RREbase, v.index)
v.RR <- getInitRR(M_it, v.index)
v.worseToN <- M_it=="N" & v.RRold>1
v.RR[v.worseToN] <- 1 + (v.RRold[v.worseToN] - 1) * v.bT.ni[v.worseToN]
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- 1 + (v.RRold[M_it=="Q"] - 1) * v.bT.ni[M_it=="Q"]
v.worseToD <- M_it=="D" & v.RRold>v.RRD.ni
v.RR[v.worseToD] <- v.RRD.ni[v.worseToD] + (v.RRold[v.worseToD] - v.RRD.ni[v.worseToD]) * v.bT.ni[v.worseToD]
v.worseToE <- M_it=="E" & v.RRold>v.RRE.ni
v.RR[v.worseToE] <- v.RRE.ni[v.worseToE] + (v.RRold[v.worseToE] - v.RRE.ni[v.worseToE]) * v.bT.ni[v.worseToE]
v.RR
}
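# Note on getRR(): when an individual moves to a lower-risk state (e.g. quits, or switches
# to dual/e-cig only), the excess relative risk carried over from the previous cycle decays
# toward that state's baseline by the age-specific factor v.bTbase each year, rather than
# dropping to the new baseline immediately.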
# The Probs function that updates the transition probabilities of every cycle is shown below.
Probs <- function(M_it, v.index, v.RR) {
# M_it: health state occupied by individual i at cycle t (character variable)
# dur: the duration of being sick (sick/sicker)
v.index[v.index<=0] <- length(v.NXbase) + 10
m.p.it <- matrix(NA, n.s, n.i) # create vector of state transition probabilities
rownames(m.p.it) <- v.n # assign names to the vector
# Update base transition rates
v.NX.ni <- getNiVec(v.NXbase, v.index)
v.toX.ni <- v.RR * v.NX.ni
v.NC.primer.ni <- getNiVec(v.NC.primer, v.index)
v.NC.ni <- v.NC.primer.ni
v.CQ.primer.ni <- getNiVec(v.CQ.primer, v.index)
v.CQ.ni <- v.CQ.primer.ni
v.QC.primer.ni <- getNiVec(v.QC.primer, v.index)
v.QC.ni <- v.QC.primer.ni
# Ecig transitions
v.NE.ni <- getNiVec(v.NE.primer, v.index); v.ND.ni <- getNiVec(v.ND.primer, v.index)
v.NE.ni[v.index>15] <- 0; v.ND.ni[v.index>15] <- 0;
v.CD.ni <- getNiVec(v.CD.primer, v.index); v.CE.ni <- getNiVec(v.CE.primer, v.index)
v.QD.ni <- getNiVec(v.QD.primer, v.index); v.QE.ni <- getNiVec(v.QE.primer, v.index)
v.DC.ni <- getNiVec(v.DC.primer, v.index); v.DQ.ni <- getNiVec(v.DQ.primer, v.index)
v.DE.ni <- getNiVec(v.DE.primer, v.index); v.EC.ni <- getNiVec(v.EC.primer, v.index)
v.ED.ni <- getNiVec(v.ED.primer, v.index); v.EN.ni <- getNiVec(v.EN.primer, v.index)
v.NC.ni <- v.NC.ni * (1-v.NE.ni-v.ND.ni)
v.CQ.ni <- v.CQ.ni * (1-v.CD.ni-v.CE.ni)
v.QC.ni <- v.QC.ni * (1-v.QE.ni-v.QD.ni)
v.NC.ni <- v.NC.ni * (1-v.toX.ni); v.NE.ni <- v.NE.ni * (1-v.toX.ni); v.ND.ni <- v.ND.ni * (1-v.toX.ni)
v.CQ.ni <- v.CQ.ni * (1-v.toX.ni); v.CD.ni <- v.CD.ni * (1-v.toX.ni); v.CE.ni <- v.CE.ni * (1-v.toX.ni)
v.QC.ni <- v.QC.ni * (1-v.toX.ni); v.QE.ni <- v.QE.ni * (1-v.toX.ni); v.QD.ni <- v.QD.ni * (1-v.toX.ni)
v.DC.ni <- v.DC.ni * (1-v.toX.ni); v.DQ.ni <- v.DQ.ni * (1-v.toX.ni); v.DE.ni <- v.DE.ni * (1-v.toX.ni)
v.EC.ni <- v.EC.ni * (1-v.toX.ni); v.EN.ni <- v.EN.ni * (1-v.toX.ni); v.ED.ni <- v.ED.ni * (1-v.toX.ni)
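# Ordering of the adjustments above: the conventional cigarette transitions (N->C, C->Q, Q->C)
# are first thinned by the competing e-cig transitions, and then every transition is scaled by
# the survival probability (1 - v.toX.ni); the leftover mass is assigned to staying in the
# current state via the v.NN/v.CC/v.QQ/v.DD/v.EE terms below.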
v.NN.ni <- 1 - v.NC.ni - v.NE.ni - v.ND.ni - v.toX.ni
v.CC.ni <- 1 - v.CQ.ni - v.CD.ni - v.CE.ni - v.toX.ni
v.QQ.ni <- 1 - v.QC.ni - v.QE.ni - v.QD.ni - v.toX.ni
v.DD.ni <- 1 - v.DC.ni - v.DQ.ni - v.DE.ni - v.toX.ni
v.EE.ni <- 1 - v.EC.ni - v.EN.ni - v.ED.ni - v.toX.ni
youngN <- M_it=="N"&v.index<=15
m.p.it[,youngN] <- rbind(v.NN.ni[youngN], v.NC.ni[youngN], 0, v.ND.ni[youngN], v.NE.ni[youngN], v.toX.ni[youngN]) # transition probabilities when never smoke
youngE <- M_it=="E"&v.index<=15
m.p.it[,youngE] <- rbind(v.EN.ni[youngE], v.EC.ni[youngE], 0, v.ED.ni[youngE], v.EE.ni[youngE], v.toX.ni[youngE])
oldN <- M_it=="N"&v.index>15
m.p.it[,oldN] <- rbind(v.NN.ni[oldN], v.NC.ni[oldN], 0, 0, 0, v.toX.ni[oldN]) # transition probabilities when never smoke
oldE <- M_it=="E"&v.index>15
m.p.it[,oldE] <- rbind(0, v.EC.ni[oldE], v.EN.ni[oldE], v.ED.ni[oldE], v.EE.ni[oldE], v.toX.ni[oldE])
m.p.it[,M_it == "C"] <- rbind(0, v.CC.ni[M_it=="C"], v.CQ.ni[M_it=="C"], v.CD.ni[M_it=="C"], v.CE.ni[M_it=="C"], v.toX.ni[M_it=="C"]) # transition probabilities when current smoke
m.p.it[,M_it == "Q"] <- rbind(0, v.QC.ni[M_it=="Q"], v.QQ.ni[M_it=="Q"], v.QD.ni[M_it=="Q"], v.QE.ni[M_it=="Q"], v.toX.ni[M_it=="Q"]) # transition probabilities when quit smoke
m.p.it[,M_it == "D"] <- rbind(0, v.DC.ni[M_it=="D"], v.DQ.ni[M_it=="D"], v.DD.ni[M_it=="D"], v.DE.ni[M_it=="D"], v.toX.ni[M_it=="D"])
m.p.it[,M_it == "X"] <- c(0, 0, 0, 0, 0, 1) # transition probabilities when dead
# cat("\n")
# print(m.p.it)
# cat("\n")
ifelse(colSums(m.p.it) == 1, return(t(m.p.it)), print("Probabilities do not sum to 1")) # return the transition probabilities or produce an error
}
### Utility functions
# getInitU and getU estimate the state- and age-specific utility at every cycle.
getInitU <- function(M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u.D.ni <- getNiVec(u.Dbase, v.index)
u.E.ni <- getNiVec(u.Ebase, v.index)
u <- rep(0, n.i)
u[M_it=="N"] <- 1
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- u.Q.ni[M_it=="Q"]
u[M_it=="D"] <- u.D.ni[M_it=="D"]
u[M_it=="E"] <- u.E.ni[M_it=="E"]
u[v.index<1 | v.index>70] <- 0
u
}
getU <- function(u.old, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
u.bT.ni <- getNiVec(u.bTbase, v.index)
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u.D.ni <- getNiVec(u.Dbase, v.index)
u.E.ni <- getNiVec(u.Ebase, v.index)
u <- getInitU(M_it, v.index)
u.worseToN <- M_it=="N" & u.old!=0 & u.old<1
u[u.worseToN] <- 1 - (1 - u.old[u.worseToN]) * u.bT.ni[u.worseToN]
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- 1 - (1 - u.old[M_it=="Q"]) * u.bT.ni[M_it=="Q"]
u.worseToD <- M_it=="D" & u.old<u.D.ni
u[u.worseToD] <- u.D.ni[u.worseToD] - (u.D.ni[u.worseToD] - u.old[u.worseToD]) * u.bT.ni[u.worseToD]
u.worseToE <- M_it=="E" & u.old<u.E.ni
u[u.worseToE] <- u.E.ni[u.worseToE] - (u.E.ni[u.worseToE] - u.old[u.worseToE]) * u.bT.ni[u.worseToE]
u[M_it == "X"] <- 0 # update the utility if dead
u[v.index<1 | v.index>70] <- 0
u
}
Effs <- function (u, cl = 1) {
# cl: cycle length (default is 1)
QALYs <- u * cl # calculate the QALYs during cycle t
return(QALYs) # return the QALYs
}
##################################### Run the simulation ##################################
# START SIMULATION
p = Sys.time()
sim_no_trt <- MicroSim(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c = 0.03, d.e = d.e, Trt = FALSE, seed = 200) # X and d.c are not used in this model
comp.time = Sys.time() - p
comp.time
# PRINT DATA
sim_no_trt$TR
# sim_no_trt$m.M
# SAVE DATA
saveRDS(sim_no_trt$TR, file.path(outDir, paste0(scenarioName,"_","SG",big,"_TR.rds")))
saveRDS(sim_no_trt$colSumME, file.path(outDir, paste0(scenarioName,"_","SG",big,"_colSumME.rds")))
# saveRDS(sim_no_trt, file.path(outDir, paste0(scenarioName,"_","SG",big,"_sim_no_trt.rds")))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R
\name{torch_prod}
\alias{torch_prod}
\title{Prod}
\arguments{
\item{input}{(Tensor) the input tensor.}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of the returned tensor. If specified, the input tensor is cast to \code{dtype} before the operation is performed. This is useful for preventing data type overflows. Default: None.}
\item{dim}{(int) the dimension to reduce.}
\item{keepdim}{(bool) whether the output tensor has \code{dim} retained or not.}
}
\description{
Prod
}
\section{prod(input, dtype=None) -> Tensor }{
Returns the product of all elements in the \code{input} tensor.
}
\section{prod(input, dim, keepdim=False, dtype=None) -> Tensor }{
Returns the product of each row of the \code{input} tensor in the given
dimension \code{dim}.
If \code{keepdim} is \code{True}, the output tensor is of the same size
as \code{input} except in the dimension \code{dim} where it is of size 1.
Otherwise, \code{dim} is squeezed (see \code{\link{torch_squeeze}}), resulting in
the output tensor having 1 fewer dimension than \code{input}.
}
\examples{
if (torch_is_installed()) {
a = torch_randn(c(1, 3))
a
torch_prod(a)
a = torch_randn(c(4, 2))
a
torch_prod(a, 1)
}
}
|
/man/torch_prod.Rd
|
permissive
|
minghao2016/torch
|
R
| false | true | 1,345 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R
\name{torch_prod}
\alias{torch_prod}
\title{Prod}
\arguments{
\item{input}{(Tensor) the input tensor.}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of the returned tensor. If specified, the input tensor is cast to \code{dtype} before the operation is performed. This is useful for preventing data type overflows. Default: None.}
\item{dim}{(int) the dimension to reduce.}
\item{keepdim}{(bool) whether the output tensor has \code{dim} retained or not.}
}
\description{
Prod
}
\section{prod(input, dtype=None) -> Tensor }{
Returns the product of all elements in the \code{input} tensor.
}
\section{prod(input, dim, keepdim=False, dtype=None) -> Tensor }{
Returns the product of each row of the \code{input} tensor in the given
dimension \code{dim}.
If \code{keepdim} is \code{True}, the output tensor is of the same size
as \code{input} except in the dimension \code{dim} where it is of size 1.
Otherwise, \code{dim} is squeezed (see \code{\link{torch_squeeze}}), resulting in
the output tensor having 1 fewer dimension than \code{input}.
}
\examples{
if (torch_is_installed()) {
a = torch_randn(c(1, 3))
a
torch_prod(a)
a = torch_randn(c(4, 2))
a
torch_prod(a, 1)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tuition.R
\docType{data}
\name{Tuition}
\alias{Tuition}
\title{Costs at 4-year colleges}
\format{A data.frame wherein each row is one 4-year college or university
\itemize{
\item \code{INSTITUTION}: Name of four year institution
\item \code{TYPE}: Type of four-year institution: Public_4_year, Private_4_year, or For_profit_4_year
\item \code{STATE}: The state where the institution resides
\item \code{ROOM_BOARD}: The cost of room and board at the institution ($ per year)
\item \code{INSTATE_TUTION}: The cost of in-state tuition ($ per year)
\item \code{INSTATE_TOTAL}: The cost of room and board and instate tuition ($ per year)
\item \code{OUTOFSTATE_TUTION}: The cost of out of state tuition ($ per year)
\item \code{OUTOFSTATE_TOTAL}: The cost of room and board and out of state tuition ($ per year)
}}
\source{
"Tuition and Fees, 1998-99 Through 2018-19". \emph{Chronicle of Higher Education} (2018, December 31). Retrieved from https://www.chronicle.com/interactives/tuition-and-fees
}
\usage{
data(Tuition)
}
\description{
Costs at 4-year colleges
}
\keyword{datasets}
|
/man/Tuition.Rd
|
no_license
|
krkozak/MAT160
|
R
| false | true | 1,135 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tuition.R
\docType{data}
\name{Tuition}
\alias{Tuition}
\title{Costs at 4-year colleges}
\format{A data.frame wherein each row is one 4-year college or university
\itemize{
\item \code{INSTITUTION}: Name of four year institution
\item \code{TYPE}: Type of four-year institution: Public_4_year, Private_4_year, or For_profit_4_year
\item \code{STATE}: The state where the institution resides
\item \code{ROOM_BOARD}: The cost of room and board at the institution ($ per year)
\item \code{INSTATE_TUTION}: The cost of in-state tuition ($ per year)
\item \code{INSTATE_TOTAL}: The cost of room and board and instate tuition ($ per year)
\item \code{OUTOFSTATE_TUTION}: The cost of out of state tuition ($ per year)
\item \code{OUTOFSTATE_TOTAL}: The cost of room and board and out of state tuition ($ per year)
}}
\source{
"Tuition and Fees, 1998-99 Through 2018-19". \emph{Chronicle of Higher Education} (2018, December 31). Retrieved from https://www.chronicle.com/interactives/tuition-and-fees
}
\usage{
data(Tuition)
}
\description{
Costs at 4-year colleges
}
\keyword{datasets}
|
## Get all data
data_all <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_all$Date <- as.Date(data_all$Date, format="%d/%m/%Y")
## Subset data
data <- subset(data_all, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_all)
## Convert dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
  plot(Global_active_power~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
  plot(Voltage~Datetime, type="l", ylab="Voltage (volt)", xlab="")
  plot(Sub_metering_1~Datetime, type="l", ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~Datetime, col='Red')
  lines(Sub_metering_3~Datetime, col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(Global_reactive_power~Datetime, type="l", ylab="Global Reactive Power (kilowatts)", xlab="")
})
## Save
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
/plot4.R
|
no_license
|
damonous/exploratory_data_analysis_course_project_1
|
R
| false | false | 1,241 |
r
|
## Get all data
data_all <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_all$Date <- as.Date(data_all$Date, format="%d/%m/%Y")
## Subset data
data <- subset(data_all, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_all)
## Convert dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
  plot(Global_active_power~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
  plot(Voltage~Datetime, type="l", ylab="Voltage (volt)", xlab="")
  plot(Sub_metering_1~Datetime, type="l", ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~Datetime, col='Red')
  lines(Sub_metering_3~Datetime, col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
         legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(Global_reactive_power~Datetime, type="l", ylab="Global Reactive Power (kilowatts)", xlab="")
})
## Save
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_RGCCA.R
\name{boot_evaluate}
\alias{boot_evaluate}
\title{Evaluates the bootstrapping of RGCCA}
\usage{
boot_evaluate(STAB)
}
\arguments{
\item{STAB}{List of weights of \code{rgcca} or \code{sgcca}}
}
\value{
Lateral effect: Prints plots
}
\description{
Evaluates the bootstrapping of RGCCA
}
|
/man/boot_evaluate.Rd
|
permissive
|
llrs/integration-helper
|
R
| false | true | 378 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_RGCCA.R
\name{boot_evaluate}
\alias{boot_evaluate}
\title{Evaluates the bootstrapping of RGCCA}
\usage{
boot_evaluate(STAB)
}
\arguments{
\item{STAB}{List of weights of \code{rgcca} or \code{sgcca}}
}
\value{
Lateral effect: Prints plots
}
\description{
Evaluates the bootstrapping of RGCCA
}
|
# Decision Tree Classification
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting Decision Tree Classification to the Training set
library(rpart)
classifier = rpart(formula = Purchased ~ .,
data = training_set)
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3], type = 'class')
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
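# Optional quick check (not part of the original script): overall accuracy from the confusion matrix
accuracy = sum(diag(cm)) / sum(cm)
accuracy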
# Visualising the Training set results
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set,type = 'class')
plot(set[, -3],
main = 'Decision Tree (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set, type = 'class')
plot(set[, -3], main = 'Decision Tree (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Plotting the decision tree (for this you have to remove feature scaling,
# i.e. re-execute the whole script except the feature scaling step)
plot(classifier)
text(classifier)
|
/decision_tree_classification.R
|
no_license
|
adityajain1310/social_network_ads-decision-tree-classification
|
R
| false | false | 2,505 |
r
|
# Decision Tree Classification
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting Decision Tree Classification to the Training set
library(rpart)
classifier = rpart(formula = Purchased ~ .,
data = training_set)
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3], type = 'class')
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
# Visualising the Training set results
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set,type = 'class')
plot(set[, -3],
main = 'Decision Tree (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set, type = 'class')
plot(set[, -3], main = 'Decision Tree (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Plotting the decision tree (for this you have to remove feature scaling,
# i.e. re-execute the whole script except the feature scaling step)
plot(classifier)
text(classifier)
|
bioinformatics <- 1
is <- 2
for_ <- 3
cool <- 4
people <- 5
print(bioinformatics + is + for_ + cool + people)
|
/INCA-Assignment-1/6.R
|
no_license
|
andreclinio/Brasil_2019
|
R
| false | false | 110 |
r
|
bioinformatics <- 1
is <- 2
for_ <- 3
cool <- 4
people <- 5
print(bioinformatics + is + for_ + cool + people)
|
# server.R
library(ggplot2)
library(dplyr)
library(maps)
library(scales)
library(ggthemes)
# source("helpers.R")
rate <- readRDS("data/rate.rds")
us_map = map_data('state')
statename <- group_by(us_map, region) %>%
summarise(long = mean(long), lat = mean(lat))
statename$region.abb <- state.abb[match(statename$region,tolower(state.name))]
shinyServer(function(input, output) {
rate.year <- reactive({
subset(rate, (rate$BusinessYear == input$var &
rate$IndividualRate != 999999),
select = c(BusinessYear:IndividualTobaccoRate))
})
stateCounts <- reactive({
rate.year() %>%
select(StateCode, IssuerId, PlanId, IndividualRate) %>%
group_by(StateCode) %>%
summarize(Carriers = length(unique(IssuerId)),
PlanAvailable = length(unique(PlanId)),
MeanIndRate= mean(IndividualRate),
MedianIndRate = median(IndividualRate)) %>%
arrange(desc(PlanAvailable))
})
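  # Note: stateCounts() collapses the plan-level rows into one row per state, with the number of
  # distinct carriers and plans offered plus the mean and median individual premium.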
stateCounts_region <- reactive({
stateCounts() %>%
mutate(region = tolower(
state.name[match(StateCode, state.abb)]))
})
mergeMap <- reactive({
left_join(stateCounts_region(), us_map, by="region")
})
output$plot <- renderPlot({
if(input$var2 == "Median"){
geom <- geom_polygon(data=mergeMap(), aes(x=long, y=lat, group = group, fill=MedianIndRate))
lab <- labs(fill = "Median Premium $/mon", title = "Median Monthly Premium Distribution", x="", y="")
}else{
geom <- geom_polygon(data=mergeMap(), aes(x=long, y=lat, group = group, fill=MeanIndRate))
lab <- labs(fill = "Average Premium $/mon", title = "Average Monthly Premium Distribution", x="", y="")
}
ggplot() +
geom +
lab +
scale_fill_continuous(low = "thistle2", high = "darkblue", guide="colorbar") +
theme_bw() +
scale_y_continuous(breaks=c()) +
scale_x_continuous(breaks=c()) +
theme(panel.border = element_blank()) +
geom_text(data=statename, aes(x=long, y=lat, label=region.abb), na.rm = T, size=2) +
coord_map()
})
# Generate a summary of the data
output$summary <- renderPrint({
summary(stateCounts())
})
# Generate an HTML table view of the data
output$table <- renderTable({
data.frame(x=stateCounts_region())
})
})
|
/health-insurance-map/server.R
|
no_license
|
shihyinnn/R-Shiny-Gallery
|
R
| false | false | 2,584 |
r
|
# server.R
library(ggplot2)
library(dplyr)
library(maps)
library(scales)
library(ggthemes)
# source("helpers.R")
rate <- readRDS("data/rate.rds")
us_map = map_data('state')
statename <- group_by(us_map, region) %>%
summarise(long = mean(long), lat = mean(lat))
statename$region.abb <- state.abb[match(statename$region,tolower(state.name))]
shinyServer(function(input, output) {
rate.year <- reactive({
subset(rate, (rate$BusinessYear == input$var &
rate$IndividualRate != 999999),
select = c(BusinessYear:IndividualTobaccoRate))
})
stateCounts <- reactive({
rate.year() %>%
select(StateCode, IssuerId, PlanId, IndividualRate) %>%
group_by(StateCode) %>%
summarize(Carriers = length(unique(IssuerId)),
PlanAvailable = length(unique(PlanId)),
MeanIndRate= mean(IndividualRate),
MedianIndRate = median(IndividualRate)) %>%
arrange(desc(PlanAvailable))
})
stateCounts_region <- reactive({
stateCounts() %>%
mutate(region = tolower(
state.name[match(StateCode, state.abb)]))
})
mergeMap <- reactive({
left_join(stateCounts_region(), us_map, by="region")
})
output$plot <- renderPlot({
if(input$var2 == "Median"){
geom <- geom_polygon(data=mergeMap(), aes(x=long, y=lat, group = group, fill=MedianIndRate))
lab <- labs(fill = "Median Premium $/mon", title = "Median Monthly Premium Distribution", x="", y="")
}else{
geom <- geom_polygon(data=mergeMap(), aes(x=long, y=lat, group = group, fill=MeanIndRate))
lab <- labs(fill = "Average Premium $/mon", title = "Average Monthly Premium Distribution", x="", y="")
}
ggplot() +
geom +
lab +
scale_fill_continuous(low = "thistle2", high = "darkblue", guide="colorbar") +
theme_bw() +
scale_y_continuous(breaks=c()) +
scale_x_continuous(breaks=c()) +
theme(panel.border = element_blank()) +
geom_text(data=statename, aes(x=long, y=lat, label=region.abb), na.rm = T, size=2) +
coord_map()
})
# Generate a summary of the data
output$summary <- renderPrint({
summary(stateCounts())
})
# Generate an HTML table view of the data
output$table <- renderTable({
data.frame(x=stateCounts_region())
})
})
|
library(ggplot2)
library(plyr)
library(caret)
library(reshape2)
temp <- tempfile()
download.file("http://www.football-data.co.uk/mmz4281/1415/data.zip",temp)
files <- lapply(unzip(temp),function(x) read.csv(x))
raw.data <- Reduce(function(x, y) merge(x, y, all=T,sort=F), files, accumulate=F)
unlink(temp)
data<-raw.data
h.odds <- data[c("Date","FTR","B365H","BWH","IWH","LBH","PSH","WHH","SJH","VCH","BbMxH","BbAvH")]
df <- na.omit(melt(h.odds,id.vars=c("FTR","Date"),variable.name="book",value.name="h.odds"))
wins <- data.frame(date=as.Date(df$Date,format="%d/%m/%y")
,win=(df$FTR=="H"),book=df$book,pr=1/df$h.odds)
wins$pr=round(wins$pr,2)
x <- aggregate(win ~ pr, data=wins, mean)
x<-round(x,2)
df$Date <- as.Date(df$Date,format="%d/%m/%y")
backtest <- df['book'=="BbAvH",]
backtest <- backtest[with(backtest, order(Date)), ]
backtest <- backtest[backtest$h.odds >= 5 & backtest$h.odds <= 8.25 , ]
backtest$win <- backtest$FTR=="H"
backtest$returns <- backtest$win * backtest$h.odds - 1
#' Okay, so bookies give us odds on how a game might turn out. We've established that they are
#' usually quite correct in terms of estimating how likely the home team is to win:
p1 <- ggplot(data=x,aes(x=pr,y=win))+geom_abline(colour="red")+geom_point()+geom_line()+
xlab("Implied Probability of Home Win (from bookie odds)") +
ylab("Actual Outcome (aka posterior probability)") +
ggtitle("How Right is the House")
plot(p1)
#' The red line is what things would look like if bookies were always right.
#' In the graph above, anywhere that the points are not exactly on the red line is
#' a potential _edge_: the bookies are wrong here.
#'
#' However, since we are using data, we need to determine where we might have a systematic edge,
#' one that is not just showing up here due to random chance. The first step is to simplify what we have.
#' We've already done this a bit to get the previous graph, but now we'll remove all of these points
#' and replace them with a regression model, essentially a function.
p2 <- ggplot(data=x,aes(x=pr,y=win))+geom_abline(colour="red")+
stat_smooth(method="lm",formula=y~poly(x, 3),size=1)+
xlab("Implied Probability of Home Win (from bookie odds)") +
ylab("Actual Outcome (aka posterior probability)") +
ggtitle("How Right is the House")
plot(p2)
#' So we can see that even then the house doesn't always get things right and, in fact, there are some areas
#' where they are systematically wrong. Maybe we can exploit these, but to figure out how
#' we need to think about how gambling actually works.
#'
#' The house tells us their expected outcome in the form of odds, which we can choose to bet against.
#' We've backed out the expected probability using a pretty straightforward transformation on the odds
#' (one which we've discussed: Implied Probability = 1/(Decimal Odds)), so we can reverse it to represent this function:
p3 <- ggplot(data=x,aes(x=pr,y=1/pr)) + geom_point() + geom_line() +
xlab("Implied Probability of Home Win (from bookie odds)") +
ylab("Decimal Odds (as they would be at the sportsbook)") +
ggtitle("Odds are Probabiities are Odds")
plot(p3)
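#' A quick numeric sanity check of that transformation (illustrative numbers only):
#' decimal odds of 5.0 imply a probability of 1/5 = 20%, and a winning $1 bet at those
#' odds pays back $5, i.e. $4 of profit.
odds_demo <- 5.0
c(implied_prob = 1/odds_demo, profit_per_dollar = odds_demo - 1)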
#' You may recognize this function. It's the top right quadrant of y=1/x.
#'
#' Anyway, so now we have:
#' -The bookie's odds, which tell us the potential payout
#' -The actual outcome, which tells us how much we'd have made
#' So let's break down how much we would have made, on average, per bet at each of the different
#' levels the bookie could have given us. While we will use the decimal odds to calculate our payouts,
#' we'll keep our x-axis the same since odds can range widely (and probabilities cannot).
#' Read this graph as "If I bet $1 whenever the implied probability is X%,
#' I would on average get $Y in profit." Here we go:
ret <- with(x,(win*1/pr-1))
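# ret is the average profit per $1 bet in each implied-probability bucket: actual win rate
# times the decimal payout (1/pr), minus the $1 stake. Illustrative arithmetic: a bucket with
# implied probability 0.15 and an actual 18% win rate gives 0.18 * (1/0.15) - 1 = 0.20.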
p4 <- ggplot(data=x,aes(x=pr,y=ret,colour=ret>0)) + geom_point() +
  geom_hline(yintercept = 0, colour='red') + xlab("House Odds (as probabilities)") +
ylab("Returns per $1 bet") + ggtitle("Payouts by Odds")
plot(p4)
#' Even more apparent as a bar chart:
p4 <- ggplot(data=x,aes(x=pr,y=ret,colour=ret>0, fill=ret>0)) + geom_bar(stat="identity", ymin=-1)+ #geom_point()
  geom_hline(yintercept = 0, colour='red') + xlab("House Odds (as probabilities)") +
ylab("Returns per $1 bet") + ggtitle("Payouts by Odds")
plot(p4)
#' So, the conclusion is that if we are looking for places to seek an edge, these home team underdogs
#' might be a good place to start. The house seems to have some systematic error when the home team is a heavy underdog.
#'
#' For shits and giggles, let's see what would have happened if we
#' had bet entirely on home team underdogs in the range we are seeing as promising
#' (let's call it probabilities between 12%-20%, aka ~8.25-5). Part of our data is
#' odds from an odds aggregator, so we'll use those for the simulation.
p5 <- ggplot(data=backtest,aes(x=Date,y=cumsum(returns)))+geom_line()+
xlab("Date")+ylab("Cumulative Returns")+ggtitle("How Would We Have Done")
plot(p5)
#' So had we bet $1 when applicable starting in October, we would have been at a net gain of $50-60
#' by now. Not bad. And we haven't even thought about what game we're betting on yet!
#' There are some issues with this analysis, including that we just backtested our strategy
#' on the data we used to come up with the hypothesis (big no no!). But you get the point, and it's
#' as good a place as any to start looking for an edge.
|
/soccer/Untitled.R
|
no_license
|
ominari-insights/ominari
|
R
| false | false | 5,414 |
r
|
library(ggplot2)
library(plyr)
library(caret)
library(reshape2)
temp <- tempfile()
download.file("http://www.football-data.co.uk/mmz4281/1415/data.zip",temp)
files <- lapply(unzip(temp),function(x) read.csv(x))
raw.data <- Reduce(function(x, y) merge(x, y, all=T,sort=F), files, accumulate=F)
unlink(temp)
data<-raw.data
h.odds <- data[c("Date","FTR","B365H","BWH","IWH","LBH","PSH","WHH","SJH","VCH","BbMxH","BbAvH")]
df <- na.omit(melt(h.odds,id.vars=c("FTR","Date"),variable.name="book",value.name="h.odds"))
wins <- data.frame(date=as.Date(df$Date,format="%d/%m/%y")
,win=(df$FTR=="H"),book=df$book,pr=1/df$h.odds)
wins$pr=round(wins$pr,2)
x <- aggregate(win ~ pr, data=wins, mean)
x<-round(x,2)
df$Date <- as.Date(df$Date,format="%d/%m/%y")
backtest <- df['book'=="BbAvH",]
backtest <- backtest[with(backtest, order(Date)), ]
backtest <- backtest[backtest$h.odds >= 5 & backtest$h.odds <= 8.25 , ]
backtest$win <- backtest$FTR=="H"
backtest$returns <- backtest$win * backtest$h.odds - 1
#' Okay, so bookies give us odds on how a game might turn out. We've established that they are
#' usually quite correct in terms of estimating how likely the home team is to win:
p1 <- ggplot(data=x,aes(x=pr,y=win))+geom_abline(colour="red")+geom_point()+geom_line()+
xlab("Implied Probability of Home Win (from bookie odds)") +
ylab("Actual Outcome (aka posterior probability)") +
ggtitle("How Right is the House")
plot(p1)
#' The red line is what things would look like if bookies were always right.
#' In the graph above, anywhere that the points are not exactly on the red line is
#' a potential _edge_: the bookies are wrong here.
#'
#' However, since we are using data, we need to determine where we might have a systematic edge,
#' one that is not just showing up here due to random chance. The first step is to simplify what we have.
#' We've already done this a bit to get the previous graph, but now we'll remove all of these points
#' and replace them with a regression model, essentially a function.
p2 <- ggplot(data=x,aes(x=pr,y=win))+geom_abline(colour="red")+
stat_smooth(method="lm",formula=y~poly(x, 3),size=1)+
xlab("Implied Probability of Home Win (from bookie odds)") +
ylab("Actual Outcome (aka posterior probability)") +
ggtitle("How Right is the House")
plot(p2)
#' So we can see that even then the house doesn't always get things right and, in fact, there are some areas
#' where they are systematically wrong. Maybe we can exploit these, but to figure out how
#' we need to think about how gambling actually works.
#'
#' The house tells us their expected outcome in the form of odds, which we can choose to bet against.
#' We've backed out the expected probability using a pretty straightforward transformation on the odds
#' (one which we've discussed: Implied Probability = 1/(Decimal Odds)), so we can reverse it to represent this function:
p3 <- ggplot(data=x,aes(x=pr,y=1/pr)) + geom_point() + geom_line() +
xlab("Implied Probability of Home Win (from bookie odds)") +
ylab("Decimal Odds (as they would be at the sportsbook)") +
ggtitle("Odds are Probabiities are Odds")
plot(p3)
#' You may recognize this function. It's the top right quadrant of y=1/x.
#'
#' Anyway, so now we have:
#' -The bookie's odds, which tell us the potential payout
#' -The actual outcome, which tells us how much we'd have made
#' So let's break down how much we would have made, on average, per bet at each of the different
#' levels the bookie could have given us. While we will use the decimal odds to calculate our payouts,
#' we'll keep our x-axis the same since odds can range widely (and probabilities cannot).
#' Read this graph as "If I bet $1 whenever the implied probability is X%,
#' I would on average get $Y in profit." Here we go:
ret <- with(x,(win*1/pr-1))
p4 <- ggplot(data=x,aes(x=pr,y=ret,colour=ret>0)) + geom_point() +
  geom_hline(yintercept = 0, colour='red') + xlab("House Odds (as probabilities)") +
ylab("Returns per $1 bet") + ggtitle("Payouts by Odds")
plot(p4)
#' Even more apparent as a bar chart:
p4 <- ggplot(data=x,aes(x=pr,y=ret,colour=ret>0, fill=ret>0)) + geom_bar(stat="identity", ymin=-1)+ #geom_point()
  geom_hline(yintercept = 0, colour='red') + xlab("House Odds (as probabilities)") +
ylab("Returns per $1 bet") + ggtitle("Payouts by Odds")
plot(p4)
#' So, the conclusion is that if we are looking for places to seek an edge, these home team underdogs
#' might be a good place to start. The house seems to have some systematic error when the home team is a heavy underdog.
#'
#' For shits and giggles, let's see what would have happened if we
#' had bet entirely on home team underdogs in the range we are seeing as promising
#' (let's call it probabilities between 12%-20%, aka ~8.25-5). Part of our data is
#' odds from an odds aggregator, so we'll use those for the simulation.
p5 <- ggplot(data=backtest,aes(x=Date,y=cumsum(returns)))+geom_line()+
xlab("Date")+ylab("Cumulative Returns")+ggtitle("How Would We Have Done")
plot(p5)
#' So had we bet $1 when applicable starting in October, we would have been at a net gain of $50-60
#' by now. Not bad. And we haven't even thought about what game we're betting on yet!
#' There are some issues with this analysis, including that we just backtested our strategy
#' on the data we used to come up with the hypothesis (big no no!). But you get the point, and it's
#' as good a place as any to start looking for an edge.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{differential_score}
\alias{differential_score}
\title{The absolute difference of interaction score of two groups}
\usage{
differential_score(interaction_score_graphs, score_name = "weight")
}
\arguments{
\item{interaction_score_graphs}{Named list with elements `group1` and
`group2` containing iGraph objects with score as edge attribute. Output of
\code{\link{interaction_score}}.}
\item{score_name}{Character string specifying the name of the edge
attribute (default: `weight`).}
}
\value{
iGraph object with `differential_score` as only edge attribute
}
\description{
Computes the absolute difference of interaction score between
two groups. Returns a single graph with the differential score as only edge
attribute. The interaction score is computed by \code{\link{interaction_score}}.
}
\examples{
data(interaction_score_graphs_example)
interaction_score_graphs <- interaction_score_graphs_example
differential_score_graph <- differential_score(interaction_score_graphs, score_name = "weight")
}
|
/man/differential_score.Rd
|
permissive
|
molnet-org/molnet
|
R
| false | true | 1,108 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{differential_score}
\alias{differential_score}
\title{The absolute difference of interaction score of two groups}
\usage{
differential_score(interaction_score_graphs, score_name = "weight")
}
\arguments{
\item{interaction_score_graphs}{Named list with elements `group1` and
`group2` containing iGraph objects with score as edge attribute. Output of
\code{\link{interaction_score}}.}
\item{score_name}{Character string specifying the name of the edge
attribute (default: `weight`).}
}
\value{
iGraph object with `differential_score` as only edge attribute
}
\description{
Computes the absolute difference of interaction score between
two groups. Returns a single graph with the differential score as only edge
attribute. The interaction score is computed by \code{\link{interaction_score}}.
}
\examples{
data(interaction_score_graphs_example)
interaction_score_graphs <- interaction_score_graphs_example
differential_score_graph <- differential_score(interaction_score_graphs, score_name = "weight")
}
|
library('rvest')
library('stringr')
library('plyr')
library('tidyverse')
myfiles = list.files()
myfiles1 <- str_remove(myfiles,' _ Motorsport Stats.htm')
myfiles2 <- character(length(myfiles1))  # initialize before filling element-wise
for (i in 1:17) {
  myfiles2[i] <- paste(myfiles1[i],'.csv',sep = '')
}
convert <- function(a,b) {
read_html(a) %>%
html_table(header = TRUE,fill=TRUE) -> x
y <- paste(b,'Finish Line Speed.csv',sep = ' ')
write.csv(x[[2]],file = y)
y <- paste(b,'Intermediate 1 Speed.csv',sep = ' ')
write.csv(x[[3]],file = y)
y <- paste(b,'Intermediate 2 Speed.csv',sep = ' ')
write.csv(x[[4]],file = y)
y <- paste(b,'Speed Trap.csv',sep = ' ')
write.csv(x[[5]],file = y)
}
for (i in 1:17) {
convert(myfiles[i],myfiles1[i])
}
convert1 <- function(a,b) {
read_html(a) %>%
html_table(header = TRUE,fill=TRUE) -> x
y <- paste(b,'.csv',sep = '')
write.csv(x[[1]],file = y)}
for (i in 1:16) {
convert1(myfiles[i],myfiles1[i])
}
z <- vector("list",16)
for (i in 1:17) {
read_html(myfiles[i]) %>%
html_table(header = TRUE,fill=TRUE) -> z[i]
}
for (i in 0:14) {
write.csv(z[[i+1]][[1]],file = myfiles2[i+1])
}
|
/Convert HTML.R
|
no_license
|
htilley/Formula1
|
R
| false | false | 1,168 |
r
|
library('rvest')
library('stringr')
library('plyr')
library('tidyverse')
myfiles = list.files()
myfiles1 <- str_remove(myfiles,' _ Motorsport Stats.htm')
myfiles2 <- character(length(myfiles1))  # initialize before filling element-wise
for (i in 1:17) {
  myfiles2[i] <- paste(myfiles1[i],'.csv',sep = '')
}
convert <- function(a,b) {
read_html(a) %>%
html_table(header = TRUE,fill=TRUE) -> x
y <- paste(b,'Finish Line Speed.csv',sep = ' ')
write.csv(x[[2]],file = y)
y <- paste(b,'Intermediate 1 Speed.csv',sep = ' ')
write.csv(x[[3]],file = y)
y <- paste(b,'Intermediate 2 Speed.csv',sep = ' ')
write.csv(x[[4]],file = y)
y <- paste(b,'Speed Trap.csv',sep = ' ')
write.csv(x[[5]],file = y)
}
for (i in 1:17) {
convert(myfiles[i],myfiles1[i])
}
convert1 <- function(a,b) {
read_html(a) %>%
html_table(header = TRUE,fill=TRUE) -> x
y <- paste(b,'.csv',sep = '')
write.csv(x[[1]],file = y)}
for (i in 1:16) {
convert1(myfiles[i],myfiles1[i])
}
z <- vector("list",16)
for (i in 1:17) {
read_html(myfiles[i]) %>%
html_table(header = TRUE,fill=TRUE) -> z[i]
}
for (i in 0:14) {
write.csv(z[[i+1]][[1]],file = myfiles2[i+1])
}
|
#This file builds the observation ensemble from tracer BTCs at each well and performs an EnKF update of the log-permeability field
rm(list=ls())
#set the path to the files
main_path = '/files2/scratch/chenxy/'
paths = c('Tracer_Mar2011_UK3_Iter14/')
prior_path = 'Tracer_Mar2011_UK3_Iter13/'
npath = length(paths)
fields = c(1:300)
par_idx = c(1:300)
nfields = length(fields) # number of random fields
#load the ensemble of parameters
perm_par = read.table(paste(prior_path,'/','logK_Post_Samples_300to310h_Err20_Iter14.txt',sep=''))
new_iter = 15
realization_by_row = F #whether realizations are presented in different rows
if(!realization_by_row)
perm_par = t(perm_par)
perm_par = perm_par[par_idx,]
#check if perm_par has non_random perm for a grid
for(i in 1:ncol(perm_par)){
var_temp = var(perm_par[,i])
if(var_temp == 0)
perm_par[,i] = perm_par[,i] + rnorm(nfields,0,0.1)
}
#prefix and extension of files
prefix_file = 'OBS_FluxAve'
ext_file = '.dat'
testvariable = 'Tracer'
true_col = 3
DataSet = 'Mar2011TT'
NormConc_True = 210.0 # Mar2011 test
NormTime_True = 1.0
y_convert = 1000.0/5.9234 #Mar2011 test, injected tracer
time_start = 0.25 #Mar2011 test using kriged boundary condition
#time_start = 191.25 #Mar2011 test using triangulated boundary condition
wells = c('2-07','2-08','2-09','2-11','2-12','2-13','2-14','2-15','2-16','2-17','2-18','2-19','2-20','2-21',
'2-22','2-23','2-24','2-26','2-27','2-28','2-29','2-30','2-31','3-23','3-25','3-28',
'3-29','3-30','3-31','3-32','3-35','2-37')
max_time_upper = 340.0
min_time = 320.0
if(min_time>=400.0)
wells = c(wells,'2-34')
nwell = length(wells)
nt_obs = numeric(nwell)
t_obs = array(0,c(0,1))
BTC_obs = array(0,c(0,1))
err_cv='20'
err_cv_val=0.20
for (ivar in 1:nwell)
{
well_name = wells[ivar]
true_file = paste('TrueData_',DataSet,'/Well_',well_name,'_',DataSet,'.txt',sep='')
truedata = read.table(true_file,skip=1)
true_t = truedata[,1]/NormTime_True #time units converted to hours
trueBTC = truedata[,true_col]/NormConc_True
if(length(grep('Tracer',testvariable)))
trueBTC = (truedata[,true_col]-mean(truedata[which(truedata[,1]<0),true_col]))/NormConc_True
trueBTC[which(trueBTC<0)] = 0
id_t = which(true_t<=max_time_upper & true_t>=min_time)
true_t = true_t[id_t]
trueBTC = trueBTC[id_t]
nt_obs[ivar] = length(id_t)
t_obs = c(t_obs,true_t)
BTC_obs = c(BTC_obs,trueBTC)
}
for (ipath in 1:npath)
{
path_prefix = paste(main_path,paths[ipath],'/',sep='')
sim_data = array(0,c(nfields,sum(nt_obs))) # ensemble of simulated data
for(ifield in fields)
{
input_file = paste(path_prefix, prefix_file,'R',ifield,ext_file,sep='')
a = readLines(input_file,n=1)
b = unlist(strsplit(a,','))
nvar = length(b)-1 #the first column is time
varnames = b[-1]
#find the columns needed
varcols = array(NA,nwell,1)
for (iw in 1:nwell)
varcols[iw] = intersect(grep(wells[iw],varnames),grep(testvariable,varnames)) + 1 #the first column is time
#read from files
data0 = read.table(paste(path_prefix, prefix_file,'R',ifield,ext_file,sep=''),skip=1) # the first line is skipped
t0 = data0[,1]-time_start
#linearly interpolate the data at the given observation time points
istart = 1
for (iw in 1:nwell)
{
if(nt_obs[iw]>=1)
{
fixedt = t_obs[istart:sum(nt_obs[1:iw])]
trueBTC = BTC_obs[istart:sum(nt_obs[1:iw])]
ids = which(is.na(data0[,varcols[iw]]) == 0) #the points used for interpolation
interp = approx(t0[ids],data0[ids,varcols[iw]],xout = fixedt,rule = 2)$y
interp = interp * y_convert
sim_data[which(fields==ifield),(istart:sum(nt_obs[1:iw]))] = interp
}
istart = istart + nt_obs[iw]
}
}
#measurement error matrix
meas_err = matrix(0,sum(nt_obs),sum(nt_obs))
#generate realizations of measurement error
meas_err_samp = matrix(0,sum(nt_obs),nfields)
for (ii in 1:sum(nt_obs))
{
meas_err[ii,ii] = (max(BTC_obs[ii] * err_cv_val,0.1*err_cv_val))^2
meas_err_samp[ii,] = rnorm(nfields,0,sqrt(meas_err[ii,ii]))
var_temp = var(sim_data[,ii])
if(var_temp==0)
sim_data[,ii] = sim_data[,ii] + rnorm(nfields,0,0.1*err_cv_val)
}
meas_err_samp = t(meas_err_samp)
# realizations for observations considering measurement errors
obs_ensemble = matrix(rep(BTC_obs,times=nfields),nfields,sum(nt_obs),byrow=T) + meas_err_samp
obs_ensemble[which(obs_ensemble<0)] = 0 #remove negative concentrations
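# Note: each realization gets its own perturbed copy of the observed BTCs (observation plus a
# sampled measurement error); perturbing the observations this way is the standard stochastic
# EnKF device that keeps the analysis ensemble spread from collapsing.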
#save the ensembles for observations
write.table(obs_ensemble,file=paste(main_path,paths[ipath],'/obs_ensemble_',min_time,'to',max_time_upper,'h_Err',err_cv,'.txt',sep=''),row.names=F,col.names=F)
#load the material ids for each permeability
material_ids = read.table('material_ids.txt')
##update the perm field
#calculate the covariance matrix between parameter ensemble and data
cov_yd_perm = cov(perm_par,sim_data)
cor_yd_perm = cor(perm_par,sim_data)
#zero out the correlations and covariances between simulated data and Ringold formation K
cor_yd_perm[which(material_ids == 4),] = 0
cov_yd_perm[which(material_ids == 4),] = 0
#set the covariance to 0 where the absolute correlation is less than 0.01*max(abs(cor_yd_perm))
cov_yd_perm[which(abs(cor_yd_perm)<0.01*max(abs(cor_yd_perm)))] = 0.0
write.table(cor_yd_perm,file=paste(main_path,paths[ipath],'/cor_yd_perm_',min_time,'to',max_time_upper,'h_Err',err_cv,'.txt',sep=''),row.names=F,col.names=F)
rm(cor_yd_perm)
cov_dd = cov(sim_data,sim_data)
cor_dd = cor(sim_data,sim_data)
cov_dd[which(abs(cor_dd)<0.01*max(abs(cor_dd)))] = 0.0
cov_dd_inv = solve(cov_dd + meas_err)
gain_perm = cov_yd_perm %*% cov_dd_inv
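# This is the ensemble Kalman update: gain_perm = C_yd (C_dd + R)^-1, and each realization below
# is nudged by gain_perm %*% (perturbed observation - simulated data), where R is the
# measurement-error covariance (meas_err).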
new_perm_par = perm_par
for (ipar in 1:nfields)
new_perm_par[ipar,] = perm_par[ipar,] + gain_perm %*% (matrix(obs_ensemble[ipar,]-sim_data[ipar,],sum(nt_obs),1))
new_perm_par[which(new_perm_par<=-11.0)] = -11.0 + rnorm(length(which(new_perm_par<=-11.0)),0,0.1)
new_perm_par[which(new_perm_par>=1)] = 1.0 + rnorm(length(which(new_perm_par>=1.0)),0,0.1)
#save the updated parameter sets to text files
write.table(t(new_perm_par),file=paste(main_path,paths[ipath],'/logK_Post_Samples_',min_time,'to',max_time_upper,'h_Err',err_cv,'_Iter',new_iter,'.txt',sep=''),row.names=F,col.names=F)
}
|
/scripts_on_file1/EnKF_update_Iter15.R
|
no_license
|
mrubayet/archived_codes_for_sfa_modeling
|
R
| false | false | 7,148 |
r
|
#This file builds the observation ensemble from tracer BTCs at each well and performs an EnKF update of the log-permeability field
rm(list=ls())
#set the path to the files
main_path = '/files2/scratch/chenxy/'
paths = c('Tracer_Mar2011_UK3_Iter14/')
prior_path = 'Tracer_Mar2011_UK3_Iter13/'
npath = length(paths)
fields = c(1:300)
par_idx = c(1:300)
nfields = length(fields) # number of random fields
#load the ensemble of parameters
perm_par = read.table(paste(prior_path,'/','logK_Post_Samples_300to310h_Err20_Iter14.txt',sep=''))
new_iter = 15
realization_by_row = F #whether realizations are presented in different rows
if(!realization_by_row)
perm_par = t(perm_par)
perm_par = perm_par[par_idx,]
#check if perm_par has non_random perm for a grid
for(i in 1:ncol(perm_par)){
var_temp = var(perm_par[,i])
if(var_temp == 0)
perm_par[,i] = perm_par[,i] + rnorm(nfields,0,0.1)
}
#prefix and extension of files
prefix_file = 'OBS_FluxAve'
ext_file = '.dat'
testvariable = 'Tracer'
true_col = 3
DataSet = 'Mar2011TT'
NormConc_True = 210.0 # Mar2011 test
NormTime_True = 1.0
y_convert = 1000.0/5.9234 #Mar2011 test, injected tracer
time_start = 0.25 #Mar2011 test using kriged boundary condition
#time_start = 191.25 #Mar2011 test using triangulated boundary condition
wells = c('2-07','2-08','2-09','2-11','2-12','2-13','2-14','2-15','2-16','2-17','2-18','2-19','2-20','2-21',
'2-22','2-23','2-24','2-26','2-27','2-28','2-29','2-30','2-31','3-23','3-25','3-28',
'3-29','3-30','3-31','3-32','3-35','2-37')
max_time_upper = 340.0
min_time = 320.0
if(min_time>=400.0)
wells = c(wells,'2-34')
nwell = length(wells)
nt_obs = numeric(nwell)
t_obs = array(0,c(0,1))
BTC_obs = array(0,c(0,1))
err_cv='20'
err_cv_val=0.20
for (ivar in 1:nwell)
{
well_name = wells[ivar]
true_file = paste('TrueData_',DataSet,'/Well_',well_name,'_',DataSet,'.txt',sep='')
truedata = read.table(true_file,skip=1)
true_t = truedata[,1]/NormTime_True #time units converted to hours
trueBTC = truedata[,true_col]/NormConc_True
if(length(grep('Tracer',testvariable)))
trueBTC = (truedata[,true_col]-mean(truedata[which(truedata[,1]<0),true_col]))/NormConc_True
trueBTC[which(trueBTC<0)] = 0
id_t = which(true_t<=max_time_upper & true_t>=min_time)
true_t = true_t[id_t]
trueBTC = trueBTC[id_t]
nt_obs[ivar] = length(id_t)
t_obs = c(t_obs,true_t)
BTC_obs = c(BTC_obs,trueBTC)
}
for (ipath in 1:npath)
{
path_prefix = paste(main_path,paths[ipath],'/',sep='')
sim_data = array(0,c(nfields,sum(nt_obs))) # ensemble of simulated data
for(ifield in fields)
{
input_file = paste(path_prefix, prefix_file,'R',ifield,ext_file,sep='')
a = readLines(input_file,n=1)
b = unlist(strsplit(a,','))
nvar = length(b)-1 #the first column is time
varnames = b[-1]
#find the columns needed
varcols = array(NA,nwell,1)
for (iw in 1:nwell)
varcols[iw] = intersect(grep(wells[iw],varnames),grep(testvariable,varnames)) + 1 #the first column is time
#read from files
data0 = read.table(paste(path_prefix, prefix_file,'R',ifield,ext_file,sep=''),skip=1) # the first line is skipped
t0 = data0[,1]-time_start
#linearly interpolate the data at the given observation time points
istart = 1
for (iw in 1:nwell)
{
if(nt_obs[iw]>=1)
{
fixedt = t_obs[istart:sum(nt_obs[1:iw])]
trueBTC = BTC_obs[istart:sum(nt_obs[1:iw])]
ids = which(is.na(data0[,varcols[iw]]) == 0) #the points used for interpolation
interp = approx(t0[ids],data0[ids,varcols[iw]],xout = fixedt,rule = 2)$y
interp = interp * y_convert
sim_data[which(fields==ifield),(istart:sum(nt_obs[1:iw]))] = interp
}
istart = istart + nt_obs[iw]
}
}
#measurement error matrix
meas_err = matrix(0,sum(nt_obs),sum(nt_obs))
#generate realizations of measurement error
meas_err_samp = matrix(0,sum(nt_obs),nfields)
for (ii in 1:sum(nt_obs))
{
meas_err[ii,ii] = (max(BTC_obs[ii] * err_cv_val,0.1*err_cv_val))^2
meas_err_samp[ii,] = rnorm(nfields,0,sqrt(meas_err[ii,ii]))
var_temp = var(sim_data[,ii])
if(var_temp==0)
sim_data[,ii] = sim_data[,ii] + rnorm(nfields,0,0.1*err_cv_val)
}
meas_err_samp = t(meas_err_samp)
# realizations for observations considering measurement errors
obs_ensemble = matrix(rep(BTC_obs,times=nfields),nfields,sum(nt_obs),byrow=T) + meas_err_samp
obs_ensemble[which(obs_ensemble<0)] = 0 #remove negative concentrations
#save the ensembles for observations
write.table(obs_ensemble,file=paste(main_path,paths[ipath],'/obs_ensemble_',min_time,'to',max_time_upper,'h_Err',err_cv,'.txt',sep=''),row.names=F,col.names=F)
#load the material ids for each permeability
material_ids = read.table('material_ids.txt')
##update the perm field
#calculate the covariance matrix between parameter ensemble and data
cov_yd_perm = cov(perm_par,sim_data)
cor_yd_perm = cor(perm_par,sim_data)
#zero out the correlations and covariances between simulated data and Ringold formation K
cor_yd_perm[which(material_ids == 4),] = 0
cov_yd_perm[which(material_ids == 4),] = 0
#set the covariance to 0 where the absolute correlation is less than 0.01*max(abs(cor_yd_perm))
cov_yd_perm[which(abs(cor_yd_perm)<0.01*max(abs(cor_yd_perm)))] = 0.0
write.table(cor_yd_perm,file=paste(main_path,paths[ipath],'/cor_yd_perm_',min_time,'to',max_time_upper,'h_Err',err_cv,'.txt',sep=''),row.names=F,col.names=F)
rm(cor_yd_perm)
cov_dd = cov(sim_data,sim_data)
cor_dd = cor(sim_data,sim_data)
cov_dd[which(abs(cor_dd)<0.01*max(abs(cor_dd)))] = 0.0
cov_dd_inv = solve(cov_dd + meas_err)
gain_perm = cov_yd_perm %*% cov_dd_inv
new_perm_par = perm_par
for (ipar in 1:nfields)
new_perm_par[ipar,] = perm_par[ipar,] + gain_perm %*% (matrix(obs_ensemble[ipar,]-sim_data[ipar,],sum(nt_obs),1))
new_perm_par[which(new_perm_par<=-11.0)] = -11.0 + rnorm(length(which(new_perm_par<=-11.0)),0,0.1)
new_perm_par[which(new_perm_par>=1)] = 1.0 + rnorm(length(which(new_perm_par>=1.0)),0,0.1)
#save the updated parameter sets to text files
write.table(t(new_perm_par),file=paste(main_path,paths[ipath],'/logK_Post_Samples_',min_time,'to',max_time_upper,'h_Err',err_cv,'_Iter',new_iter,'.txt',sep=''),row.names=F,col.names=F)
}
|
# TODO: ... may not work in the initialization. Check.
# TODO: Check if adding models after object initialization works, especially if we have asked for recommendations already
#### Non Seasonal Model ####
test_that("Non Seasonal Model", {
# data("USeconomic")
# data = as.data.frame(USeconomic)
# colnames(data) = c("logM1", "logGNP", "rs", "rl")
data = USeconomic
lag.max = 10
models = list("AIC None" = list(select = "aic", trend_type = "none", lag.max = lag.max),
"AIC Trend" = list(select = "aic", trend_type = "trend", lag.max = lag.max),
"AIC Both" = list(select = "aic", trend_type = "both", lag.max = lag.max),
"BIC None" = list(select = "bic", trend_type = "none", lag.max = lag.max),
"BIC Trend" = list(select = "bic", trend_type = "trend", lag.max = lag.max),
"BIC Both" = list(select = "bic", trend_type = "both", lag.max = lag.max)
)
var_interest = 'logGNP'
mdl_build = ModelBuildMultivariateVAR$new(data = data, var_interest = var_interest,
mdl_list = models, verbose = 0)
summary_build = mdl_build$summarize_build()
recommendations = mdl_build$get_recommendations()
#summary_build %>% write.csv(file = "multivar_VAR_summary.csv", row.names = FALSE)
#recommendations %>% write.csv(file = "multivar_VAR_recommendations.csv", row.names = FALSE)
# https://stackoverflow.com/questions/32328802/where-should-i-put-data-for-automated-tests-with-testthat
# http://r-pkgs.had.co.nz/data.html#other-data
summary_target_file = system.file("extdata", "multivar_VAR_build_no_season_summary.csv", package = "tswgewrapped", mustWork = TRUE)
summary_target = read.csv(summary_target_file, header = TRUE, stringsAsFactors = FALSE) %>%
dplyr::as_tibble() %>%
dplyr::mutate_if(is.numeric, as.double) # Converts integer to double to match type
good1 = all.equal(summary_build, summary_target)
testthat::expect_equal(good1, TRUE)
recommendation_target_file = system.file("extdata", "multivar_VAR_build_no_season_recommendations.csv", package = "tswgewrapped", mustWork = TRUE)
recommendation_target = read.csv(recommendation_target_file, header = TRUE, stringsAsFactors = FALSE) %>%
dplyr::as_tibble() %>%
dplyr::mutate_if(is.numeric, as.double) # Converts integer to double to match type
good2 = all.equal(recommendations %>% dplyr::mutate_if(is.numeric, as.double), recommendation_target)
testthat::expect_equal(good2, TRUE)
mdl_build$build_recommended_models()
final_models = mdl_build$get_final_models()
# saveRDS(final_models, "multivar_VAR_build_final_model_no_season_list.rds")
final_model_list_file = system.file("extdata", "multivar_VAR_build_final_model_no_season_list.rds", package = "tswgewrapped", mustWork = TRUE)
final_models_target = readRDS(final_model_list_file)
good3 = all.equal(final_models, final_models_target)
print(good3)
testthat::expect_equal(good3, TRUE)
})
#### Seasonal Model ####
test_that("Seasonal Model", {
# data("USeconomic")
# data = as.data.frame(USeconomic)
# colnames(data) = c("logM1", "logGNP", "rs", "rl")
data = USeconomic
lag.max = 10
models = list("AIC Trend" = list(select = "aic", trend_type = "trend", season = 3, lag.max = lag.max),
"BIC Trend" = list(select = "bic", trend_type = "trend", season = 4, lag.max = lag.max))
var_interest = 'logGNP'
mdl_build = ModelBuildMultivariateVAR$new(data = data, var_interest = var_interest,
mdl_list = models, verbose = 0)
summary_build = mdl_build$summarize_build()
recommendations = mdl_build$get_recommendations()
# summary_build %>% write.csv(file = "multivar_VAR_build_season_summary.csv", row.names = FALSE)
# recommendations %>% write.csv(file = "multivar_VAR_build_season_recommendations.csv", row.names = FALSE)
# https://stackoverflow.com/questions/32328802/where-should-i-put-data-for-automated-tests-with-testthat
# http://r-pkgs.had.co.nz/data.html#other-data
summary_target_file = system.file("extdata", "multivar_VAR_build_season_summary.csv", package = "tswgewrapped", mustWork = TRUE)
summary_target = read.csv(summary_target_file, header = TRUE, stringsAsFactors = FALSE) %>%
dplyr::as_tibble() %>%
dplyr::mutate_if(is.numeric, as.double) # Converts integer to double to match type
good1 = all.equal(summary_build, summary_target)
testthat::expect_equal(good1, TRUE)
recommendation_target_file = system.file("extdata", "multivar_VAR_build_season_recommendations.csv", package = "tswgewrapped", mustWork = TRUE)
recommendation_target = read.csv(recommendation_target_file, header = TRUE, stringsAsFactors = FALSE) %>%
dplyr::as_tibble() %>%
dplyr::mutate_if(is.numeric, as.double) # Converts integer to double to match type
good2 = all.equal(recommendations %>% dplyr::mutate_if(is.numeric, as.double), recommendation_target)
testthat::expect_equal(good2, TRUE)
mdl_build$build_recommended_models()
final_models = mdl_build$get_final_models()
#saveRDS(final_models, "multivar_VAR_build_final_model_season_list.rds")
final_model_list_file = system.file("extdata", "multivar_VAR_build_final_model_season_list.rds", package = "tswgewrapped", mustWork = TRUE)
final_models_target = readRDS(final_model_list_file)
good3 = all.equal(final_models, final_models_target)
testthat::expect_equal(good3, TRUE)
})
|
/tests/testthat/test-MultivariateBuildVAR.R
|
no_license
|
mattfarrow1/tswgewrapped
|
R
| false | false | 5,551 |
r
|
|
ammp_sample_grid <-
function(# geospatial file for site delineation. ESRI shapefile (.shp), ESRI
# geodatabase (.gdb), or Keyhole markup (.kml) preferred.
file,
# Provide a site name for folder creation and map labeling, if there
# is a formal identification for the site in a provincial registry,
# use that instead of a colloquial name
site_name,
# province site is located in, only used to speed up geospatial
# activities. e.g. 'N.B.' or 'N.S.'
province_abbr,
# CRS 32620 is UTM 20N, suitable for NB and most of NS, see NRCAN
# website for more detail :
# https://www.nrcan.gc.ca/earth-sciences/geography/topographic-information/maps/utm-grid-map-projections/utm-grid-universal-transverse-mercator-projection/9779
# Use ESPG codes for other projections based on where site is located
# (i.e. UTM21/22N for N.L. and UTM8/9/10N for BC depending on where
# the site is)
crs_grid = 32620,
# choose whether or not to download the coastline file from Stats Canada;
# choose FALSE if you have already downloaded it and saved the shapefile
# in the data/input/coastline folder
dl_coastline = TRUE,
# output extension (haven't tried extensively, should work with 'shp',
# 'kml')
output_type = "shp") {
library(tidyverse)
library(sf)
# Check if data/output and data/input folders are created, and create them if necessary
if (!dir.exists(paste0("data/output/", site_name, "/"))) {
message(
paste0(
"Creating directory for script outputs at 'data/output/",
site_name,
"/'"
)
)
dir.create(paste0("data/output/", site_name, "/"), recursive = TRUE)
}
if (!dir.exists("data/input/")) {
message("Creating directory for script outputs at 'data/input/'")
dir.create("data/input/", recursive = TRUE)
}
if (dl_coastline == TRUE) {
temp <- tempfile()
download.file(
"http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/files-fichiers/2016/lpr_000b16a_e.zip",
temp
)
coastlines <- st_read(unzip(temp, exdir = "data/input")[3])
message(
"Next time you run this function, try setting `dl_coastline = FALSE` so you don't download the coastline file again!"
)
} else {
message("Using user provided coastline shapefile found in the data/coastline folder.")
coastlines <-
st_read(list.files("data/input/coastline/", full.names = TRUE)[grep(pattern = ".shp",
x = list.files("data/input/coastline/"))])
}
province <- coastlines %>%
filter(PREABBR == province_abbr) %>%
st_transform(crs = crs_grid)
lease_boundary <- st_read(file) %>%
st_transform(crs = crs_grid) %>%
st_zm()
lease_200m <- st_buffer(lease_boundary, dist = 200) %>%
st_difference(province) %>%
st_difference(lease_boundary) %>%
mutate(distance = "200m") %>%
select(distance)
lease_500m <- st_buffer(lease_boundary, dist = 500) %>%
st_difference(province) %>%
st_difference(lease_boundary) %>%
st_difference(lease_200m) %>%
mutate(distance = "500m") %>%
select(distance)
lease_1000m <- st_buffer(lease_boundary, dist = 1000) %>%
st_difference(province) %>%
st_difference(lease_boundary) %>%
st_difference(lease_200m) %>%
st_difference(lease_500m) %>%
mutate(distance = "1000m") %>%
select(distance)
lease_1500m <- st_buffer(lease_boundary, dist = 1500) %>%
st_difference(province) %>%
st_difference(lease_boundary) %>%
st_difference(lease_200m) %>%
st_difference(lease_500m) %>%
st_difference(lease_1000m) %>%
mutate(distance = "1500m") %>%
select(distance)
lease_limits <- st_bbox(lease_1500m)
AOI <- province %>%
st_crop(lease_limits)
lease_buffers <- ggplot(data = AOI) +
geom_sf(col = 'light grey') +
geom_sf(data = lease_200m, fill = NA) +
geom_sf(data = lease_500m, fill = NA) +
geom_sf(data = lease_1000m, fill = NA) +
geom_sf(data = lease_1500m, fill = NA)
# These next `while` loops are necessary because the `st_sample` function
# calculates the regular sample grid based on the bounding box of the
# polygon, which does not always return the exact required number of sample
# points due to the odd shape of the polygon. They simply keep re-running
# the sample grid generation until it has the proper number of sample
# points in the polygon. Other options are random points, hexagonal, and
# triangular. (See the hedged helper sketch after this function for one way
# to factor this retry logic out.)
set.seed(0)
x <- 0
while (x != 10L) {
lease_200m_sample <- lease_200m %>%
st_sample(10, type = 'regular') %>%
st_as_sf() %>%
mutate(
distance = "200m",
X = st_coordinates(st_transform(., 'WGS84'))[, 1],
Y = st_coordinates(st_transform(., 'WGS84'))[, 2]
)
x <- length(lease_200m_sample$x)
}
x <- 0
while (x != 14L) {
lease_500m_sample <- lease_500m %>%
st_sample(14, type = 'regular') %>%
st_as_sf() %>%
mutate(
distance = "500m",
X = st_coordinates(st_transform(., 'WGS84'))[, 1],
Y = st_coordinates(st_transform(., 'WGS84'))[, 2]
)
x <- length(lease_500m_sample$x)
}
x <- 0
while (x != 20L) {
lease_1000m_sample <- lease_1000m %>%
st_sample(20, type = 'regular') %>%
st_as_sf() %>%
mutate(
distance = "1000m",
X = st_coordinates(st_transform(., 'WGS84'))[, 1],
Y = st_coordinates(st_transform(., 'WGS84'))[, 2]
)
x <- length(lease_1000m_sample$x)
}
x <- 0
while (x != 7L) {
lease_1500m_sample <- lease_1500m %>%
st_sample(7, type = 'regular') %>%
st_as_sf() %>%
mutate(
distance = "1500m",
X = st_coordinates(st_transform(., 'WGS84'))[, 1],
Y = st_coordinates(st_transform(., 'WGS84'))[, 2]
)
x <- length(lease_1500m_sample$x)
}
ammp_sample_sites <- lease_buffers +
geom_sf(data = lease_200m_sample, col = 'red') +
geom_sf(data = lease_500m_sample, col = 'blue') +
geom_sf(data = lease_1000m_sample, col = 'green') +
geom_sf(data = lease_1500m_sample, col = 'orange') +
geom_sf(data = lease_boundary, col = 'black', fill = 'red') +
coord_sf(expand = FALSE)
# print the sample site plan in R
print(ammp_sample_sites)
sample_sites <-
bind_rows(lease_200m_sample,
lease_500m_sample,
lease_1000m_sample,
lease_1500m_sample)
# save the sample sites to "data/output/[site name]/[site
# name]_sample_sites.shp"
if (!dir.exists(paste0("data/output/", site_name, "/"))) {
message(
paste0(
"Creating directory for script out puts at in 'data/output/",
site_name,
"/'"
)
)
dir.create(paste0("data/output/", site_name, "/"), recursive = TRUE)
}
st_write(
sample_sites,
paste0(
"data/output/",
site_name,
"/",
site_name,
"_sample_sites.",
output_type
),
delete_layer = TRUE
)
st_write(
sample_sites,
paste0(
"data/output/",
site_name,
"/",
site_name,
"_sample_sites.csv"
),
layer_options = "GEOMETRY=AS_XY",
append = FALSE
)
st_write(
lease_200m,
paste0(
"data/output/",
site_name,
"/",
site_name,
"_200m_buffer.",
output_type
),
delete_layer = TRUE
)
st_write(
lease_500m,
paste0(
"data/output/",
site_name,
"/",
site_name,
"_500m_buffer.",
output_type
),
delete_layer = TRUE
)
st_write(
lease_1000m,
paste0(
"data/output/",
site_name,
"/",
site_name,
"_1000m_buffer.",
output_type
),
delete_layer = TRUE
)
st_write(
lease_1500m,
paste0(
"data/output/",
site_name,
"/",
site_name,
"_1500m_buffer.",
output_type
),
delete_layer = TRUE
)
}
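# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original function above): the four `while`
# retry blocks could be factored into one helper. The helper name
# `sample_exact_n` is hypothetical and assumes, like the function above, that
# the sf and dplyr packages are attached and that `poly` is a projected sf polygon.
sample_exact_n <- function(poly, n, dist_label) {
  pts <- poly %>%
    st_sample(n, type = "regular") %>%
    st_as_sf()
  # st_sample() works on the polygon's bounding box, so keep re-sampling until
  # exactly n points fall inside the polygon
  while (nrow(pts) != n) {
    pts <- poly %>%
      st_sample(n, type = "regular") %>%
      st_as_sf()
  }
  pts %>%
    mutate(
      distance = dist_label,
      X = st_coordinates(st_transform(., "WGS84"))[, 1],
      Y = st_coordinates(st_transform(., "WGS84"))[, 2]
    )
}
# Hypothetical call of the main function (file path and site name are placeholders):
# ammp_sample_grid(file = "data/input/example_lease.shp", site_name = "ExampleSite",
#                  province_abbr = "N.S.")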
|
/ammp_sample_grid.R
|
no_license
|
pkraska/ammp_sample_grid
|
R
| false | false | 8,566 |
r
|
|
\name{pipe}
\alias{as.pipe}
\alias{is.pipe}
\alias{pipe}
\title{Create new "pipe" object.}
\usage{
pipe(type, ...)
is.pipe(x)
as.pipe(x, ...)
}
\description{
A pipe object represents a component in a
\code{\link{pipeline}}. Pipes provide a declarative
specification of interactive behaviour, and define the
behaviour of each component in the data hierarchy.
}
\details{
This function is designed to be used by authors of new
types of pipes. If you are a ggvis user, please use an
existing pipe: a data frame, a transform, a mark, or a
branch.
}
\keyword{internal}
|
/man/pipe.Rd
|
no_license
|
imclab/ggvis
|
R
| false | false | 583 |
rd
|
|
## Plot 3 - Course Project 1
## The data set is unzipped in the current working directory
## Loading and cleaning data
data <- read.csv("household_power_consumption.txt", sep = ";")
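## Note: missing values in this dataset are coded as "?", so the numeric columns are read
## in as text; that is why they are converted with as.numeric(as.character(...)) below
## (passing na.strings = "?" to read.csv would be an equivalent alternative).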
data$DateTime <- paste(as.character(data[,1]), data[,2])
data[,1] <- as.Date(data$Date, "%d/%m/%Y")
data <- subset(data, Date == "2007-02-01" | Date == "2007-02-02")
data[,3] <- as.numeric(as.character(data[,3]))
data$data <- strptime(data$DateTime, "%d/%m/%Y %H:%M")
data$Sub_metering_1 <- (as.numeric(as.character(data$Sub_metering_1)))
data$Sub_metering_2 <- (as.numeric(as.character(data$Sub_metering_2)))
data$Sub_metering_3 <- (as.numeric(as.character(data$Sub_metering_3)))
## plot 3
png("plot3.png", width = 480, height = 480)
plot(data$data, data$Sub_metering_1, ylab = "Energy sub metering",
xlab = "", type = "l")
lines(data$data, data$Sub_metering_2, col = "red")
lines(data$data, data$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2",
"Sub_metering_3"), col = c("black", "red",
"blue"),
lty = 1)
dev.off()
|
/ExploratoryDataAnalysis/plot3.R
|
no_license
|
dswirtz/DataScienceSpecialization
|
R
| false | false | 1,149 |
r
|
|
# ......................................................................................
# ...........Exercise 6 - Selected distributions of a continuous random variable........
# ..................Martina Litschmannová, Adéla Vrtková, Michal Béreš..................
# ......................................................................................
# If the text does not display correctly, set File \ Reopen with Encoding... to UTF-8
# To display the script outline use CTRL+SHIFT+O
# To run the commands on individual lines use CTRL+ENTER
# Overview of the distributions and their functions ####
# * Introduction: probability density, distribution function and quantile function ####
# ** Probability density ####
# - starts with the letter **d**: p = d...(x, ...)
#
# ** Distribution function ####
# - starts with the letter **p**: $p = P(X < x)$: p = p...(x, ...)
#
# ** Quantile function ####
# - starts with the letter **q**: find x for a given p: $p = F(x) \rightarrow x = F^{-1}(p)$:
# x = q...(p, ...)
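# A quick illustration of this d/p/q naming convention using the standard normal
# distribution (standard R behaviour, added for clarity; the related r...() functions
# generate random draws):
dnorm(0) # density f(0) of N(0,1)
pnorm(1.96) # distribution function P(X < 1.96), approx. 0.975
qnorm(0.975) # quantile function, approx. 1.96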
# * Uniform distribution: $X \sim Ro(a, b)$ ####
# - the random variable takes only values greater than a and smaller than b
# - all values have the same density of occurrence -> the probability density is
# constant between a and b and zero elsewhere
# Probability density function f(x)
a = 2 # from
b = 4 # to
x = 3
dunif(x, a, b)
# plot the probability density function
x = seq(from = 0, to = 6, by = 0.01)
f_x = dunif(x, a, b)
plot(x, f_x, cex = 0.1) # cex is the marker size
grid()
# Distribution function F(x) = P(X < x)
a = 2 # from
b = 4 # to
x = 3
punif(x, a, b)
# plot the distribution function
x = seq(from = 0, to = 6, by = 0.01)
F_x = punif(x, a, b)
plot(x, F_x, type = 'l')
grid()
# quantile function F^(-1)(p) = x: P(X<x)=p
a = 2 # from
b = 4 # to
p = 0.75
qunif(p, a, b)
# plot - quantile function F^(-1)(p) = x
p = seq(from=0, to=1, by=0.01)
x = qunif(p, a, b)
plot(p, x, type = 'l')
grid()
# * Exponential distribution: $X \sim Exp(\lambda)$ ####
# - time until the 1st event, time between events (only in the stable-life period -
# Poisson process)
# - the parameter $\lambda$ is the same one as in the Poisson distribution
# - the mean is: $E(X)=1 / \lambda$
# Probability density function f(x)
lambda = 2
x = 1
dexp(x, lambda)
# plot the probability density function
x = seq(from = 0, to = 6, by = 0.01)
f_x = dexp(x, lambda)
plot(x, f_x, type='l')
grid()
# Distribution function F(x) = P(X < x)
lambda = 2
x = 1
pexp(x, lambda)
# plot the distribution function
x = seq(from = 0, to = 6, by = 0.01)
F_x = pexp(x, lambda)
plot(x, F_x, type = 'l')
grid()
# quantile function F^(-1)(p) = x: P(X<x)=p
lambda = 2
p = 0.5
qexp(p, lambda)
# plot - quantile function F^(-1)(p) = x
p = seq(from=0, to=1, by=0.001)
x = qexp(p, lambda)
plot(p, x, type = 'l')
grid()
# * Weibull distribution: $X \sim W(\theta,\beta)$ ####
# - time until the 1st event (failure) (a suitable choice of β allows use in any period
# of the failure-intensity curve)
# - an extension of the exponential distribution: Exp(λ) = W(Θ=1/λ, β=1)
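# Quick check of the relationship above (standard R fact, added for clarity):
# with shape beta = 1, the Weibull CDF coincides with the exponential CDF with rate 1/theta.
pweibull(1, shape = 1, scale = 1/2) # equals ...
pexp(1, 2)                          # ... this value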
# Probability density function f(x)
theta = 1/2 # equivalent of 1/lambda in the exponential distribution
beta = 1 # beta = 1 -> exponential distribution
x = 5
dweibull(x,shape=beta, scale=theta)
# plot the probability density function
x = seq(from = 0, to = 6, by = 0.01)
f_x = dweibull(x,shape=beta, scale=theta)
plot(x, f_x, type='l')
grid()
# Distribution function F(x) = P(X < x)
theta = 3 # equivalent of 1/lambda in the exponential distribution
beta = 2 # beta = 1 -> exponential distribution
x = 5
pweibull(x,shape=beta, scale=theta)
# plot the distribution function
x = seq(from = 0, to = 6, by = 0.01)
F_x = pweibull(x,shape=beta, scale=theta)
plot(x, F_x, type = 'l')
grid()
# quantile function F^(-1)(p) = x: P(X<x)=p
theta = 3 # equivalent of 1/lambda in the exponential distribution
beta = 2 # beta = 1 -> exponential distribution
p = 0.5
qweibull(p,shape=beta, scale=theta)
# plot - quantile function F^(-1)(p) = x
p = seq(from=0, to=1, by=0.01)
x = qweibull(p,shape=beta, scale=theta)
plot(p, x, type = 'l')
grid()
# * Normal distribution: $X \sim N(\mu,\sigma^2)$ ####
# - a distribution modelling e.g. measurement errors, or the behaviour of the sum/mean of
# many other random variables
# - see the Central Limit Theorem
# - $\mu$ is directly the mean of the distribution: $E(X)=\mu$
# - $\sigma$ is directly the standard deviation of the distribution: $D(X)=\sigma^2$
# - with parameters $\mu=0,\sigma=1$ it is called the standard Normal distribution
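# Standardisation note (well-known property, added for clarity): for X ~ N(mu, sigma^2),
# P(X < x) can equivalently be computed from the standard normal via z = (x - mu)/sigma.
mu = 2; sigma = 3; x = 4
pnorm(x, mean = mu, sd = sigma) # direct
pnorm((x - mu)/sigma)           # via standardisation, same value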
# Probability density function f(x)
mu = 2
sigma = 3
x = 4
dnorm(x, mean=mu, sd=sigma)
# plot the probability density function
x = seq(from = -5, to = 10, by = 0.01)
f_x = dnorm(x, mean=mu, sd=sigma)
plot(x, f_x, type='l')
grid()
# Distribution function F(x) = P(X < x)
mu = 2
sigma = 3
x = 4
pnorm(x, mean=mu, sd=sigma)
# plot the distribution function
x = seq(from = -5, to = 10, by = 0.01)
F_x = pnorm(x, mean=mu, sd=sigma)
plot(x, F_x, type = 'l')
grid()
# quantile function F^(-1)(p) = x: P(X<x)=p
mu = 2
sigma = 3
p = 0.5
qnorm(p, mean=mu, sd=sigma)
# plot - quantile function F^(-1)(p) = x
p = seq(from=0, to=1, by=0.01)
x = qnorm(p, mean=mu, sd=sigma)
plot(p, x, type = 'l')
grid()
# Examples ####
# * Example 1. ####
# The height in the population of boys aged 3.5-4 years has a normal distribution with mean
# 102 cm and standard deviation 4.5 cm. Determine what percentage of boys of this age
# have a height less than or equal to 93 cm.
# X ... height of boys aged 3.5 to 4 years (cm)
# X ~ N(mu = 102, sd = 4.5)
mu = 102
sigma = 4.5
# P(X<=93)=F(93)
pnorm(93, mean=mu, sd=sigma)
# * Example 2. ####
# The average lifetime of a machine part is 30 000 hours. Assume that the part is in
# its stable-life period. Determine:
# X ... lifetime of the part (h)
# X ~ Exp(lambda), where E(X)=1/lambda
lambda = 1/30000
# ** a) ####
# the probability that the part lasts no more than 2 000 hours,
#a) P(X<2000)=F(2000)
pexp(2000, lambda)
# ** b) ####
# the probability that the part lasts more than 35 000 hours,
#b) P(X>35000)=1-F(35000)
1 - pexp(35000, lambda)
# ** c) ####
# the time by which 95 % of the parts fail.
#c) P(X<t)=0.95 -> F(t)=0.95 -> t ... 95% quantile
qexp(0.95, lambda)
# * Example 3. ####
# A production device fails on average once every 2000 hours. The variable Y representing
# the waiting time until a failure has an exponential distribution. Determine the time T0
# so that the probability that the device works longer than T0 is 0.99.
# X ... waiting time until a failure (h)
# X ~ Exp(lambda), where E(X)=1/lambda
lambda = 1/2000
#P(X>t)=0.99 -> 1-F(t)=0.99 -> F(t)=0.01 -> t ... 1% quantile
qexp(0.01, lambda)
# * Example 4. ####
# The measurement results are affected only by a normally distributed error with zero mean
# and standard deviation 3 mm. What is the probability that in 3 measurements
# the error will at least once lie in the interval (0 mm; 2.4 mm)?
# Y ... size of the measurement error (mm)
# Y ~ N(mu = 0,sigma = 3)
mu = 0
sigma = 3
# pp ... probability that the measurement error lies in the interval 0.0-2.4 mm
pp = pnorm(2.4,mean=mu,sd=sigma) - pnorm(0,mean=mu,sd=sigma)
pp
# X ... number of measurement errors in the interval 0 mm - 2.4 mm in 3 measurements
# X ~ Bi(n = 3,p = pp)
n = 3
p = pp
# P(X>=1)=1-P(X=0)
1 - dbinom(0, n, p)
# * Example 5. ####
# In a large computer network, 25 users log in per hour on average. Determine the
# probability
# that:
# ** a) ####
# nobody logs in between 14:30 - 14:36,
# X ... number of users logged in within 6 minutes
# X ~ Po(lt = 2.5)
lambda = 25/60
t = 6
lt = lambda*t
# P(X=0)
dpois(0, lt)
# ** b) ####
# 2-3 minutes pass until the next login.
# Y ... time until the next login
# Y ~ Exp(lambda = 25/60), where E(X)=1/lambda
lambda = 25/60
# P(2<Y<3)=F(3)-F(2)
pexp(3, lambda) - pexp(2, lambda)
# ** c) ####
# Determine the maximum length of the time interval so that the probability that nobody
# logs in is at least 0.90.
# P(Y>t)=0.90 -> 1-F(t)=0.90 -> F(t)=0.10 -> t ... 10% quantile
qexp(0.10, lambda)*60
# * Example 6. ####
# The random variable X has a normal distribution N(µ; σ). Determine:
# ** a) ####
# P(µ − 2σ < X < µ + 2σ),
# P(µ − 2σ < X < µ + 2σ) = F(µ + 2σ) - F(µ - 2σ)
# X~N(µ,σ)
# it does not matter which values we choose
mu = -105.5447
sigma = 2.654
pnorm(mu + 2*sigma, mean=mu, sd=sigma) -
pnorm(mu - 2*sigma, mean=mu, sd=sigma)
# ** b) ####
# the smallest k ∈ Z such that P(µ − kσ < X < µ + kσ) > 0.99.
# the normal distribution is symmetric
# P(µ − kσ < X < µ + kσ) =
# = 1 - (P(X < µ − kσ ) + P(X > µ + kσ)) =
# = 1 - 2*P(X > µ + kσ) = 0.99 -> P(X > µ + kσ) = 0.005
# -> P(X < µ + kσ) = 0.995
# x = µ + kσ
x = qnorm(0.995, mean=mu, sd=sigma)
(x - mu)/sigma
for(k in 1:5){
p = pnorm(mu + k*sigma, mean=mu, sd=sigma) -
pnorm(mu - k*sigma, mean=mu, sd=sigma)
print(paste0(k,":",p))
}
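# Note (well-known 68-95-99.7 rule, added for context): the probabilities printed above are
# approx. 0.683, 0.954 and 0.997 for k = 1, 2, 3, so the smallest integer k giving more
# than 0.99 is k = 3, in agreement with the quantile computation above.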
# * Example 7. ####
# At an exhibition, an accompanying film about the life of the author of the exhibited
# works is screened. Its projection starts every 20 minutes. Determine the probability
# that, if you arrive in the screening room at a random time,
# Y ... time until the start of the next projection
# Y ~ Ro(a=0, b=20)
a = 0
b = 20
# ** a) ####
# you will not wait more than 5 minutes for the start of the film,
# P(X<5)
punif(5, a, b)
# ** b) ####
# you will wait between 5 and 10 minutes,
# P(5<X<10)
punif(10, a, b) - punif(5, a, b)
# ** c) ####
# the mean and the standard deviation of the waiting time for the start of the film.
E_X = (a + b)/2
E_X
D_X = (a - b)^2/12
D_X
sigma_X = sqrt(D_X)
sigma_X
# * Example 8. ####
# In quality control we accept a part only if its dimension lies
# within the limits 26-27 mm. The dimensions of the parts have a normal distribution with
# mean 26.4 mm and standard deviation 0.2 mm. What is the probability that the dimension
# of a part randomly selected for inspection will be within the required limits?
# X ... dimension of the part (mm)
# X ~ N(mu = 26.4,sigma = 0.2)
mu = 26.4
sigma = 0.2
#P(26<X<27)=F(27)-F(26)
pnorm(27, mean=mu, sd=sigma) - pnorm(26, mean=mu, sd=sigma)
# * Example 9. ####
# The jump length of athlete Jakub, measured in cm, has a normal distribution N(µ1; σ1), where µ1 = 690
# and σ1 = 10. The jump length of athlete Aleš, measured in cm, also has a normal distribution N(µ2;
# σ2), where µ2 = 705 and σ2 = 15. Whoever jumps more than 700 cm in at least
# one of two jumps qualifies for the competition.
# SJ ... length of Jakub's jump
# SJ ~ N(mu = 690,sigma = 10)
mu_J = 690
sigma_J = 10
# SA ... length of Aleš's jump
# SA ~ N(mu = 705,sigma = 15)
mu_A = 705
sigma_A = 15
# J...Jakub's jump is successful (longer than 700 cm)
# A...Aleš's jump is successful (longer than 700 cm)
# P(J)=P(SJ>700)=1-F(700)
P.J = 1-pnorm(700,mean=mu_J,sd=sigma_J)
P.J
# P(A)=P(SA>700)=1-F(700)
P.A = 1-pnorm(700,mean=mu_A, sd=sigma_A)
P.A
# KJ ... Jakub qualifies for the competition,
# P(KJ) = 1-(1-P(J))(1-P(J))
P.KJ=1-(1-P.J)*(1-P.J)
P.KJ
# KA ... Aleš qualifies for the competition,
# P(KA) = 1-(1-P(A))(1-P(A))
P.KA=1-(1-P.A)*(1-P.A)
P.KA
# ** a) ####
# With what probability do both of them qualify for the competition?
# ad a)
P.KJ*P.KA
# ** b) ####
# With what probability does Aleš qualify but Jakub does not?
# ad b)
(1-P.KJ)*P.KA
|
/CV6/cv6.r
|
permissive
|
Atheloses/VSB-S8-PS
|
R
| false | false | 11,568 |
r
|
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/limits.R
\name{limits}
\alias{limits}
\title{Creates the limits
\code{limits} creates the limits}
\usage{
limits(d, x, xmin, xmax, log)
}
\description{
Creates the limits
\code{limits} creates the limits
}
\keyword{internal}
|
/man/limits.Rd
|
no_license
|
alexholcombe/quickpsy
|
R
| false | false | 312 |
rd
|
|
# figure out best way to calculate plus-minus
data(pbp)
library(dplyr)
game_df <- pbp %>%
filter(points > 0) %>%
arrange(period_sequence, desc(clock)) %>%
select(home_team_id, away_team_id, event_team_id,
matches("(home|away)(.*)id"), points) %>%
mutate(points = as.numeric(points))
home_players <- game_df %>%
select(matches("home_player"))
home_players <- t(apply(home_players, 1, sort))
game_df <- game_df %>%
mutate(home_player_one_id = home_players[, 1],
home_player_two_id = home_players[, 2],
home_player_three_id = home_players[, 3],
home_player_four_id = home_players[, 4],
home_player_five_id = home_players[, 5])
away_players <- game_df %>%
select(matches("away_player"))
away_players <- t(apply(away_players, 1, sort))
game_df <- game_df %>%
mutate(away_player_one_id = away_players[, 1],
away_player_two_id = away_players[, 2],
away_player_three_id = away_players[, 3],
away_player_four_id = away_players[, 4],
away_player_five_id = away_players[, 5])
lineup_df <- game_df %>%
select(matches("(home|away)(.*)id")) %>%
unique() %>%
na.omit()
lineup_df <- lineup_df %>% select(matches("player"))
ulineups <- lineup_df %>%
select(matches("^player_(.*)")) %>%
duplicated()
lineup_df <- lineup_df %>%
filter(!ulineups) %>%
mutate(lineup_id = 1:n())
# join on the shared player-id columns (natural join on all common columns)
plus_minus_df <- left_join(game_df, lineup_df) %>%
mutate(plus_minus = ifelse(event_team_id == home_team_id, points, -1*points)) %>%
group_by(lineup_id) %>%
select(lineup_id, plus_minus) %>%
summarise(plus_minus = sum(plus_minus))
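# Hedged sketch (not part of the original draft): one way to inspect the result is to
# attach the plus-minus totals back onto the lineup table and rank the lineups.
lineup_plus_minus <- lineup_df %>%
  left_join(plus_minus_df, by = "lineup_id") %>%
  arrange(desc(plus_minus))
head(lineup_plus_minus)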
pbp <- query_db("select *
from sportradar_play_by_play
where game_id = any(array(select game_id
from sportradar_schedule
where season_year = 2017
and season_type = 'REG'
and league_alias = 'NBA'))")
schedule <- query_db("select * from sportradar_schedule
where season_year = 2017
and season_type = 'REG'
and league_alias = 'NBA'")
postgres_array(players_vec) -> players_array
player_profiles <- query_db(sprintf("select * from sportradar_player_profile
where player_id = any(%s)
and season_year = 2017", players_array))
|
/R/rough-draft.R
|
no_license
|
jwmortensen/roster
|
R
| false | false | 2,378 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fetch_das1_datafronttargets.R
\name{fetch_das1_datafronttargets}
\alias{fetch_das1_datafronttargets}
\title{Fetch Das1 Front Targets Data}
\usage{
fetch_das1_datafronttargets(con, deviceID, noLimit = FALSE)
}
\arguments{
\item{con}{PostgreSQL db connection object}
\item{deviceID}{optional search parameter, Device ID}
}
\value{
DataFrame representing data from the das1_datafrontobjects table
}
\examples{
fetch_das1_datafronttargets(con, deviceID = 17103)
}
|
/man/fetch_das1_datafronttargets.Rd
|
no_license
|
cmarkymark/SafeD
|
R
| false | true | 539 |
rd
|
|
corr <- function(directory, threshold=0) {
dat <- complete(directory)
ids = dat[dat["nobs"] > threshold, ]$id
corre <- numeric() ## initialize an empty numeric vector to collect the correlations
for (i in ids) {
corrdata <- read.csv(paste(directory, "/", formatC(i, width = 3, flag = "0"),
".csv", sep = ""))
corrdata <- corrdata[complete.cases(corrdata), ] ##subset complete cases
corre <- c(corre, cor(corrdata$sulfate, corrdata$nitrate)) ##correlation between sulfate and nitrate in dat
}
return(corre)
}
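## Hypothetical usage (the directory name is a placeholder and complete() from the same
## assignment must be available on the search path):
## cr <- corr("specdata", threshold = 150)
## summary(cr)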
|
/corr.R
|
no_license
|
4ringutan/R-programming
|
R
| false | false | 585 |
r
|
|
\name{dief}
\alias{dief}
\docType{package}
\title{
Tools for Computing Diefficiency Metrics
}
\description{
An implementation of the metrics dief@t and dief@k to measure the diefficiency (or continuous efficiency) of incremental approaches, see Acosta, M., Vidal, M. E., & Sure-Vetter, Y. (2017) <doi:10.1007/978-3-319-68204-4_1>. The metrics dief@t and dief@k allow for measuring the diefficiency during an elapsed time period t or while k answers are produced, respectively. dief@t and dief@k rely on the computation of the area under the curve of answer traces, and thus capturing the answer rate concentration over a time interval.
}
\details{
\tabular{ll}{
Package: \tab dief\cr
Type: \tab Package\cr
Version: \tab 1.2\cr
Date: \tab 2017-10-30\cr
License: \tab MIT\cr
}
}
\examples{
# This example uses the answer traces provided in the package.
# These traces record the answers produced by three approaches "Selective",
# "Not Adaptive", "Random" when executing the test "Q9.sparql"
data(traces)
# Plot answer traces for test "Q9.sparql"
plotAnswerTrace(traces, "Q9.sparql")
# Compute dief@t with t the time where the slowest approach produced the last answer.
dieft(traces, "Q9.sparql")
# Compute dief@t after 7.5 time units (seconds) of execution.
dieft(traces, "Q9.sparql", 7.5)
}
\author{
Maribel Acosta
Maintainer: Maribel Acosta <maribel.acosta@kit.edu>
}
\references{
Maribel Acosta, Maria-Esther Vidal, and York Sure-Vetter. "Diefficiency metrics: Measuring the continuous efficiency of query processing approaches." In International Semantic Web Conference, pp. 3-19. Springer, Cham, 2017.
}
\keyword{ package }
|
/man/dief.Rd
|
permissive
|
dachafra/dief
|
R
| false | false | 1,648 |
rd
|
\name{dief}
\alias{dief}
\docType{package}
\title{
Tools for Computing Diefficiency Metrics
}
\description{
An implementation of the metrics dief@t and dief@k to measure the diefficiency (or continuous efficiency) of incremental approaches, see Acosta, M., Vidal, M. E., & Sure-Vetter, Y. (2017) <doi:10.1007/978-3-319-68204-4_1>. The metrics dief@t and dief@k allow for measuring the diefficiency during an elapsed time period t or while k answers are produced, respectively. dief@t and dief@k rely on the computation of the area under the curve of answer traces, thus capturing the answer rate concentration over a time interval.
}
\details{
\tabular{ll}{
Package: \tab dief\cr
Type: \tab Package\cr
Version: \tab 1.2\cr
Date: \tab 2017-10-30\cr
License: \tab MIT\cr
}
}
\examples{
# This example uses the answer traces provided in the package.
# These traces record the answers produced by three approaches "Selective",
# "Not Adaptive", "Random" when executing the test "Q9.sparql"
data(traces)
# Plot answer traces for test "Q9.sparql"
plotAnswerTrace(traces, "Q9.sparql")
# Compute dief@t with t the time where the slowest approach produced the last answer.
dieft(traces, "Q9.sparql")
# Compute dief@t after 7.5 time units (seconds) of execution.
dieft(traces, "Q9.sparql", 7.5)
}
\author{
Maribel Acosta
Maintainer: Maribel Acosta <maribel.acosta@kit.edu>
}
\references{
Maribel Acosta, Maria-Esther Vidal, and York Sure-Vetter. "Diefficiency metrics: Measuring the continuous efficiency of query processing approaches." In International Semantic Web Conference, pp. 3-19. Springer, Cham, 2017.
}
\keyword{ package }
|
\name{ToxLim-package}
\alias{ToxLim-package}
\alias{ToxLim}
\docType{package}
\title{
Bioaccumulation Modelling Using LIM
}
\description{
Incorporating Ecological Data and Associated Uncertainty in
Bioaccumulation Modeling
}
\details{
\tabular{ll}{
Package: \tab ToxLim\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2010-04-24\cr
License: \tab GNU Public License 2 or above\cr
}
ToxLim predicts the internal concentrations of hydrophobic chemicals in
aquatic organisms.
}
\author{
Frederik de Laender (maintainer)
Karline Soetaert
}
\references{
De Laender, F., Van Oevelen, D., Middelburg, J.J. and Soetaert, K., 2009.
Incorporating Ecological Data and Associated Uncertainty in
Bioaccumulation Modeling: Methodology Development and Case Study.
Environ. Sci. Technol., 2009, 43 (7), 2620-2626.
}
\seealso{
\code{\link{LimOmega}}, the main function
\code{LIMlake}, \code{LIMlakeFish}, \code{LIMbarents}, the input food webs.
}
\examples{
\dontrun{
## show examples (see respective help pages for details)
example(LimOmega)
example(LIMlake)
example(LIMbarents)
}
}
\keyword{ package }
|
/man/ToxLim.Rd
|
no_license
|
cran/ToxLim
|
R
| false | false | 1,199 |
rd
|
\name{ToxLim-package}
\alias{ToxLim-package}
\alias{ToxLim}
\docType{package}
\title{
Bioaccumulation Modelling Using LIM
}
\description{
Incorporating Ecological Data and Associated Uncertainty in
Bioaccumulation Modeling
}
\details{
\tabular{ll}{
Package: \tab ToxLim\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2010-04-24\cr
License: \tab GNU Public License 2 or above\cr
}
ToxLim predicts the internal concentrations of hydrophobic chemicals in
aquatic organisms.
}
\author{
Frederik de Laender (maintainer)
Karline Soetaert
}
\references{
De Laender, F., Van Oevelen, D., Middelburg, J.J. and Soetaert, K., 2009.
Incorporating Ecological Data and Associated Uncertainty in
Bioaccumulation Modeling: Methodology Development and Case Study.
Environ. Sci. Technol., 2009, 43 (7), 2620-2626.
}
\seealso{
\code{\link{LimOmega}}, the main function
\code{LIMlake}, \code{LIMlakeFish}, \code{LIMbarents}, the input food webs.
}
\examples{
\dontrun{
## show examples (see respective help pages for details)
example(LimOmega)
example(LIMlake)
example(LIMbarents)
}
}
\keyword{ package }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tex_preview.R
\name{tex_preview}
\alias{tex_preview}
\alias{texPreview}
\title{Render and Preview snippets of TeX in R Viewer}
\usage{
tex_preview(obj, tex_lines = NULL, stem = NULL,
fileDir = tex_opts$get("fileDir"), overwrite = TRUE,
margin = tex_opts$get("margin"),
imgFormat = tex_opts$get("imgFormat"),
returnType = tex_opts$get("returnType"),
resizebox = tex_opts$get("resizebox"), usrPackages = NULL,
engine = tex_opts$get("engine"), cleanup = tex_opts$get("cleanup"),
keep_pdf = FALSE, tex_message = FALSE,
density = tex_opts$get("density"), svg_max = tex_opts$get("svg_max"),
print.xtable.opts = tex_opts$get("print.xtable.opts"),
opts.html = tex_opts$get("opts.html"), ...)
}
\arguments{
\item{obj}{character, TeX script}
\item{tex_lines}{vector of character, in case of special needs, instead of asking
texPreview to build up, you may choose to pass in the contents of the
complete LaTeX file directly. It should be a vector of character with each
element as a line of raw TeX code.}
\item{stem}{character, name to use in output files, Default: NULL}
\item{fileDir}{character, output destination. If NULL a temp.dir()
will be used and no output will be saved, Default: tex_opts$get('fileDir')}
\item{overwrite}{logical, controls if overwriting of output stem* files given their existences}
\item{margin}{table margin for pdflatex call, Default: tex_opts$get('margin')}
\item{imgFormat}{character, defines the type of image the PDF is
converted to, Default: tex_opts$get('imgFormat')}
\item{returnType}{character, one of "viewer", "html", or "tex" determining appropriate
return type for the rendering process, Default: tex_opts$get('returnType')}
\item{resizebox}{logical, forces a tabular tex object to be constrained on the
margins of the document, Default: tex_opts$get('resizebox')}
\item{usrPackages}{character, vector of usepackage commands, see details for string format}
\item{engine}{character, specifies which latex to pdf engine to use
('pdflatex','xelatex','lualatex'), Default: tex_opts$get('engine')}
\item{cleanup}{character, vector of file extensions to clean up after building pdf,
Default: tex_opts$get('cleanup')}
\item{keep_pdf}{logical, controls if the rendered pdf file should be kept
or deleted, Default is FALSE}
\item{tex_message}{logical, controls if latex executing messages
are displayed in console. Default is FALSE}
\item{density}{numeric, controls the density of the image. Default is 150: tex_opts$get('density')}
\item{svg_max}{numeric, maximum svg file size allowable to preview, Default: tex_opts$get('svg_max')}
\item{print.xtable.opts}{list, contains arguments to pass to print.table,
relevant only if xtable is used as the input, Default: tex_opts$get('print.xtable.opts')}
\item{opts.html}{list, html options, Default: tex_opts$get('opts.html')}
\item{...}{passed to \code{\link[base]{system}}}
}
\description{
Input TeX script into the function and it renders a pdf and converts it to an image which is sent to the Viewer.
}
\details{
The function assumes the system has pdflatex installed and it is defined in the PATH. The function does not return anything to R.
If fileDir is specified then two files are written to the directory. An image file of the name stem with the extension specified in imgFormat.
The default extension is png. The second file is the TeX script used to create the output of the name stem.tex. If you do not wish to view the
console output, pass the corresponding arguments to \code{...}, e.g., ignore.stdout=TRUE.
usrPackages accepts a vector of character strings built by the function \code{\link{build_usepackage}}, of the form
\\\\usepackage[option1,option2,...]\{package_name\}, see the TeX wikibook for more information \url{https://en.wikibooks.org/wiki/LaTeX/Document_Structure#Packages}.
}
\examples{
data('iris')
if(interactive()){
#use xtable to create tex output
tex_preview(obj = xtable::xtable(head(iris,10)))
#use knitr kable to create tex output
tex_preview(knitr::kable(mtcars, "latex"))
tex='\\\\begin{tabular}{llr}
\\\\hline
\\\\multicolumn{2}{c}{Item} \\\\\\\\
\\\\cline{1-2}
Animal & Description & Price (\\\\$) \\\\\\\\
\\\\hline
Gnat & per gram & 13.65 \\\\\\\\
& each & 0.01 \\\\\\\\
Gnu & stuffed & 92.50 \\\\\\\\
Emu & stuffed & 33.33 \\\\\\\\
Armadillo & frozen & 8.99 \\\\\\\\
\\\\hline
\\\\end{tabular}'
tex_preview(obj = tex,stem = 'eq',imgFormat = 'svg')
tikz_example <- system.file('examples/tikz/credit-rationing.tex',package = 'texPreview')
tikzEx=readLines(tikz_example,warn = FALSE)
#use tex_lines parameter to pass full document
tex_preview(tex_lines = tikzEx)
#use texPreview preamble to build document chunks
usetikz <- paste(tikzEx[14:23],collapse="\\n")
bodytikz <- paste(tikzEx[25:90],collapse="\\n")
tex_preview(obj = bodytikz,usrPackages = build_usepackage(pkg = 'tikz',uselibrary = usetikz))
}
}
|
/man/tex_preview.Rd
|
no_license
|
Inferrator/texPreview
|
R
| false | true | 5,022 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tex_preview.R
\name{tex_preview}
\alias{tex_preview}
\alias{texPreview}
\title{Render and Preview snippets of TeX in R Viewer}
\usage{
tex_preview(obj, tex_lines = NULL, stem = NULL,
fileDir = tex_opts$get("fileDir"), overwrite = TRUE,
margin = tex_opts$get("margin"),
imgFormat = tex_opts$get("imgFormat"),
returnType = tex_opts$get("returnType"),
resizebox = tex_opts$get("resizebox"), usrPackages = NULL,
engine = tex_opts$get("engine"), cleanup = tex_opts$get("cleanup"),
keep_pdf = FALSE, tex_message = FALSE,
density = tex_opts$get("density"), svg_max = tex_opts$get("svg_max"),
print.xtable.opts = tex_opts$get("print.xtable.opts"),
opts.html = tex_opts$get("opts.html"), ...)
}
\arguments{
\item{obj}{character, TeX script}
\item{tex_lines}{vector of character, in case of special needs, instead of asking
texPreview to build up, you may choose to pass in the contents of the
complete LaTeX file directly. It should be a vector of character with each
element as a line of raw TeX code.}
\item{stem}{character, name to use in output files, Default: NULL}
\item{fileDir}{character, output destination. If NULL a temp.dir()
will be used and no output will be saved, Default: tex_opts$get('fileDir')}
\item{overwrite}{logical, controls if overwriting of output stem* files given their existences}
\item{margin}{table margin for pdflatex call, Default: tex_opts$get('margin')}
\item{imgFormat}{character, defines the type of image the PDF is
converted to, Default: tex_opts$get('imgFormat')}
\item{returnType}{character, one of "viewer", "html", or "tex" determining appropriate
return type for the rendering process, Default: tex_opts$get('returnType')}
\item{resizebox}{logical, forces a tabular tex object to be constrained on the
margins of the document, Default: tex_opts$get('resizebox')}
\item{usrPackages}{character, vector of usepackage commands, see details for string format}
\item{engine}{character, specifies which latex to pdf engine to use
('pdflatex','xelatex','lualatex'), Default: tex_opts$get('engine')}
\item{cleanup}{character, vector of file extensions to clean up after building pdf,
Default: tex_opts$get('cleanup')}
\item{keep_pdf}{logical, controls if the rendered pdf file should be kept
or deleted, Default is FALSE}
\item{tex_message}{logical, controls if latex executing messages
are displayed in console. Default is FALSE}
\item{density}{numeric, controls the density of the image. Default is 150: tex_opts$get('density')}
\item{svg_max}{numeric, maximum svg file size allowable to preview, Default: tex_opts$get('svg_max')}
\item{print.xtable.opts}{list, contains arguments to pass to print.table,
relevant only if xtable is used as the input, Default: tex_opts$get('print.xtable.opts')}
\item{opts.html}{list, html options, Default: tex_opts$get('opts.html')}
\item{...}{passed to \code{\link[base]{system}}}
}
\description{
Input TeX script into the function and it renders a pdf and converts it to an image which is sent to the Viewer.
}
\details{
The function assumes the system has pdflatex installed and it is defined in the PATH. The function does not return anything to R.
If fileDir is specified then two files are written to the directory. An image file of the name stem with the extension specified in imgFormat.
The default extension is png. The second file is the TeX script used to create the output of the name stem.tex. If you do not wish to view the
console output, pass the corresponding arguments to \code{...}, e.g., ignore.stdout=TRUE.
usrPackages accepts a vector of character strings built by the function \code{\link{build_usepackage}}, of the form
\\\\usepackage[option1,option2,...]\{package_name\}, see the TeX wikibook for more information \url{https://en.wikibooks.org/wiki/LaTeX/Document_Structure#Packages}.
}
\examples{
data('iris')
if(interactive()){
#use xtable to create tex output
tex_preview(obj = xtable::xtable(head(iris,10)))
#use knitr kable to create tex output
tex_preview(knitr::kable(mtcars, "latex"))
tex='\\\\begin{tabular}{llr}
\\\\hline
\\\\multicolumn{2}{c}{Item} \\\\\\\\
\\\\cline{1-2}
Animal & Description & Price (\\\\$) \\\\\\\\
\\\\hline
Gnat & per gram & 13.65 \\\\\\\\
& each & 0.01 \\\\\\\\
Gnu & stuffed & 92.50 \\\\\\\\
Emu & stuffed & 33.33 \\\\\\\\
Armadillo & frozen & 8.99 \\\\\\\\
\\\\hline
\\\\end{tabular}'
tex_preview(obj = tex,stem = 'eq',imgFormat = 'svg')
tikz_example <- system.file('examples/tikz/credit-rationing.tex',package = 'texPreview')
tikzEx=readLines(tikz_example,warn = FALSE)
#use tex_lines parameter to pass full document
tex_preview(tex_lines = tikzEx)
#use texPreview preamble to build document chunks
usetikz <- paste(tikzEx[14:23],collapse="\\n")
bodytikz <- paste(tikzEx[25:90],collapse="\\n")
tex_preview(obj = bodytikz,usrPackages = build_usepackage(pkg = 'tikz',uselibrary = usetikz))
}
}
|
library(plyr)
library(tidyverse)
library(arrow)
library(ggplot2)
cell_meta <- arrow::read_parquet("intermediate_data/cell_meta.parquet") %>%
dplyr::transmute(
condition = dplyr::case_when(
Condition == "Treatment" ~ `Product Name`,
Condition == "PC" ~ "Positive Control",
Condition == "NC" ~ "Negative Control"),
dose_uM = Concentration,
log_dose_uM = log10(Concentration),
plate_id = Metadata_PlateID,
row = Metadata_WellID %>%
stringr::str_extract("^[A-Z]") %>%
purrr::map_int(~which(LETTERS==., arr.ind=T)),
column = Metadata_WellID %>%
stringr::str_extract("[0-9]+$") %>%
as.integer(),
is_control = Condition %in% c("PC", "NC"))
cluster_labels <- arrow::read_parquet("intermediate_data/full_normed_embedding_pca200_umap2_spectral_30_0.0_euclid/hdbscan_clustering_min100.parquet")
cell_clusters <- dplyr::bind_cols(cell_meta, cluster_labels)
# cluster sizes
cell_clusters %>%
dplyr::count(cluster_label, sort=T) %>%
data.frame
cell_clusters %>%
  dplyr::count(condition, dose_uM, cluster_label) %>%
tidyr::pivot_wider(names_from=cluster_label, values_from=n)
treatment_by_cluster <- cell_clusters %>%
dplyr::count(is_control, condition, log_dose_uM, cluster_label)
plot <- ggplot2::ggplot(
data=treatment_by_cluster %>%
dplyr::arrange(condition) %>%
dplyr::mutate(
y = paste0(condition, " | ", signif(log_dose_uM, 3)),
x = as.character(cluster_label)))+
ggplot2::theme_bw() +
ggplot2::geom_raster(
mapping=ggplot2::aes(
x=x,
y=y,
fill=log(n+1))) +
ggplot2::ggtitle(
label="Treatment by HDBSCAN CLusters",
subtitle="113 Steatosis2020") +
ggplot2::scale_fill_continuous("Cell Count") +
ggplot2::scale_y_discrete("Treatment | Concentration uM") +
ggplot2::scale_x_discrete("HDBSCAN Cluster ID")
ggplot2::ggsave(
filename="product/figures/full_normed_embedding_pca200_umap2_spectral_30_0.0_euclid_hdbscan_clustering_min100.pdf",
plot=plot,
width=15,
height=15)
# treatment by cluster count matrix
plot <- ggplot2::ggplot(
data=treatment_by_cluster %>%
dplyr::filter(is_control) %>%
dplyr::arrange(condition) %>%
dplyr::mutate(
y = paste0(condition, " | ", signif(log_dose_uM, 3)),
x = as.character(cluster_label)))+
ggplot2::theme_bw() +
ggplot2::geom_raster(
mapping=ggplot2::aes(
x=x,
y=y,
fill=log(n+1))) +
ggplot2::ggtitle(
label="Treatment by HDBSCAN CLusters",
subtitle="113 Steatosis2020") +
ggplot2::scale_fill_continuous("Cell Count") +
ggplot2::scale_y_discrete("Treatment | Concentration uM") +
ggplot2::scale_x_discrete("HDBSCAN Cluster ID")
ggplot2::ggsave(
filename="product/figures/full_normed_embedding_pca200_umap2_spectral_30_0.0_euclid_hdbscan_clustering_min100_control.pdf",
plot=plot,
width=15,
height=15)
# Console output pasted at the end of the original script: the 20 unique dose
# values (uM), ranging from 0.01990000 to 9.99776025, each listed twice.
|
/vignettes/Steatosis2020/DR5/scripts/4.2_analyze_clusters.R
|
permissive
|
MedChemMcCarty/MPLearn
|
R
| false | false | 3,467 |
r
|
library(plyr)
library(tidyverse)
library(arrow)
library(ggplot2)
cell_meta <- arrow::read_parquet("intermediate_data/cell_meta.parquet") %>%
dplyr::transmute(
condition = dplyr::case_when(
Condition == "Treatment" ~ `Product Name`,
Condition == "PC" ~ "Positive Control",
Condition == "NC" ~ "Negative Control"),
dose_uM = Concentration,
log_dose_uM = log10(Concentration),
plate_id = Metadata_PlateID,
row = Metadata_WellID %>%
stringr::str_extract("^[A-Z]") %>%
purrr::map_int(~which(LETTERS==., arr.ind=T)),
column = Metadata_WellID %>%
stringr::str_extract("[0-9]+$") %>%
as.integer(),
is_control = Condition %in% c("PC", "NC"))
cluster_labels <- arrow::read_parquet("intermediate_data/full_normed_embedding_pca200_umap2_spectral_30_0.0_euclid/hdbscan_clustering_min100.parquet")
cell_clusters <- dplyr::bind_cols(cell_meta, cluster_labels)
# cluster sizes
cell_clusters %>%
dplyr::count(cluster_label, sort=T) %>%
data.frame
cell_clusters %>%
  dplyr::count(condition, dose_uM, cluster_label) %>%
tidyr::pivot_wider(names_from=cluster_label, values_from=n)
treatment_by_cluster <- cell_clusters %>%
dplyr::count(is_control, condition, log_dose_uM, cluster_label)
plot <- ggplot2::ggplot(
data=treatment_by_cluster %>%
dplyr::arrange(condition) %>%
dplyr::mutate(
y = paste0(condition, " | ", signif(log_dose_uM, 3)),
x = as.character(cluster_label)))+
ggplot2::theme_bw() +
ggplot2::geom_raster(
mapping=ggplot2::aes(
x=x,
y=y,
fill=log(n+1))) +
ggplot2::ggtitle(
label="Treatment by HDBSCAN CLusters",
subtitle="113 Steatosis2020") +
ggplot2::scale_fill_continuous("Cell Count") +
ggplot2::scale_y_discrete("Treatment | Concentration uM") +
ggplot2::scale_x_discrete("HDBSCAN Cluster ID")
ggplot2::ggsave(
filename="product/figures/full_normed_embedding_pca200_umap2_spectral_30_0.0_euclid_hdbscan_clustering_min100.pdf",
plot=plot,
width=15,
height=15)
# treatment by cluster count matrix
plot <- ggplot2::ggplot(
data=treatment_by_cluster %>%
dplyr::filter(is_control) %>%
dplyr::arrange(condition) %>%
dplyr::mutate(
y = paste0(condition, " | ", signif(log_dose_uM, 3)),
x = as.character(cluster_label)))+
ggplot2::theme_bw() +
ggplot2::geom_raster(
mapping=ggplot2::aes(
x=x,
y=y,
fill=log(n+1))) +
ggplot2::ggtitle(
label="Treatment by HDBSCAN CLusters",
subtitle="113 Steatosis2020") +
ggplot2::scale_fill_continuous("Cell Count") +
ggplot2::scale_y_discrete("Treatment | Concentration uM") +
ggplot2::scale_x_discrete("HDBSCAN Cluster ID")
ggplot2::ggsave(
filename="product/figures/full_normed_embedding_pca200_umap2_spectral_30_0.0_euclid_hdbscan_clustering_min100_control.pdf",
plot=plot,
width=15,
height=15)
# Console output pasted at the end of the original script: the 20 unique dose
# values (uM), ranging from 0.01990000 to 9.99776025, each listed twice.
|
library(pheatmap)
library(RColorBrewer)
load('environment/accuracy_feature/vs_nor_comb.RData')
load('environment/accuracy_feature/tumor_ind_vs.RData')
load('environment/stages.level.comb.RData')
load('environment/sample_info_tumor_rep_normal.RData')
load('environment/accuracy_feature/updated/net_features_trial.RData')
load('environment/accuracy_feature/updated/net_features_updated.RData')
source('main/updated/initialisation.R')
stage.com.norm <- c()
for(stage in sample.info.all.rep$stage.type)
{
if(stage == 'N')
stage.com.norm <- c(stage.com.norm, 'N')
else if(stage == 'stage i' | stage == 'stage ii')
stage.com.norm <- c(stage.com.norm, 'early')
else
stage.com.norm <- c(stage.com.norm, 'late')
}
sample.info.all.rep$stage.comb.norm <- as.factor(stage.com.norm)
col <- colorRampPalette(rev(brewer.pal(9, 'RdYlBu')))(100)
vst_tumor_tum <- vs_normal_comb_reported[tumor.ind.vs, ]
genes.clus <- net.features.trial$
data.clus <- vst_tumor_tum[ , genes.clus]
breaks = c(seq(5,7, length.out = 40), seq(7.1,24, length.out = 60))
clus_rows = run_hclust_on_a_matrix(data.clus)
clus_cols = run_hclust_on_a_matrix(t(data.clus))
pheatmap(t(data.clus),
annotation_col = create.ordered.annotation(stages.levels.comb,
rownames(data.clus)),
# cluster_rows = clus_rows,
# cluster_cols = clus_cols,
breaks = breaks,
# main = 'Heatmap using intersection for atleast 2 and 1 fold',
show_rownames = F, show_colnames = F)
##On originial data
genes.orig <- net.features.updated$
data.orig <- vst_tumor_tum[test.trial.ind,genes.orig]
pheatmap(t(data.orig),
annotation_col = create.ordered.annotation(stages.levels.comb[test.trial.ind],
rownames(data.orig)),
# cluster_rows = clus_rows,
# cluster_cols = clus_cols,
breaks = breaks,
# main = 'Heatmap using intersection for atleast 2 and 1 fold',
show_rownames = F, show_colnames = F)
plotPCA()
loa
|
/papillary/main/updated/clustering.R
|
no_license
|
xulijunji/Stage-Prediction-of-Cancer
|
R
| false | false | 2,129 |
r
|
library(pheatmap)
library(RColorBrewer)
load('environment/accuracy_feature/vs_nor_comb.RData')
load('environment/accuracy_feature/tumor_ind_vs.RData')
load('environment/stages.level.comb.RData')
load('environment/sample_info_tumor_rep_normal.RData')
load('environment/accuracy_feature/updated/net_features_trial.RData')
load('environment/accuracy_feature/updated/net_features_updated.RData')
source('main/updated/initialisation.R')
stage.com.norm <- c()
for(stage in sample.info.all.rep$stage.type)
{
if(stage == 'N')
stage.com.norm <- c(stage.com.norm, 'N')
else if(stage == 'stage i' | stage == 'stage ii')
stage.com.norm <- c(stage.com.norm, 'early')
else
stage.com.norm <- c(stage.com.norm, 'late')
}
sample.info.all.rep$stage.comb.norm <- as.factor(stage.com.norm)
col <- colorRampPalette(rev(brewer.pal(9, 'RdYlBu')))(100)
vst_tumor_tum <- vs_normal_comb_reported[tumor.ind.vs, ]
genes.clus <- net.features.trial$
data.clus <- vst_tumor_tum[ , genes.clus]
breaks = c(seq(5,7, length.out = 40), seq(7.1,24, length.out = 60))
clus_rows = run_hclust_on_a_matrix(data.clus)
clus_cols = run_hclust_on_a_matrix(t(data.clus))
pheatmap(t(data.clus),
annotation_col = create.ordered.annotation(stages.levels.comb,
rownames(data.clus)),
# cluster_rows = clus_rows,
# cluster_cols = clus_cols,
breaks = breaks,
# main = 'Heatmap using intersection for atleast 2 and 1 fold',
show_rownames = F, show_colnames = F)
##On originial data
genes.orig <- net.features.updated$
data.orig <- vst_tumor_tum[test.trial.ind,genes.orig]
pheatmap(t(data.orig),
annotation_col = create.ordered.annotation(stages.levels.comb[test.trial.ind],
rownames(data.orig)),
# cluster_rows = clus_rows,
# cluster_cols = clus_cols,
breaks = breaks,
# main = 'Heatmap using intersection for atleast 2 and 1 fold',
show_rownames = F, show_colnames = F)
plotPCA()
loa
|
# multimodal functions
rastrigin=function(z){
y=0
for(i in 1:length(z)){
y=y+(5.12/100*z[i])^2-10*cos(2*pi*(z[i]*5.12/100))
}
y=y+20
return(y)
}
rosenbrock=function(z){
y=0
for(i in 1:(length(z)-1)){
y=y+100*(2.048/100*z[i+1]-(2.048/100*z[i])^2)^2+100*(1-2.048/100*z[i])^2
}
return(y)
}
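# Example evaluation (added): both functions expect coordinates on the
# [-100, 100] scale and rescale them internally; the test point is arbitrary.
z_test <- c(30, -45)
rastrigin(z_test)
rosenbrock(z_test)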
|
/JoApp/multimodal.R
|
no_license
|
Jorub/Optim_test
|
R
| false | false | 303 |
r
|
# multimodal functions
rastrigin=function(z){
y=0
for(i in 1:length(z)){
y=y+(5.12/100*z[i])^2-10*cos(2*pi*(z[i]*5.12/100))
}
y=y+20
return(y)
}
rosenbrock=function(z){
y=0
for(i in 1:(length(z)-1)){
y=y+100*(2.048/100*z[i+1]-(2.048/100*z[i])^2)^2+100*(1-2.048/100*z[i])^2
}
return(y)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HelperFunctions.R
\name{pipelineCluster}
\alias{pipelineCluster}
\title{set up cluster for pipeline}
\usage{
pipelineCluster(maxCores = NULL, reset = FALSE, outfile = NULL)
}
\arguments{
\item{maxCores}{maximum number of cores to use; if NULL, set to half of the available cores}
\item{reset}{reset cluster, use only if already existing}
\item{outfile}{log outputs}
}
\description{
Cluster object saved as cl
}
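\examples{
\dontrun{
# Illustrative call (added); the core count is an arbitrary choice.
pipelineCluster(maxCores = 4)
}
}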
|
/RCode1/ORFikPipeline/man/pipelineCluster.Rd
|
permissive
|
Roleren/UORFome
|
R
| false | true | 473 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HelperFunctions.R
\name{pipelineCluster}
\alias{pipelineCluster}
\title{set up cluster for pipeline}
\usage{
pipelineCluster(maxCores = NULL, reset = FALSE, outfile = NULL)
}
\arguments{
\item{maxCores}{maximum number of cores to use; if NULL, set to half of the available cores}
\item{reset}{reset cluster, use only if already existing}
\item{outfile}{log outputs}
}
\description{
Cluster object saved as cl
}
|
context("Cleaning LOCATION_NAME")
df <- quakeR::quakes_df %>%
quakeR::eq_location_clean()
test_that("Location is cleaned", {
expect_equal(sum(stringr::str_detect(df$LOCATION_NAME, "China"), na.rm = T)<30, T)
expect_equal(sum(stringr::str_count(purrr::map_chr(df$LOCATION_NAME, ~stringr::str_sub(., 3, 3)), "A"), na.rm = T)<10, T)
})
|
/tests/testthat/test_eq_location_clean.R
|
permissive
|
zumthor86/quakeR
|
R
| false | false | 346 |
r
|
context("Cleaning LOCATION_NAME")
df <- quakeR::quakes_df %>%
quakeR::eq_location_clean()
test_that("Location is cleaned", {
expect_equal(sum(stringr::str_detect(df$LOCATION_NAME, "China"), na.rm = T)<30, T)
expect_equal(sum(stringr::str_count(purrr::map_chr(df$LOCATION_NAME, ~stringr::str_sub(., 3, 3)), "A"), na.rm = T)<10, T)
})
|
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_result_shared_1p.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/ERPRHER2GRADE_fixed_baseline/result/onco_result_shared_1p.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_result_only_shared_1p.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/ERPRHER2GRADE_fixed_baseline/result/onco_result_only_shared_1p.Rdata")
#takes the basic SNPs information: first 8 columns and the last column;
meta_result_shared_1p <- icog_result_shared_1p[,c(1:8,ncol(icog_result_shared_1p))]
meta_result_shared_1p_icog_only <- icog_result_only_shared_1p[,c(1:8,ncol(icog_result_shared_1p))]
meta_result_shared_1p_onco_only <- onco_result_only_shared_1p[,c(1:8,ncol(icog_result_shared_1p))]
#combine them together
meta_result_shared_1p_no_pvalue <- rbind(meta_result_shared_1p,meta_result_shared_1p_icog_only,meta_result_shared_1p_onco_only)
#save(meta_result_shared_1p_no_pvalue,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_no_pvalue.Rdata")
#load the p-value results from the 1000 subjobs
n <- nrow( meta_result_shared_1p_no_pvalue)
p.value <- rep(0,n)
total <- 0
for(i1 in 1:1000){
load(paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/p_value_sub",i1,".Rdata"))
temp <- length(pvalue_sub)
p.value[total+(1:temp)] <- pvalue_sub
total <- total + temp
}
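#added sanity check: the 1000 subjobs should fill the p-value vector exactly once
stopifnot(total == n)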
#combine the SNPs information with the p-value
meta_result_shared_1p <- cbind(meta_result_shared_1p_no_pvalue,p.value)
#save the results
save(meta_result_shared_1p,file=paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p.Rdata"))
# fine_mapping <- read.csv("/data/zhangh24/breast_cancer_data_analysis/data/fine_mapping_regions.csv",header= T)
#
#
#
#
#
# idx_cut <- NULL
# start <- fine_mapping$start
# end <- fine_mapping$end
# CHR <- fine_mapping$V3
#
#
# for(i in 1:nrow(fine_mapping)){
# print(i)
# chr_temp <- CHR[i]
# start_temp <- start[i]
# end_temp <- end[i]
# idx <- which(meta_result_shared_1p$CHR==chr_temp&meta_result_shared_1p$position>=start_temp&
# meta_result_shared_1p$position<=end_temp)
# idx_cut <- c(idx_cut,idx)
# }
# ############duplicate variables won't matter
# idx_cut <- unique(idx_cut)
# meta_result_shared_1p_filter <- meta_result_shared_1p[-idx_cut,]
#
# save(meta_result_shared_1p_filter,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_filter_1M.Rdata")
#
#
# new_filter <- read.csv("/data/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2_fixed/result/Filter_based_on_Montse.csv",header=T,stringsAsFactors = F)
# new_filter <- new_filter[1:22,]
# new_filter[,2] <- as.numeric(gsub(",","",new_filter[,2]))
#
# idx_cut <- NULL
#
# position.cut <- 500*10^3
#
# for(i in 1:nrow(new_filter)){
# print(i)
# chr_temp <- new_filter[i,3]
# position_temp <- new_filter[i,2]
# position_low <- position_temp-position.cut
# position_high <- position_temp+position.cut
# idx <- which(meta_result_shared_1p_filter$CHR==chr_temp&meta_result_shared_1p_filter$position>position_low&
# meta_result_shared_1p_filter$position<position_high)
# idx_cut <- c(idx_cut,idx)
# }
# idx_cut <- unique(idx_cut)
# meta_result_shared_1p_filter_Ju <- meta_result_shared_1p_filter[-idx_cut,]
#
# save(meta_result_shared_1p_filter_Ju,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_filter_1M_Ju.Rdata")
#
#
#
#
#
# # idx <- which.min(meta_result_shared_1p_filter_Ju$p.value)
# #
# # meta_result_shared_1p_filter_Ju[idx,]
#
#
# # load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_filter_1M_Ju.Rdata")
# #
# meta_result_shared_1p_filter_Ju_fix <- meta_result_shared_1p_filter_Ju
#
# idx <- which(meta_result_shared_1p_filter_Ju_fix$CHR==2&meta_result_shared_1p_filter_Ju_fix$position== 67902524)
# meta_result_shared_1p_filter_Ju_fix[idx,]
# idx <- which(meta_result_shared_1p_filter_Ju_fix$CHR==11&meta_result_shared_1p_filter_Ju_fix$position== 120233626)
# meta_result_shared_1p_filter_Ju_fix[idx,]
# idx <- which(meta_result_shared_1p_filter_Ju_fix$CHR==18&meta_result_shared_1p_filter_Ju_fix$position== 10354649)
# meta_result_shared_1p_filter_Ju_fix[idx,]
#
#
# load(paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p.Rdata"))
# meta_result_shared_1p <- meta_result_shared_1p[,c(2,11,3,15)]
# write.table(meta_result_shared_1p,file=paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_fixed.txt"),row.names = F,
# quote=F)
#
#
#
#
#
#
#
#
#
|
/whole_genome_age/code/ICOG/7_meta_merge.R
|
no_license
|
andrewhaoyu/breast_cancer_data_analysis
|
R
| false | false | 5,210 |
r
|
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_result_shared_1p.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/ERPRHER2GRADE_fixed_baseline/result/onco_result_shared_1p.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/icog_result_only_shared_1p.Rdata")
load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ONCO/ERPRHER2GRADE_fixed_baseline/result/onco_result_only_shared_1p.Rdata")
#takes the basic SNPs information: first 8 columns and the last column;
meta_result_shared_1p <- icog_result_shared_1p[,c(1:8,ncol(icog_result_shared_1p))]
meta_result_shared_1p_icog_only <- icog_result_only_shared_1p[,c(1:8,ncol(icog_result_shared_1p))]
meta_result_shared_1p_onco_only <- onco_result_only_shared_1p[,c(1:8,ncol(icog_result_shared_1p))]
#combine them together
meta_result_shared_1p_no_pvalue <- rbind(meta_result_shared_1p,meta_result_shared_1p_icog_only,meta_result_shared_1p_onco_only)
#save(meta_result_shared_1p_no_pvalue,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_no_pvalue.Rdata")
#load the p-value results from the 1000 subjobs
n <- nrow( meta_result_shared_1p_no_pvalue)
p.value <- rep(0,n)
total <- 0
for(i1 in 1:1000){
load(paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/p_value_sub",i1,".Rdata"))
temp <- length(pvalue_sub)
p.value[total+(1:temp)] <- pvalue_sub
total <- total + temp
}
#combine the SNPs information with the p-value
meta_result_shared_1p <- cbind(meta_result_shared_1p_no_pvalue,p.value)
#save the results
save(meta_result_shared_1p,file=paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p.Rdata"))
# fine_mapping <- read.csv("/data/zhangh24/breast_cancer_data_analysis/data/fine_mapping_regions.csv",header= T)
#
#
#
#
#
# idx_cut <- NULL
# start <- fine_mapping$start
# end <- fine_mapping$end
# CHR <- fine_mapping$V3
#
#
# for(i in 1:nrow(fine_mapping)){
# print(i)
# chr_temp <- CHR[i]
# start_temp <- start[i]
# end_temp <- end[i]
# idx <- which(meta_result_shared_1p$CHR==chr_temp&meta_result_shared_1p$position>=start_temp&
# meta_result_shared_1p$position<=end_temp)
# idx_cut <- c(idx_cut,idx)
# }
# ############duplicate variables won't matter
# idx_cut <- unique(idx_cut)
# meta_result_shared_1p_filter <- meta_result_shared_1p[-idx_cut,]
#
# save(meta_result_shared_1p_filter,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_filter_1M.Rdata")
#
#
# new_filter <- read.csv("/data/zhangh24/breast_cancer_data_analysis/whole_genome/ICOG/ERPRHER2_fixed/result/Filter_based_on_Montse.csv",header=T,stringsAsFactors = F)
# new_filter <- new_filter[1:22,]
# new_filter[,2] <- as.numeric(gsub(",","",new_filter[,2]))
#
# idx_cut <- NULL
#
# position.cut <- 500*10^3
#
# for(i in 1:nrow(new_filter)){
# print(i)
# chr_temp <- new_filter[i,3]
# position_temp <- new_filter[i,2]
# position_low <- position_temp-position.cut
# position_high <- position_temp+position.cut
# idx <- which(meta_result_shared_1p_filter$CHR==chr_temp&meta_result_shared_1p_filter$position>position_low&
# meta_result_shared_1p_filter$position<position_high)
# idx_cut <- c(idx_cut,idx)
# }
# idx_cut <- unique(idx_cut)
# meta_result_shared_1p_filter_Ju <- meta_result_shared_1p_filter[-idx_cut,]
#
# save(meta_result_shared_1p_filter_Ju,file="/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_filter_1M_Ju.Rdata")
#
#
#
#
#
# # idx <- which.min(meta_result_shared_1p_filter_Ju$p.value)
# #
# # meta_result_shared_1p_filter_Ju[idx,]
#
#
# # load("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_filter_1M_Ju.Rdata")
# #
# meta_result_shared_1p_filter_Ju_fix <- meta_result_shared_1p_filter_Ju
#
# idx <- which(meta_result_shared_1p_filter_Ju_fix$CHR==2&meta_result_shared_1p_filter_Ju_fix$position== 67902524)
# meta_result_shared_1p_filter_Ju_fix[idx,]
# idx <- which(meta_result_shared_1p_filter_Ju_fix$CHR==11&meta_result_shared_1p_filter_Ju_fix$position== 120233626)
# meta_result_shared_1p_filter_Ju_fix[idx,]
# idx <- which(meta_result_shared_1p_filter_Ju_fix$CHR==18&meta_result_shared_1p_filter_Ju_fix$position== 10354649)
# meta_result_shared_1p_filter_Ju_fix[idx,]
#
#
# load(paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p.Rdata"))
# meta_result_shared_1p <- meta_result_shared_1p[,c(2,11,3,15)]
# write.table(meta_result_shared_1p,file=paste0("/data/zhangh24/breast_cancer_data_analysis/whole_genome_age/ICOG/ERPRHER2GRADE_fixed_baseline/result/meta_result_shared_1p_fixed.txt"),row.names = F,
# quote=F)
#
#
#
#
#
#
#
#
#
|
library(fgac)
### Name: ivphiBB3
### Title: ivphiBB3
### Aliases: ivphiBB3
### Keywords: symbolmath
### ** Examples
#a<-phiBB3(0.2,4,c(0.2,0.3,0.4))
#b<-ivphiBB3(0.2,4,c(a[6],a[7],a[8]))
|
/data/genthat_extracted_code/fgac/examples/ivphiBB3.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 194 |
r
|
library(fgac)
### Name: ivphiBB3
### Title: ivphiBB3
### Aliases: ivphiBB3
### Keywords: symbolmath
### ** Examples
#a<-phiBB3(0.2,4,c(0.2,0.3,0.4))
#b<-ivphiBB3(0.2,4,c(a[6],a[7],a[8]))
|
## Subsets and prepares data for text analysis using tm package.
## Creates general and party-specific DTMs of all words.
## Conducts descriptive analysis to find correlations among terms
## to inform "neoliberal elitism" dictionary.
## Creates general and party-specific DTMs of neoliberal elitist
## rhetoric.
# Install and load packages -- currently using
require(tm) # Framework for text mining
require(dplyr)
# Install and load packages -- may use in future analysis
require(ggplot2) # for plotting word frequencies
require(RTextTools) # a machine learning package for text classification written in R
require(qdap) # Quantitative discourse analysis of transcripts
require(qdapDictionaries)
require(SnowballC) # for stemming
library(data.table)
library(wordcloud2) # for wordclouds
library(RColorBrewer) # for color palettes
# Subset speeches.df from 01_import_and_clean for text analysis,
# remove debate observations (could break apart into Dem and Rep
# segments later on, if wanted to use them)
table(speeches.df$Genre)
sub.speeches.df <- filter(speeches.df, Genre=="SPEECH"|Genre =="AD")
dem.sub.speeches.df <- filter(sub.speeches.df, Party=="Democrat")
rep.sub.speeches.df <- filter(sub.speeches.df, Party=="Republican")
# Create overall corpus, and Dem And Repub corpuses
sub.speech.corp <- Corpus(VectorSource(sub.speeches.df$Text))
dem.sub.speech.corp <- Corpus(VectorSource(dem.sub.speeches.df$Text))
rep.sub.speech.corp <- Corpus(VectorSource(rep.sub.speeches.df$Text))
# Create overall document term matrix
sub.speech.dtm <- DocumentTermMatrix(sub.speech.corp,
control = list(tolower = TRUE,
removePunctuation = TRUE,
removeNumbers = TRUE,
stopwords = TRUE))
# How many terms overall?
freq <- colSums(as.matrix(sub.speech.dtm))
length(freq)
# order terms
ord <- order(freq)
# Least frequent terms overall
freq[head(ord)]
# Most frequent terms overall
freq[tail(ord)]
# frequency of frequencies
head(table(freq),15)
tail(table(freq),15)
# Words that appear at least 2500 times
findFreqTerms(sub.speech.dtm, lowfreq=2500)
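# The wordcloud2 package loaded above is otherwise unused; a minimal sketch
# (added) of a word cloud of the more common terms; the 500-count cutoff is
# an arbitrary choice.
freq_df <- data.frame(word = names(freq), freq = as.integer(freq))
wordcloud2::wordcloud2(subset(freq_df, freq >= 500))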
# Find correlations with "neoliberal elitism" initial dictionary
# to inform additional term selection
findAssocs(sub.speech.dtm, "top", 0.2)
findAssocs(sub.speech.dtm, "best", 0.3)
findAssocs(sub.speech.dtm, "opportunity", 0.3)
findAssocs(sub.speech.dtm, "opportunities", 0.2)
findAssocs(sub.speech.dtm, "potential", 0.2)
findAssocs(sub.speech.dtm, "risk", 0.2)
findAssocs(sub.speech.dtm, "investment", 0.3)
findAssocs(sub.speech.dtm, "market", 0.3)
findAssocs(sub.speech.dtm, "markets", 0.3)
findAssocs(sub.speech.dtm, "compete", 0.3)
findAssocs(sub.speech.dtm, "competition", 0.3)
# Create matrix matching terms in elitism dictionary to documents
elitism_matrix <- DocumentTermMatrix(sub.speech.corp, control =
list(dictionary = c(
"potential",
"merit",
"talent",
"optimal",
"risk",
"efficient",
"efficiency",
"efficiencies",
"best",
"brightest",
"rank",
"ranked",
"highly-ranked",
"top",
"brain",
"brains",
"market",
"markets",
"marketplace",
"braintrust",
"braintrusts",
"brainpower",
"entrepreneurs",
"entrepreneur",
"entreprenerial",
"profit",
"profits",
"profitable",
"investment",
"competition",
"competitive",
"compete",
"competes",
"opportunity",
"opportunities",
"incentive",
"incentives",
"incentivize",
"privatize",
"talented",
"creative",
"innovate",
"innovates",
"innovator",
"innovators",
"innovation",
"grow",
"growth",
"create",
"creates",
"creators")))
# Display totals by term in elitism dictionary for all documents
colSums(as.matrix(elitism_matrix))
# Create matrix matching terms in elitism dictionary to
# Democratic documents
dem_elitism_matrix <-DocumentTermMatrix(dem.sub.speech.corp, control =
list(dictionary = c(
"potential",
"merit",
"talent",
"optimal",
"risk",
"efficient",
"efficiency",
"efficiencies",
"best",
"brightest",
"rank",
"ranked",
"highly-ranked",
"top",
"brain",
"brains",
"market",
"markets",
"marketplace",
"braintrust",
"braintrusts",
"brainpower",
"entrepreneurs",
"entrepreneur",
"entreprenerial",
"profit",
"profits",
"profitable",
"investment",
"competition",
"competitive",
"compete",
"competes",
"opportunity",
"opportunities",
"incentive",
"incentives",
"incentivize",
"privatize",
"talented",
"creative",
"innovate",
"innovates",
"innovator",
"innovators",
"innovation",
"grow",
"growth",
"create",
"creates",
"creators")))
# Display totals by term in elitism dictionary for Democratic
# documents
colSums(as.matrix(dem_elitism_matrix))
DemSums <- colSums(as.matrix(dem_elitism_matrix))
# Create matrix matching terms in elitism dictionary to
# Republican documents
rep_elitism_matrix <- DocumentTermMatrix(rep.sub.speech.corp, control =
list(dictionary = c(
"potential",
"merit",
"talent",
"optimal",
"risk",
"efficient",
"efficiency",
"efficiencies",
"best",
"brightest",
"rank",
"ranked",
"highly-ranked",
"top",
"brain",
"brains",
"market",
"markets",
"marketplace",
"braintrust",
"braintrusts",
"brainpower",
"entrepreneurs",
"entrepreneur",
"entreprenerial",
"profit",
"profits",
"profitable",
"investment",
"competition",
"competitive",
"compete",
"competes",
"opportunity",
"opportunities",
"incentive",
"incentives",
"incentivize",
"privatize",
"talented",
"creative",
"innovate",
"innovates",
"innovator",
"innovators",
"innovation",
"grow",
"growth",
"create",
"creates",
"creators")))
# Display totals by term in elitism dictionary for Republican
# documents
colSums(as.matrix(rep_elitism_matrix))
RepSums <- colSums(as.matrix(rep_elitism_matrix))
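# Added sketch: put the party-level dictionary counts side by side for
# inspection (assumes DemSums and RepSums, computed above, use the same terms).
elitism_compare <- data.frame(term = names(DemSums),
                              dem = as.integer(DemSums),
                              rep = as.integer(RepSums[names(DemSums)]))
elitism_compare[order(-(elitism_compare$dem + elitism_compare$rep)), ]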
|
/Code/02_analysis.R
|
no_license
|
dsreddy80/ps239T-final-project
|
R
| false | false | 11,467 |
r
|
## Subsets and prepares data for text analysis using tm package.
## Creates general and party-specific DTMs of all words.
## Conducts descriptive analysis to find correlations among terms
## to inform "neoliberal elitism" dictionary.
## Creates general and party-specific DTMs of neoliberal elitist
## rhetoric.
# Install and load packages -- currently using
require(tm) # Framework for text mining
require(dplyr)
# Install and load packages -- may use in future analysis
require(ggplot2) # for plotting word frequencies
require(RTextTools) # a machine learning package for text classification written in R
require(qdap) # Quantitative discourse analysis of transcripts
require(qdapDictionaries)
require(SnowballC) # for stemming
library(data.table)
library(wordcloud2) # for wordclouds
library(RColorBrewer) # for color palettes
# Subset speeches.df from 01_import_and_clean for text analysis,
# remove debate observations (could break apart into Dem and Rep
# segments later on, if wanted to use them)
table(speeches.df$Genre)
sub.speeches.df <- filter(speeches.df, Genre=="SPEECH"|Genre =="AD")
dem.sub.speeches.df <- filter(sub.speeches.df, Party=="Democrat")
rep.sub.speeches.df <- filter(sub.speeches.df, Party=="Republican")
# Create overall corpus, and Dem And Repub corpuses
sub.speech.corp <- Corpus(VectorSource(sub.speeches.df$Text))
dem.sub.speech.corp <- Corpus(VectorSource(dem.sub.speeches.df$Text))
rep.sub.speech.corp <- Corpus(VectorSource(rep.sub.speeches.df$Text))
# Create overall document term matrix
sub.speech.dtm <- DocumentTermMatrix(sub.speech.corp,
control = list(tolower = TRUE,
removePunctuation = TRUE,
removeNumbers = TRUE,
stopwords = TRUE))
# How many terms overall?
freq <- colSums(as.matrix(sub.speech.dtm))
length(freq)
# order terms
ord <- order(freq)
# Least frequent terms overall
freq[head(ord)]
# Most frequent terms overall
freq[tail(ord)]
# frequency of frequencies
head(table(freq),15)
tail(table(freq),15)
# Words that appear at least 2500 times
findFreqTerms(sub.speech.dtm, lowfreq=2500)
# Find correlations with "neoliberal elitism" initial dictionary
# to inform additional term selection
findAssocs(sub.speech.dtm, "top", 0.2)
findAssocs(sub.speech.dtm, "best", 0.3)
findAssocs(sub.speech.dtm, "opportunity", 0.3)
findAssocs(sub.speech.dtm, "opportunities", 0.2)
findAssocs(sub.speech.dtm, "potential", 0.2)
findAssocs(sub.speech.dtm, "risk", 0.2)
findAssocs(sub.speech.dtm, "investment", 0.3)
findAssocs(sub.speech.dtm, "market", 0.3)
findAssocs(sub.speech.dtm, "markets", 0.3)
findAssocs(sub.speech.dtm, "compete", 0.3)
findAssocs(sub.speech.dtm, "competition", 0.3)
# Create matrix matching terms in elitism dictionary to documents
elitism_matrix <- DocumentTermMatrix(sub.speech.corp, control =
list(dictionary = c(
"potential",
"merit",
"talent",
"optimal",
"risk",
"efficient",
"efficiency",
"efficiencies",
"best",
"brightest",
"rank",
"ranked",
"highly-ranked",
"top",
"brain",
"brains",
"market",
"markets",
"marketplace",
"braintrust",
"braintrusts",
"brainpower",
"entrepreneurs",
"entrepreneur",
"entreprenerial",
"profit",
"profits",
"profitable",
"investment",
"competition",
"competitive",
"compete",
"competes",
"opportunity",
"opportunities",
"incentive",
"incentives",
"incentivize",
"privatize",
"talented",
"creative",
"innovate",
"innovates",
"innovator",
"innovators",
"innovation",
"grow",
"growth",
"create",
"creates",
"creators")))
# Display totals by term in elitism dictionary for all documents
colSums(as.matrix(elitism_matrix))
# Create matrix matching terms in elitism dictionary to
# Democratic documents
dem_elitism_matrix <-DocumentTermMatrix(dem.sub.speech.corp, control =
list(dictionary = c(
"potential",
"merit",
"talent",
"optimal",
"risk",
"efficient",
"efficiency",
"efficiencies",
"best",
"brightest",
"rank",
"ranked",
"highly-ranked",
"top",
"brain",
"brains",
"market",
"markets",
"marketplace",
"braintrust",
"braintrusts",
"brainpower",
"entrepreneurs",
"entrepreneur",
"entreprenerial",
"profit",
"profits",
"profitable",
"investment",
"competition",
"competitive",
"compete",
"competes",
"opportunity",
"opportunities",
"incentive",
"incentives",
"incentivize",
"privatize",
"talented",
"creative",
"innovate",
"innovates",
"innovator",
"innovators",
"innovation",
"grow",
"growth",
"create",
"creates",
"creators")))
# Display totals by term in elitism dictionary for Democratic
# documents
colSums(as.matrix(dem_elitism_matrix))
DemSums <- colSums(as.matrix(dem_elitism_matrix))
# Create matrix matching terms in elitism dictionary to
# Republican documents
rep_elitism_matrix <- DocumentTermMatrix(rep.sub.speech.corp, control =
list(dictionary = c(
"potential",
"merit",
"talent",
"optimal",
"risk",
"efficient",
"efficiency",
"efficiencies",
"best",
"brightest",
"rank",
"ranked",
"highly-ranked",
"top",
"brain",
"brains",
"market",
"markets",
"marketplace",
"braintrust",
"braintrusts",
"brainpower",
"entrepreneurs",
"entrepreneur",
"entreprenerial",
"profit",
"profits",
"profitable",
"investment",
"competition",
"competitive",
"compete",
"competes",
"opportunity",
"opportunities",
"incentive",
"incentives",
"incentivize",
"privatize",
"talented",
"creative",
"innovate",
"innovates",
"innovator",
"innovators",
"innovation",
"grow",
"growth",
"create",
"creates",
"creators")))
# Display totals by term in elitism dictionary for Republican
# documents
colSums(as.matrix(rep_elitism_matrix))
RepSums <- colSums(as.matrix(rep_elitism_matrix))
|
\name{qplot.vm}
\alias{qplot.vm}
\title{Plot Diagnostic of FDR computation}
\description{Plot Diagnostic of FDR computation
}
\usage{
qplot.vm(data, lambda = seq(0, 0.95, 0.05))
}
\arguments{
\item{data}{gene expression data object}
\item{lambda}{lambda parameter of qvalue function}
}
\details{}
\value{Plot Diagnostic of FDR computation}
\references{}
\author{Paul Delmar}
\note{}
\seealso{}
\examples{}
\keyword{htest}
|
/man/qplot.vm.Rd
|
no_license
|
cran/varmixt
|
R
| false | false | 448 |
rd
|
\name{qplot.vm}
\alias{qplot.vm}
\title{Plot Diagnostic of FDR computation}
\description{Plot Diagnostic of FDR computation
}
\usage{
qplot.vm(data, lambda = seq(0, 0.95, 0.05))
}
\arguments{
\item{data}{gene expression data object}
\item{lambda}{lambda parameter of qvalue function}
}
\details{}
\value{Plot Diagnostic of FDR computation}
\references{}
\author{Paul Delmar}
\note{}
\seealso{}
\examples{}
\keyword{htest}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnd-abort.R
\name{format_error_call}
\alias{format_error_call}
\title{Validate and format a function call for use in error messages}
\usage{
format_error_call(call)
}
\arguments{
\item{call}{The execution environment of a currently
running function, e.g. \code{caller_env()}. The function will be
mentioned in error messages as the source of the error. See the
\code{call} argument of \code{\link[=abort]{abort()}} for more information.}
}
\value{
Either a string formatted as code or \code{NULL} if a simple
call could not be generated.
}
\description{
\code{format_error_call()} simplifies its input to a simple call (see
section below) and formats the result as code (using cli if
available). Use this function to generate the "in" part
of an error message from a stack frame call.
If passed an environment, the corresponding \code{sys.call()} is taken
as call, unless there is a local flag (see \code{\link[=local_error_call]{local_error_call()}}).
}
\section{Details of formatting}{
\itemize{
\item The arguments of function calls are stripped.
\item Complex function calls containing inlined objects return
\code{NULL}.
\item Calls to \code{if} preserve the condition since it might be
informative. Branches are dropped.
\item Calls to operators and other special syntax are formatted using
their names rather than the potentially confusing function form.
}
}
\examples{
# Arguments are stripped
writeLines(format_error_call(quote(foo(bar, baz))))
# Returns `NULL` with complex calls such as those that contain
# inlined functions
format_error_call(call2(list))
# Operators are formatted using their names rather than in
# function call form
writeLines(format_error_call(quote(1 + 2)))
}
\keyword{internal}
|
/man/format_error_call.Rd
|
permissive
|
seankross/rlang
|
R
| false | true | 1,796 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cnd-abort.R
\name{format_error_call}
\alias{format_error_call}
\title{Validate and format a function call for use in error messages}
\usage{
format_error_call(call)
}
\arguments{
\item{call}{The execution environment of a currently
running function, e.g. \code{caller_env()}. The function will be
mentioned in error messages as the source of the error. See the
\code{call} argument of \code{\link[=abort]{abort()}} for more information.}
}
\value{
Either a string formatted as code or \code{NULL} if a simple
call could not be generated.
}
\description{
\code{format_error_call()} simplifies its input to a simple call (see
section below) and formats the result as code (using cli if
available). Use this function to generate the "in" part
of an error message from a stack frame call.
If passed an environment, the corresponding \code{sys.call()} is taken
as call, unless there is a local flag (see \code{\link[=local_error_call]{local_error_call()}}).
}
\section{Details of formatting}{
\itemize{
\item The arguments of function calls are stripped.
\item Complex function calls containing inlined objects return
\code{NULL}.
\item Calls to \code{if} preserve the condition since it might be
informative. Branches are dropped.
\item Calls to operators and other special syntax are formatted using
their names rather than the potentially confusing function form.
}
}
\examples{
# Arguments are stripped
writeLines(format_error_call(quote(foo(bar, baz))))
# Returns `NULL` with complex calls such as those that contain
# inlined functions
format_error_call(call2(list))
# Operators are formatted using their names rather than in
# function call form
writeLines(format_error_call(quote(1 + 2)))
}
\keyword{internal}
|
#Install psych package
library(psych)
#SSSSS <-subset(subsample1, FEMALE==0 & HISPANIC ==1)
#descstat <- na.omit(subsample1[,c("AGEP","PERNP")])
shotsdata$MADE<-ifelse(shotsdata$result=="made",1,0)
onlymade <- subset(shotsdata, MADE == 1)
coordinates <- na.omit(onlymade[,c("converted_x","converted_y")])
fromwhere <- data.frame(x = onlymade$converted_x, y = onlymade$converted_y)
#Season 2018-2019 code
season2018_2019 <- na.omit(X2018_2019_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2018_2019$MADEE<-ifelse(season2018_2019$result=="made",1,0)
onlymade2018_2019 <- subset(season2018_2019, result == "made")
coordinates2018_2019 <- na.omit(onlymade2018_2019[,c("converted_x","converted_y")])
fromwhere2018_2019 <- data.frame(x = onlymade2018_2019$converted_x, y = onlymade2018_2019$converted_y)
#Season 2009-2010 code
season2009_2010 <- na.omit(X2009_2010_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
onlymade2009_2010 <- subset(season2009_2010, result == "made")
coordinates2009_2010 <- na.omit(onlymade2009_2010[,c("converted_x","converted_y")])
#Season 2010-2011 through 2017-2018
season2010_2011 <- na.omit(X2010_2011_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2011_2012 <- na.omit(X2011_2012_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2012_2013 <- na.omit(X2012_2013_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2013_2014 <- na.omit(X2013_2014_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2014_2015 <- na.omit(X2014_2015_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2015_2016 <- na.omit(X2015_2016_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2016_2017 <- na.omit(X2016_2017_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2017_2018 <- na.omit(X2017_2018_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
#Select only makes for everyseason
onlymade2009_2010 <- subset(season2009_2010, result == "made")
onlymade2010_2011 <- subset(season2010_2011, result == "made")
onlymade2011_2012 <- subset(season2011_2012, result == "made")
onlymade2012_2013 <- subset(season2012_2013, result == "made")
onlymade2013_2014 <- subset(season2013_2014, result == "made")
onlymade2014_2015 <- subset(season2014_2015, result == "made")
onlymade2015_2016 <- subset(season2015_2016, result == "made")
onlymade2016_2017 <- subset(season2016_2017, result == "made")
onlymade2017_2018 <- subset(season2017_2018, result == "made")
onlymade2018_2019 <- subset(season2018_2019, result == "made")
#Create datasets with all coordinates of last 10 seasons
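# A minimal sketch of that step (added); assumes the onlymadeYYYY_YYYY data
# frames built above.
made_list <- list(onlymade2009_2010, onlymade2010_2011, onlymade2011_2012,
                  onlymade2012_2013, onlymade2013_2014, onlymade2014_2015,
                  onlymade2015_2016, onlymade2016_2017, onlymade2017_2018,
                  onlymade2018_2019)
coordinates_10seasons <- do.call(rbind, lapply(made_list, function(d)
  d[, c("converted_x", "converted_y")]))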
|
/shots1.R
|
no_license
|
matdac12/Senior-Synthesis
|
R
| false | false | 2,943 |
r
|
#Install psych package
library(psych)
#SSSSS <-subset(subsample1, FEMALE==0 & HISPANIC ==1)
#descstat <- na.omit(subsample1[,c("AGEP","PERNP")])
shotsdata$MADE<-ifelse(shotsdata$result=="made",1,0)
onlymade <- subset(shotsdata, MADE == 1)
coordinates <- na.omit(onlymade[,c("converted_x","converted_y")])
fromwhere <- data.frame(x = onlymade$converted_x, y = onlymade$converted_y)
#Season 2018-2019 code
season2018_2019 <- na.omit(X2018_2019_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2018_2019$MADEE<-ifelse(season2018_2019$result=="made",1,0)
onlymade2018_2019 <- subset(season2018_2019, result == "made")
coordinates2018_2019 <- na.omit(onlymade2018_2019[,c("converted_x","converted_y")])
fromwhere2018_2019 <- data.frame(x = onlymade2018_2019$converted_x, y = onlymade2018_2019$converted_y)
#Season 2009-2010 code
season2009_2010 <- na.omit(X2009_2010_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
onlymade2009_2010 <- subset(season2009_2010, result == "made")
coordinates2009_2010 <- na.omit(onlymade2009_2010[,c("converted_x","converted_y")])
#Season 2010-2011 through 2017-2018
season2010_2011 <- na.omit(X2010_2011_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2011_2012 <- na.omit(X2011_2012_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2012_2013 <- na.omit(X2012_2013_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2013_2014 <- na.omit(X2013_2014_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2014_2015 <- na.omit(X2014_2015_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2015_2016 <- na.omit(X2015_2016_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2016_2017 <- na.omit(X2016_2017_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
season2017_2018 <- na.omit(X2017_2018_combined_stats[,c("points","result","shot_distance","converted_x","converted_y")])
#Select only makes for every season
onlymade2009_2010 <- subset(season2009_2010, result == "made")
onlymade2010_2011 <- subset(season2010_2011, result == "made")
onlymade2011_2012 <- subset(season2011_2012, result == "made")
onlymade2012_2013 <- subset(season2012_2013, result == "made")
onlymade2013_2014 <- subset(season2013_2014, result == "made")
onlymade2014_2015 <- subset(season2014_2015, result == "made")
onlymade2015_2016 <- subset(season2015_2016, result == "made")
onlymade2016_2017 <- subset(season2016_2017, result == "made")
onlymade2017_2018 <- subset(season2017_2018, result == "made")
onlymade2018_2019 <- subset(season2018_2019, result == "made")
#Create datasets with all coordinates of last 10 seasons
|
## ============================================================================================================ ##
# Shiny application for comparing supervised learning methods.
#
# DATA SCIENCE COURSE: APPLICATIONS TO BIOLOGY AND MEDICINE WITH PYTHON AND R
#
# FACULTAT DE BIOLOGIA – UNIVERSITAT DE BARCELONA
#
# Created by Javier Jarque Valentín and Xavier Abarca García
#
# To contact the authors of this code, send an email to <javier.jarque@gmail.com> or <xabarca@gmail.com>
#
# Code available on Github: https://github.com/xjarque-xabarca
#
## ============================================================================================================ ##
|
/aplicacion/server-tab01-getting-started.R
|
no_license
|
xjarque-xabarca/projecteUB
|
R
| false | false | 736 |
r
|
## ============================================================================================================ ##
# Shiny application for comparing supervised learning methods.
#
# DATA SCIENCE COURSE: APPLICATIONS TO BIOLOGY AND MEDICINE WITH PYTHON AND R
#
# FACULTAT DE BIOLOGIA – UNIVERSITAT DE BARCELONA
#
# Created by Javier Jarque Valentín and Xavier Abarca García
#
# To contact the authors of this code, send an email to <javier.jarque@gmail.com> or <xabarca@gmail.com>
#
# Code available on Github: https://github.com/xjarque-xabarca
#
## ============================================================================================================ ##
|
library(plotGoogleMaps)
library(RgoogleMaps)
library(ggmap)
library(ggplot2)
library(plotrix)
library(maptools)
library(maps)
library(mapdata)
library(mapplots)
library(ggsubplot)
library(grid)
library(RColorBrewer)
library(animation)
i = 723406
plotancestry <- function(x){
gen = NULL
##Creation of the big matrix of ancestry
for( i in x){
gentemp = read.table(paste("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION//SELECTING GENEALOGIES//scripts/BuildGen/output_files/ancestries/",i,"_Ancestors.txt", sep = ""),h=T,sep = "\t")
gen = rbind(gen,gentemp)
}
gen$lon = rep(0,length(gen$ind))
gen$lat = rep(0,length(gen$ind))
j = 0
for(i in gen$town){
j = j+1
indice = which(URB$NOM==i)
if(length(indice) != 0) {
gen$lat[j] = URB$LATITUDE_ADRIEN[indice]
gen$lon[j] = URB$LONGITUDE_ADRIEN[indice]
}
}
print(dim(gen))
gen = gen[order(gen$firstwedyear),]
gen$period = rep(NA, length(gen$ind))
gen$period[which(gen$firstwedyear > 1605)] = "1605-1635"
gen$period[which(gen$firstwedyear > 1635)] = "1635-1665"
gen$period[which(gen$firstwedyear > 1665)] = "1665-1695"
gen$period[which(gen$firstwedyear > 1695)] = "1695-1725"
gen$period[which(gen$firstwedyear > 1725)] = "1725-1755"
gen$period[which(gen$firstwedyear > 1755)] = "1755-1785"
gen$period[which(gen$firstwedyear > 1785)] = "1785-1815"
gen$period[which(gen$firstwedyear > 1815)] = "1815-1845"
gen$period[which(gen$firstwedyear > 1845)] = "1845-1905"
gen$period[which(gen$firstwedyear > 1905)] = "1905-2000"
gen = gen[!is.na(gen$period),]
print(dim(gen))
gen = gen[-which(gen$WFI == -1),]
print(dim(gen))
gen = gen[-which(gen$lon == 0),]
print(dim(gen))
gen$period = as.factor(gen$period)
return(gen)
}
plotancestryregion <- function(x){
gen = NULL
latlong = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//Data_files/latitude-longitude-quebec-region.csv",h=T,stringsAsFactors=FALSE)
gen$lon = rep(0,length(x$maxDepth))
gen$lat = rep(0,length(x$maxDepth))
gen$size = rep(0,length(x$maxDepth))
j = 0
tata = table(x$Loc_ego)
for(i in x$Loc_ego){
j = j+1
indice = which(latlong$region==i)
if(length(indice) != 0) {
gen$lat[j] = latlong$latitude[indice]
gen$lon[j] = latlong$longitude[indice]
gen$size[j] = tata[i]
}
}
y = x
y$lon = gen$lon
y$lat = gen$lat
y$size = gen$size
return(y)
}
quebec <-get_map(location = c(lon = -70.270386,lat = 47.591346),zoom =6, scale = 2, maptype = "terrain", source = 'stamen')
gg = read.table("96-Indiv-list.txt", sep = "\t", row.names = 1, h = T)
gg = read.table("120-Indiv-list.txt", sep = "\t", row.names = 1, h = T)
gglow = gg[which(gg$cond == "Low cWFI"),]
gghigh = gg[-(which(gg$cond == "Low cWFI")),]
latlong = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//Data_files/latitude-longitude-quebec-region.csv",h=T,stringsAsFactors=FALSE)
latlongtown = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//Data_files/villes.csv",h=T,stringsAsFactors=FALSE, sep = "\t")
gglow2 = plotancestryregion(gglow)
gghigh2 = plotancestryregion(gghigh)
gg2 =plotancestryregion(gg)
##########################################Locality opening : gif
##########################################
i = 1850
quebec <-get_map(location = c(lon = -70.270386,lat = 47.591346),zoom =6, scale = 2, maptype = "satellite")
toto = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//scripts/python-toolbox/towns-and-opening.csv",h=T,na.string = "NULL", sep = "\t")
toto$lon = as.numeric(toto$lon)
toto$lat = as.numeric(toto$lat)
toto = toto[!is.na(toto$lat),]
toto = toto[!is.na(toto$opening),]
toto = toto[order(toto$opening),]
colpal = brewer.pal(11,"RdBu")
periode = c("1621-1651",
"1651-1681",
"1681-1711",
"1711-1741",
"1741-1771",
"1771-1801",
"1801-1831",
"1831-1861",
"1861-1891",
"1891-1921",
"1921-1985")
interval = seq((min(toto$opening)+30),1921,30)
interval
j = 1
toto$col = rep(colpal[j], length(toto$lat))
toto$periode = rep(periode[j],length(toto$lat))
for(i in interval){
j = j +1
print(j)
toto$col[which(toto$opening > i)] <- colpal[j]
toto$periode[which(toto$opening > i)] <- periode[j]
}
j = 1
saveGIF(movie.name="opening.gif",autobrowse = TRUE,interval = 2,clean = TRUE,ani.width = 800, ani.height = 800,outdir = "/home/foucal/",{
for(i in c(interval,1985)){
temp = toto[which(toto$opening <= i),]
#temp$col[which(temp$opening > (i-30))] = "#053061"
locop <-ggmap(quebec) + geom_point(data = temp ,aes(y=lat,x=lon, col = periode),size = 4, alpha = 0.7) +
ggtitle(paste("Opened localities until year",i))+scale_colour_manual(name="Time periods",values=colpal[1:j])+
guides(colour=guide_legend(override.aes=list(size=10)))+
theme(legend.text = element_text(size=30),
legend.title = element_text(size=20),legend.position = c(1.2,0.9),
axis.text.x = element_text(size=18),axis.title.x = element_text(size=30),
axis.text.y = element_text(size=18),axis.title.y = element_text(size=30),
legend.justification = "top", plot.title = element_text(size=30),plot.margin = unit(c(0,8,0,1), units = "cm"))
locop$labels$x = "Longitude"
locop$labels$y = "Latitude"
print(locop)
j = j +1
}
})
###Just one picture
locop <-ggmap(quebec) + geom_point(data = toto ,aes(y=lat,x=lon , color = periode))+
ggtitle(paste("Opened localities until year",i))+scale_colour_manual(name="Time periods",values=colpal[1:j])+
guides(colour=guide_legend(override.aes=list(size=10)))+
theme(legend.text = element_text(size=30),
legend.title = element_text(size=20),legend.position = c(1.2,0.9),
axis.text.x = element_text(size=18),axis.title.x = element_text(size=30),
axis.text.y = element_text(size=18),axis.title.y = element_text(size=30),
legend.justification = "top", plot.title = element_text(size=30),plot.margin = unit(c(0,8,0,1), units = "cm"))
locop$labels$x = "Longitude"
locop$labels$y = "Latitude"
locop
#######################################"
######################################Checking the data of towns seriously.
URB <- read.delim("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES/Data_files/URB_COORD_COMPLETEES_PAR_ADRIEN.csv", na.strings = "NULL")
###without __
URB$COORD_X = as.numeric(URB$COORD_X)
URB$COORD_Y = as.numeric(URB$COORD_Y)
which(is.na(URB$LONGITUDE_ADRIEN))
quebec <-get_map(location = c(lon = -70.270386,lat = 50),zoom =6, scale = 2, maptype = "terrain", source = 'stamen')
ggmap(quebec) +
geom_point(data = URB ,aes(y=LATITUDE_ADRIEN,x=LONGITUDE_ADRIEN),col= "red", alpha = 0.4)
difflon = abs(URB$LONGITUDE_ADRIEN-URB$COORD_X)
difflat = abs(URB$LATITUDE_ADRIEN-URB$COORD_Y)
DIFFURB = URB[URB$NOM %in% union(URB$NOM[which(difflat > 0.2)],URB$NOM[which(difflon > 0.2)]),]
write.table(DIFFURB, "VILLES-DIFFERENTES.csv",sep = "\t", quote=F, row.names=F)
write.table(DIFFURB, "LAT-LONG-TOWN-ADRIEN-DIFFERENT-FROM-ST-HILAIRE.csv",sep = "\t", quote=F, row.names=F)
test = data.frame(lat = DIFFURB$COORD_Y, lon = DIFFURB$COORD_X, NOM = DIFFURB$NOM, Annotation = "Marc")
test2 = data.frame(lat = DIFFURB$LATITUDE_ADRIEN, lon = DIFFURB$LONGITUDE_ADRIEN, NOM = DIFFURB$NOM, Annotation = "Adrien")
test = rbind(test,test2)
quebec <-get_googlemap(center = c(lon = -70.270386,lat = 47),zoom =6, scale = 2, maptype = "roadmap")
ggmap(quebec) +
geom_point(data = test ,aes(y=lat,x=lon, colour = NOM, shape = Annotation), size = 10, alpha = 0.8)
NEWURB = URB[is.na(URB$COORD_X),]
NEWURB = NEWURB[!is.na(NEWURB$LATITUDE_ADRIEN),]
write.table(NEWURB, "NEW-LAT-LONG-TOWN-FROM-ADRIEN.csv",sep = "\t", quote=F, row.names=F)
plotttt <- function(x){
ggmap(quebec) +
geom_point(data = DIFFURB[x,] ,aes(y=LATITUDE_ADRIEN,x=LONGITUDE_ADRIEN), col= "black") +
geom_point(data = DIFFURB[x,] ,aes(y=COORD_Y,x=COORD_X), pch = 17,col = "red") + facet_wrap(~NOM)
}
plotttt(35)
#################################
##################################Pie chart of towns per selected individuals
ggll = plotancestryregion(gg)
pie = ggll[,which(names(ggll) %in% c("cond","lat","lon","Loc_ego"))]
pie$cond2 = rep(0,length(pie$lon))
pie$cond2[which(pie$cond == "High cWFI")] <- 1
by(pie$cond2, pie$Loc_ego, mean)
count = table(pie$Loc_ego)
nondup = pie[!duplicated(pie$Loc_ego),]
ord =order(nondup$Loc_ego)
ultimatepie = data.frame(loc = nondup$Loc_ego[ord],lat = nondup$lat[ord],
lon = nondup$lon[ord], size = as.vector(count),
frac = as.vector(by(pie$cond2, pie$Loc_ego, mean)))
quebec <-get_googlemap(center = c(lon = -72,lat = 47.2),zoom =7, scale = 2, maptype = "roadmap")
ggmap(quebec, extent = "device") + geom_point(alpha = 0.8,data = ultimatepie ,aes(y=lat,x=lon, color = frac,size = size,na.rm = TRUE)) +
scale_size_area(max_size = 10) +
scale_color_gradient(low = "yellow", high= "red")
#####################################Diagram to see the selection of individuals
####################################
ggll = plotancestry(gg)
pie = ggll[,which(names(ggll) %in% c("cWFI","lat","lon","Loc_ego"))]
pie$Loc_ego = factor(pie$Loc_ego)
count = table(pie$Loc_ego)
nondup = pie[!duplicated(pie$Loc_ego),]
ord =order(nondup$Loc_ego)
ultimatepie = data.frame(loc = nondup$Loc_ego[ord],lat = nondup$lat[ord],
lon = nondup$lon[ord], size = as.vector(count),
cWFI = as.vector(by(pie$cWFI, pie$Loc_ego, mean)))
quebec <-get_googlemap(center = c(lon = -72,lat = 47.2),zoom =7, scale = 2, maptype = "roadmap")
ggmap(quebec, extent = "device") + geom_point(alpha = 0.8,data = ultimatepie ,aes(y=lat,x=lon, color = cWFI,size = size,na.rm = TRUE)) +
scale_size_area(max_size = 10) +scale_size_identity(guide="legend", trans = "log10")
#scale_color_gradient(low = "yellow",high= "red")
#############################
#############################PRINTING ALL THE INFO OF AN ANCESTRY
i = "1605-1635"
periode = c("1605-1635", "1635-1665", "1665-1695","1695-1725", "1725-1755", "1755-1785","1785-1815", "1815-1845", "1845-1905", "1905-2000")
periode = c("1621-1651",
"1651-1681",
"1681-1711",
"1711-1741",
"1741-1771",
"1771-1801",
"1801-1831",
"1831-1861",
"1861-1891",
"1891-1921",
"1921-1985")
quebec <-get_map(location = c(lon = -70.8,lat = 47.3),zoom =7, scale = 2, maptype = "satellite")
####Specify here which group's ancestry you want to plot as a gif
for(individu in "726792"){
anc = plotancestry(c(individu))
anc$wfi =rep(NA,length(anc$town))
anc$size =rep(NA,length(anc$town))
output = NULL
for(i in levels(anc$period)){
print(i)
temp = anc[anc$period %in% i,]
vv = by(temp$WFI, factor(temp$town), mean)
ww = table(factor(temp$town))
k = 0
for(j in names(vv)){
k = k+1
temp$wfi[which(temp$town[temp$period %in% i] == j)] = as.vector(vv[k])
if (as.vector(ww[k]) < 10){
temp$size[which(temp$town[temp$period %in% i] == j)] = 10
}else{
temp$size[which(temp$town[temp$period %in% i] == j)] = as.vector(ww[k])
}
}
output = rbind(output,temp)
}
anc = output
saveGIF(movie.name=paste0(individu,".gif"),autobrowse = FALSE,clean = TRUE,ani.width = 400, ani.height = 400,outdir = "/home/foucal/Pictures/HIGH",{
for(i in levels(anc$period)){
p2 <-ggmap(quebec) + geom_point(data = anc[anc$period %in% i,] , aes(y=lat,x=lon, color=wfi, size = size),alpha = 0.8, na.rm = TRUE) +
scale_color_gradient2(name = "WFI",midpoint = 0.5,low =rgb(84, 39, 136, max = 255) , mid = rgb(247, 247, 247,max = 255), high = rgb(230, 97, 1, max = 255), limits = c(0,1))+ggtitle(i)+
theme(plot.title = element_text(size=15),legend.text = element_text(size=10),legend.title = element_text(size=10))+
scale_size_area(limits = c(1,60), max_size = 10,breaks =c(10,30,60),labels = c("<10","30","60"), guide = 'legend',name = "Ancestors number")
p2$labels$x = "Longitude"
p2$labels$y = "Latitude"
print(p2)
# print(paste(i,"_",k,".png", sep = ""))
#ggsave(paste(i,"_",k,".png", sep = ""),width = 8, height = 8)
#graphics.off()
}
})
}
#########################################
########################################
######################
|
/gmap-brewer.R
|
no_license
|
Adrien-Evo/Selecting-Individuals
|
R
| false | false | 12,594 |
r
|
library(plotGoogleMaps)
library(RgoogleMaps)
library(ggmap)
library(ggplot2)
library(plotrix)
library(maptools)
library(maps)
library(mapdata)
library(mapplots)
library(ggsubplot)
library(grid)
library(RColorBrewer)
library(animation)
i = 723406
plotancestry <- function(x){
gen = NULL
##Creation of the big matrix of ancestry
for( i in x){
gentemp = read.table(paste("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION//SELECTING GENEALOGIES//scripts/BuildGen/output_files/ancestries/",i,"_Ancestors.txt", sep = ""),h=T,sep = "\t")
gen = rbind(gen,gentemp)
}
gen$lon = rep(0,length(gen$ind))
gen$lat = rep(0,length(gen$ind))
j = 0
for(i in gen$town){
j = j+1
indice = which(URB$NOM==i)
if(length(indice) != 0) {
gen$lat[j] = URB$LATITUDE_ADRIEN[indice]
gen$lon[j] = URB$LONGITUDE_ADRIEN[indice]
}
}
print(dim(gen))
gen = gen[order(gen$firstwedyear),]
gen$period = rep(NA, length(gen$ind))
gen$period[which(gen$firstwedyear > 1605)] = "1605-1635"
gen$period[which(gen$firstwedyear > 1635)] = "1635-1665"
gen$period[which(gen$firstwedyear > 1665)] = "1665-1695"
gen$period[which(gen$firstwedyear > 1695)] = "1695-1725"
gen$period[which(gen$firstwedyear > 1725)] = "1725-1755"
gen$period[which(gen$firstwedyear > 1755)] = "1755-1785"
gen$period[which(gen$firstwedyear > 1785)] = "1785-1815"
gen$period[which(gen$firstwedyear > 1815)] = "1815-1845"
gen$period[which(gen$firstwedyear > 1845)] = "1845-1905"
gen$period[which(gen$firstwedyear > 1905)] = "1905-2000"
gen = gen[!is.na(gen$period),]
print(dim(gen))
gen = gen[-which(gen$WFI == -1),]
print(dim(gen))
gen = gen[-which(gen$lon == 0),]
print(dim(gen))
gen$period = as.factor(gen$period)
return(gen)
}
plotancestryregion <- function(x){
gen = NULL
latlong = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//Data_files/latitude-longitude-quebec-region.csv",h=T,stringsAsFactors=FALSE)
gen$lon = rep(0,length(x$maxDepth))
gen$lat = rep(0,length(x$maxDepth))
gen$size = rep(0,length(x$maxDepth))
j = 0
tata = table(x$Loc_ego)
for(i in x$Loc_ego){
j = j+1
indice = which(latlong$region==i)
if(length(indice) != 0) {
gen$lat[j] = latlong$latitude[indice]
gen$lon[j] = latlong$longitude[indice]
gen$size[j] = tata[i]
}
}
y = x
y$lon = gen$lon
y$lat = gen$lat
y$size = gen$size
return(y)
}
quebec <-get_map(location = c(lon = -70.270386,lat = 47.591346),zoom =6, scale = 2, maptype = "terrain", source = 'stamen')
gg = read.table("96-Indiv-list.txt", sep = "\t", row.names = 1, h = T)
gg = read.table("120-Indiv-list.txt", sep = "\t", row.names = 1, h = T)
gglow = gg[which(gg$cond == "Low cWFI"),]
gghigh = gg[-(which(gg$cond == "Low cWFI")),]
latlong = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//Data_files/latitude-longitude-quebec-region.csv",h=T,stringsAsFactors=FALSE)
latlongtown = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//Data_files/villes.csv",h=T,stringsAsFactors=FALSE, sep = "\t")
gglow2 = plotancestryregion(gglow)
gghigh2 = plotancestryregion(gghigh)
gg2 =plotancestryregion(gg)
##########################################Locality opening : gif
##########################################
i = 1850
quebec <-get_map(location = c(lon = -70.270386,lat = 47.591346),zoom =6, scale = 2, maptype = "satellite")
toto = read.table("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES//scripts/python-toolbox/towns-and-opening.csv",h=T,na.string = "NULL", sep = "\t")
toto$lon = as.numeric(toto$lon)
toto$lat = as.numeric(toto$lat)
toto = toto[!is.na(toto$lat),]
toto = toto[!is.na(toto$opening),]
toto = toto[order(toto$opening),]
colpal = brewer.pal(11,"RdBu")
periode = c("1621-1651",
"1651-1681",
"1681-1711",
"1711-1741",
"1741-1771",
"1771-1801",
"1801-1831",
"1831-1861",
"1861-1891",
"1891-1921",
"1921-1985")
interval = seq((min(toto$opening)+30),1921,30)
interval
j = 1
toto$col = rep(colpal[j], length(toto$lat))
toto$periode = rep(periode[j],length(toto$lat))
for(i in interval){
j = j +1
print(j)
toto$col[which(toto$opening > i)] <- colpal[j]
toto$periode[which(toto$opening > i)] <- periode[j]
}
j = 1
saveGIF(movie.name="opening.gif",autobrowse = TRUE,interval = 2,clean = TRUE,ani.width = 800, ani.height = 800,outdir = "/home/foucal/",{
for(i in c(interval,1985)){
temp = toto[which(toto$opening <= i),]
#temp$col[which(temp$opening > (i-30))] = "#053061"
locop <-ggmap(quebec) + geom_point(data = temp ,aes(y=lat,x=lon, col = periode),size = 4, alpha = 0.7) +
ggtitle(paste("Opened localities until year",i))+scale_colour_manual(name="Time periods",values=colpal[1:j])+
guides(colour=guide_legend(override.aes=list(size=10)))+
theme(legend.text = element_text(size=30),
legend.title = element_text(size=20),legend.position = c(1.2,0.9),
axis.text.x = element_text(size=18),axis.title.x = element_text(size=30),
axis.text.y = element_text(size=18),axis.title.y = element_text(size=30),
legend.justification = "top", plot.title = element_text(size=30),plot.margin = unit(c(0,8,0,1), units = "cm"))
locop$labels$x = "Longitude"
locop$labels$y = "Latitude"
print(locop)
j = j +1
}
})
###Just one picture
locop <-ggmap(quebec) + geom_point(data = toto ,aes(y=lat,x=lon , color = periode))+
ggtitle(paste("Opened localities until year",i))+scale_colour_manual(name="Time periods",values=colpal[1:j])+
guides(colour=guide_legend(override.aes=list(size=10)))+
theme(legend.text = element_text(size=30),
legend.title = element_text(size=20),legend.position = c(1.2,0.9),
axis.text.x = element_text(size=18),axis.title.x = element_text(size=30),
axis.text.y = element_text(size=18),axis.title.y = element_text(size=30),
legend.justification = "top", plot.title = element_text(size=30),plot.margin = unit(c(0,8,0,1), units = "cm"))
locop$labels$x = "Longitude"
locop$labels$y = "Latitude"
locop
#######################################"
######################################Checking the data of towns seriously.
URB <- read.delim("~/PROJECTS/RANGE EXPANSION IN HUMAN POPULATION/SELECTING GENEALOGIES/Data_files/URB_COORD_COMPLETEES_PAR_ADRIEN.csv", na.strings = "NULL")
###without __
URB$COORD_X = as.numeric(URB$COORD_X)
URB$COORD_Y = as.numeric(URB$COORD_Y)
which(is.na(URB$LONGITUDE_ADRIEN))
quebec <-get_map(location = c(lon = -70.270386,lat = 50),zoom =6, scale = 2, maptype = "terrain", source = 'stamen')
ggmap(quebec) +
geom_point(data = URB ,aes(y=LATITUDE_ADRIEN,x=LONGITUDE_ADRIEN),col= "red", alpha = 0.4)
difflon = abs(URB$LONGITUDE_ADRIEN-URB$COORD_X)
difflat = abs(URB$LATITUDE_ADRIEN-URB$COORD_Y)
DIFFURB = URB[URB$NOM %in% union(URB$NOM[which(difflat > 0.2)],URB$NOM[which(difflon > 0.2)]),]
write.table(DIFFURB, "VILLES-DIFFERENTES.csv",sep = "\t", quote=F, row.names=F)
write.table(DIFFURB, "LAT-LONG-TOWN-ADRIEN-DIFFERENT-FROM-ST-HILAIRE.csv",sep = "\t", quote=F, row.names=F)
test = data.frame(lat = DIFFURB$COORD_Y, lon = DIFFURB$COORD_X, NOM = DIFFURB$NOM, Annotation = "Marc")
test2 = data.frame(lat = DIFFURB$LATITUDE_ADRIEN, lon = DIFFURB$LONGITUDE_ADRIEN, NOM = DIFFURB$NOM, Annotation = "Adrien")
test = rbind(test,test2)
quebec <-get_googlemap(center = c(lon = -70.270386,lat = 47),zoom =6, scale = 2, maptype = "roadmap")
ggmap(quebec) +
geom_point(data = test ,aes(y=lat,x=lon, colour = NOM, shape = Annotation), size = 10, alpha = 0.8)
NEWURB = URB[is.na(URB$COORD_X),]
NEWURB = NEWURB[!is.na(NEWURB$LATITUDE_ADRIEN),]
write.table(NEWURB, "NEW-LAT-LONG-TOWN-FROM-ADRIEN.csv",sep = "\t", quote=F, row.names=F)
plotttt <- function(x){
ggmap(quebec) +
geom_point(data = DIFFURB[x,] ,aes(y=LATITUDE_ADRIEN,x=LONGITUDE_ADRIEN), col= "black") +
geom_point(data = DIFFURB[x,] ,aes(y=COORD_Y,x=COORD_X), pch = 17,col = "red") + facet_wrap(~NOM)
}
plotttt(35)
#################################
##################################Pie chart of towns per selected individuals
ggll = plotancestryregion(gg)
pie = ggll[,which(names(ggll) %in% c("cond","lat","lon","Loc_ego"))]
pie$cond2 = rep(0,length(pie$lon))
pie$cond2[which(pie$cond == "High cWFI")] <- 1
by(pie$cond2, pie$Loc_ego, mean)
count = table(pie$Loc_ego)
nondup = pie[!duplicated(pie$Loc_ego),]
ord =order(nondup$Loc_ego)
ultimatepie = data.frame(loc = nondup$Loc_ego[ord],lat = nondup$lat[ord],
lon = nondup$lon[ord], size = as.vector(count),
frac = as.vector(by(pie$cond2, pie$Loc_ego, mean)))
quebec <-get_googlemap(center = c(lon = -72,lat = 47.2),zoom =7, scale = 2, maptype = "roadmap")
ggmap(quebec, extent = "device") + geom_point(alpha = 0.8,data = ultimatepie ,aes(y=lat,x=lon, color = frac,size = size,na.rm = TRUE)) +
scale_size_area(max_size = 10) +
scale_color_gradient(low = "yellow", high= "red")
#####################################Diagram to see the selection of individuals
####################################
ggll = plotancestry(gg)
pie = ggll[,which(names(ggll) %in% c("cWFI","lat","lon","Loc_ego"))]
pie$Loc_ego = factor(pie$Loc_ego)
count = table(pie$Loc_ego)
nondup = pie[!duplicated(pie$Loc_ego),]
ord =order(nondup$Loc_ego)
ultimatepie = data.frame(loc = nondup$Loc_ego[ord],lat = nondup$lat[ord],
lon = nondup$lon[ord], size = as.vector(count),
cWFI = as.vector(by(pie$cWFI, pie$Loc_ego, mean)))
quebec <-get_googlemap(center = c(lon = -72,lat = 47.2),zoom =7, scale = 2, maptype = "roadmap")
ggmap(quebec, extent = "device") + geom_point(alpha = 0.8,data = ultimatepie ,aes(y=lat,x=lon, color = cWFI,size = size,na.rm = TRUE)) +
scale_size_area(max_size = 10) +scale_size_identity(guide="legend", trans = "log10")
#scale_color_gradient(low = "yellow",high= "red")
#############################
#############################PRINTING ALL THE INFO OF AN ANCESTRY
i = "1605-1635"
periode = c("1605-1635", "1635-1665", "1665-1695","1695-1725", "1725-1755", "1755-1785","1785-1815", "1815-1845", "1845-1905", "1905-2000")
periode = c("1621-1651",
"1651-1681",
"1681-1711",
"1711-1741",
"1741-1771",
"1771-1801",
"1801-1831",
"1831-1861",
"1861-1891",
"1891-1921",
"1921-1985")
quebec <-get_map(location = c(lon = -70.8,lat = 47.3),zoom =7, scale = 2, maptype = "satellite")
####Specify here which group's ancestry you want to plot as a gif
for(individu in "726792"){
anc = plotancestry(c(individu))
anc$wfi =rep(NA,length(anc$town))
anc$size =rep(NA,length(anc$town))
output = NULL
for(i in levels(anc$period)){
print(i)
temp = anc[anc$period %in% i,]
vv = by(temp$WFI, factor(temp$town), mean)
ww = table(factor(temp$town))
k = 0
for(j in names(vv)){
k = k+1
temp$wfi[which(temp$town[temp$period %in% i] == j)] = as.vector(vv[k])
if (as.vector(ww[k]) < 10){
temp$size[which(temp$town[temp$period %in% i] == j)] = 10
}else{
temp$size[which(temp$town[temp$period %in% i] == j)] = as.vector(ww[k])
}
}
output = rbind(output,temp)
}
anc = output
saveGIF(movie.name=paste0(individu,".gif"),autobrowse = FALSE,clean = TRUE,ani.width = 400, ani.height = 400,outdir = "/home/foucal/Pictures/HIGH",{
for(i in levels(anc$period)){
p2 <-ggmap(quebec) + geom_point(data = anc[anc$period %in% i,] , aes(y=lat,x=lon, color=wfi, size = size),alpha = 0.8, na.rm = TRUE) +
scale_color_gradient2(name = "WFI",midpoint = 0.5,low =rgb(84, 39, 136, max = 255) , mid = rgb(247, 247, 247,max = 255), high = rgb(230, 97, 1, max = 255), limits = c(0,1))+ggtitle(i)+
theme(plot.title = element_text(size=15),legend.text = element_text(size=10),legend.title = element_text(size=10))+
scale_size_area(limits = c(1,60), max_size = 10,breaks =c(10,30,60),labels = c("<10","30","60"), guide = 'legend',name = "Ancestors number")
p2$labels$x = "Longitude"
p2$labels$y = "Latitude"
print(p2)
# print(paste(i,"_",k,".png", sep = ""))
#ggsave(paste(i,"_",k,".png", sep = ""),width = 8, height = 8)
#graphics.off()
}
})
}
#########################################
########################################
######################
|
#rm(list = ls()) #cleaning the environment from any previouse work, if you don't have important work going on uncomment
require(fImport)
#require(data.table)
#require(plyr)
################## PATHS #######################
subjec_train_path <- 'train/subject_train.txt'
subjec_test_path <- 'test/subject_test.txt'
train_set_path <- 'train/X_train.txt'
test_set_path <- 'test/X_test.txt'
train_labels_path <- 'train/y_train.txt'
test_labels_path <- 'test/y_test.txt'
activity_path <- 'activity_labels.txt'
features_path <- 'features.txt'
#######################################################################
features <- read.table(features_path)[,2] #getting the features
indeces <- grep('.*-mean[^F]()|.*-std()',features) #getting the features of interest ps: didn't include meanFrequency() because its a different mesurement from the mean
########### getting the feature names more readable ##################
features <- gsub('[()]',replacement = '',x = features)
features <- gsub('[-]',replacement = ' ',x = features)
features <- gsub('mean',replacement = 'Mean',x = features)
features <- gsub('std',replacement = 'STD',x = features)
features <- gsub(' ',replacement = '_',x = features)
#######################################################################
activities <- read.table(activity_path)
activities <- as.character(activities[,2])
get_subject <- function(path){ #getting subjects who performed the activity
return (read.table(path))
}
get_set <- function(path){ # getting the sets
print('this may take several minutes, please wait')
set <- read.table(path)
names(set) <- features
set <- set[,indeces]
print('Done')
return (set)
}
get_labels <- function(path){ #getting labels
num_lbls <- readLines(path)
num_lbls_fac <- factor(num_lbls) # converting the label character vector to a factor
#setattr(num_lbls_fac,'levels',activities)
levels(num_lbls_fac) <- activities # changing the factor levels to the corresponding activity
labels <- as.data.frame(num_lbls_fac)
return (labels)
}
################### collecting dataset variabls ###############################
train_subject <- get_subject(subjec_train_path)
names(train_subject) <- 'Subject_No'
train_set <- get_set(train_set_path)
train_labels <- get_labels(train_labels_path)
names(train_labels) <- 'activity'
test_subject <- get_subject(subjec_test_path)
names(test_subject) <- 'Subject_No'
test_set <- get_set(test_set_path)
test_labels <- get_labels(test_labels_path)
names(test_labels) <- 'activity'
##################### Collecting the dataset ###################################
train_dataset <- cbind(train_subject,train_labels,train_set)
test_dataset <- cbind(test_subject ,test_labels ,test_set )
data_set <- rbind(train_dataset,test_dataset)
##################### constructing the tidy_dataset #########################
getUniqe <- function(data,subject,actv){ # subsetting over the subject and the activity
return(data[data$Subject_No == subject & data$activity == actv,])
}
#the number of rows will be the same as the number of subjects times the number of activities
num_rows <- length(unique(data_set$Subject_No))*length(activities)
#the number of columns will be the same as the number of columns in the dataset
num_cols <- dim(data_set)[2]
#constructing a matrix with the expected dimensions
m <- matrix(nrow = num_rows,ncol = num_cols)
tidy_dataset <- as.data.frame(m)
names(tidy_dataset) <- paste('Average_Of',names(data_set),sep='_')
n <- 1 # we'll start building the tidy dataset from row number 1
#looping over the subjects and activities and subsetting over them
for(S in unique(data_set$Subject_No)){
for(A in activities){
tmp <- colMeans(getUniqe(data_set,S,A)[3:num_cols]) #getting the column means starting from the third column to the end
col1 <- S # the first column will be the subject
col2 <- A # the second column will be the activity
tidy_dataset[n,1] <- S # plugging the first column
tidy_dataset[n,2] <- A # plugging the second column
tidy_dataset[n,3:num_cols ] <- tmp # plugging the columns means
n = n+1 # the next row
}
}
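#A more concise cross-check of the loop above (sketch, using base aggregate() on the same columns):
tidy_check <- aggregate(data_set[, 3:num_cols],
                        by = list(Subject_No = data_set$Subject_No, activity = data_set$activity),
                        FUN = mean)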
dir.create('analyse_results')
write.table(data_set, "analyse_results/dataset.txt") # getting the data set file
write.table(tidy_dataset, "analyse_results/tidy_dataset.txt") # getting the tidy data set file
|
/run_analysis.R
|
no_license
|
islam3zzat/Human_Activity_Recognition
|
R
| false | false | 4,452 |
r
|
#rm(list = ls()) #cleaning the environment from any previouse work, if you don't have important work going on uncomment
require(fImport)
#require(data.table)
#require(plyr)
################## PATHS #######################
subjec_train_path <- 'train/subject_train.txt'
subjec_test_path <- 'test/subject_test.txt'
train_set_path <- 'train/X_train.txt'
test_set_path <- 'test/X_test.txt'
train_labels_path <- 'train/y_train.txt'
test_labels_path <- 'test/y_test.txt'
activity_path <- 'activity_labels.txt'
features_path <- 'features.txt'
#######################################################################
features <- read.table(features_path)[,2] #getting the features
indeces <- grep('.*-mean[^F]()|.*-std()',features) #getting the features of interest ps: didn't include meanFrequency() because its a different mesurement from the mean
########### getting the feature names more readable ##################
features <- gsub('[()]',replacement = '',x = features)
features <- gsub('[-]',replacement = ' ',x = features)
features <- gsub('mean',replacement = 'Mean',x = features)
features <- gsub('std',replacement = 'STD',x = features)
features <- gsub(' ',replacement = '_',x = features)
#######################################################################
activities <- read.table(activity_path)
activities <- as.character(activities[,2])
get_subject <- function(path){ #getting subjects who performed the activity
return (read.table(path))
}
get_set <- function(path){ # getting the sets
print('this may take several minutes, please wait')
set <- read.table(path)
names(set) <- features
set <- set[,indeces]
print('Done')
return (set)
}
get_labels <- function(path){ #getting labels
num_lbls <- readLines(path)
num_lbls_fac <- factor(num_lbls) # converting the label character vector to a factor
#setattr(num_lbls_fac,'levels',activities)
levels(num_lbls_fac) <- activities # changing the factor levels to the corresponding activity
labels <- as.data.frame(num_lbls_fac)
return (labels)
}
################### collecting dataset variabls ###############################
train_subject <- get_subject(subjec_train_path)
names(train_subject) <- 'Subject_No'
train_set <- get_set(train_set_path)
train_labels <- get_labels(train_labels_path)
names(train_labels) <- 'activity'
test_subject <- get_subject(subjec_test_path)
names(test_subject) <- 'Subject_No'
test_set <- get_set(test_set_path)
test_labels <- get_labels(test_labels_path)
names(test_labels) <- 'activity'
##################### Collecting the dataset ###################################
train_dataset <- cbind(train_subject,train_labels,train_set)
test_dataset <- cbind(test_subject ,test_labels ,test_set )
data_set <- rbind(train_dataset,test_dataset)
##################### constructing the tidy_dataset #########################
getUniqe <- function(data,subject,actv){ # subsetting over the subject and the activity
return(data[data$Subject_No == subject & data$activity == actv,])
}
#the number of rows will be the same as the number of subjects times the number of activities
num_rows <- length(unique(data_set$Subject_No))*length(activities)
#the number of columns will be the same as the number of columns in the dataset
num_cols <- dim(data_set)[2]
#constructing a matrix with the expected dimensions
m <- matrix(nrow = num_rows,ncol = num_cols)
tidy_dataset <- as.data.frame(m)
names(tidy_dataset) <- paste('Average_Of',names(data_set),sep='_')
n <- 1 # we'll start building the tidy dataset from row number 1
#looping over the subjects and activities and subsetting over them
for(S in unique(data_set$Subject_No)){
for(A in activities){
tmp <- colMeans(getUniqe(data_set,S,A)[3:num_cols]) #getting the column means starting from the third column to the end
col1 <- S # the first column will be the subject
col2 <- A # the second column will be the activity
tidy_dataset[n,1] <- S # plugging the first column
tidy_dataset[n,2] <- A # plugging the second column
tidy_dataset[n,3:num_cols ] <- tmp # plugging the columns means
n = n+1 # the next row
}
}
dir.create('analyse_results')
write.table(data_set, "analyse_results/dataset.txt") # getting the data set file
write.table(tidy_dataset, "analyse_results/tidy_dataset.txt") # getting the tidy data set file
|
# conditional statements
### IF
x = 3
if (x == 3){
print('x is equal to 3')
}
### else
if (x>3){
print('x is greater than 3')
}else{
print('x is not greater than 3')
}
### else if
if (x >3){
print('x is greater than 3')
}else if (x == 3){
print('x is equal to 3')
}else{
print('x is less than 3')
}
a = readline(prompt = 'Enter a value: ')
a = as.integer(a)
4|3
4 %% 3
number <- 2
#number <- as.integer(number)
if ((number%%2) == 0){
print('Given number is even')
}else{
print('Given number is odd')
}
x = 10
while (x > 0){
x = x-1
if (x == 3){
next
}
print(x)
}
v = c(1,2,3,4)
for (i in v){
print(i)
}
n = 3
n1 = (n%%2)+1
a <- switch(n1,'Even','Odd')
print(a)
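### switch also matches on character names (a small extra example)
day_type <- switch('sat', mon = 'weekday', sat = 'weekend', sun = 'weekend')
print(day_type)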
### functions
# Python syntax, for comparison:
# def fun(): print('This is a function')
fun <- function(){
print('This is a function')
}
fun()
add <- function(){
x = 1
y = 2
print(x+y)
}
add()
## arguments
add <- function(x,y){
print(x+y)
}
add(2,3)
## return
add <- function(x,y){
return (x+y)
}
n <- add(4,5)
print(n)
sub <- function(x,y){
return (x-y)
}
multi <- function(x,y){
return (x*y)
}
n <- readline(prompt = '1.Add, 2.Sub, 3.Multi -> ')
n <- as.integer(n)
s <- switch(n, add(4,3), sub(4,3), multi(4,4))
s
fun <- function(){
print('This is a function')
print('This is another function')
}
v <- c(1,2,-3,4)
c = 0
for (i in v){
c = c+1
if (i < 0){
v[c] = 0
}else{
print(i)
}
}
v
v <- c(1,2,3,4)
v[4]
v[5] = 6
name = 'Eminent'
print(paste('Hi',name))
|
/2. Control Statements/cs.R
|
no_license
|
Hemanthkaruturi/R_tutorials
|
R
| false | false | 1,490 |
r
|
# conditional statements
### IF
x = 3
if (x == 3){
print('x is equal to 3')
}
### else
if (x>3){
print('x is greater than 3')
}else{
print('x is not greater than 3')
}
### else if
if (x >3){
print('x is greater than 3')
}else if (x == 3){
print('x is equal to 3')
}else{
print('x is less than 3')
}
a = readline(prompt = 'Enter a value: ')
a = as.integer(a)
4|3
4 %% 3
number <- 2
#number <- as.integer(number)
if ((number%%2) == 0){
print('Given number is even')
}else{
print('Given number is odd')
}
x = 10
while (x > 0){
x = x-1
if (x == 3){
next
}
print(x)
}
v = c(1,2,3,4)
for (i in v){
print(i)
}
n = 3
n1 = (n%%2)+1
a <- switch(n1,'Even','Odd')
print(a)
### functions
# Python syntax, for comparison:
# def fun(): print('This is a function')
fun <- function(){
print('This is a function')
}
fun()
add <- function(){
x = 1
y = 2
print(x+y)
}
add()
## arguments
add <- function(x,y){
print(x+y)
}
add(2,3)
## return
add <- function(x,y){
return (x+y)
}
n <- add(4,5)
print(n)
sub <- function(x,y){
return (x-y)
}
multi <- function(x,y){
return (x*y)
}
n <- readline(prompt = '1.Add, 2.Sub, 3.Multi -> ')
n <- as.integer(n)
s <- switch(n, add(4,3), sub(4,3), multi(4,4))
s
fun <- function(){
print('This is a function')
print('This is another function')
}
v <- c(1,2,-3,4)
c = 0
for (i in v){
c = c+1
if (i < 0){
v[c] = 0
}else{
print(i)
}
}
v
v <- c(1,2,3,4)
v[4]
v[5] = 6
name = 'Eminent'
print(paste('Hi',name))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alimap.R
\docType{package}
\name{alimap}
\alias{alimap}
\alias{alimap-package}
\title{alimap: Chinese Maps from Aliyun}
\description{
A package to supply Chinese maps (sf objects)
from aliyun (http://datav.aliyun.com/tools/atlas).
}
\section{How to get the maps }{
map_nation() : get the national map of China.
map_province() : get the provincial map of China.
map_prefecture_city() : get the map of prefecture cities of China.
}
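% Usage sketch; the functions are called with defaults because their arguments
% are not documented here.
\examples{
\dontrun{
library(alimap)
china <- map_nation() # national map as an sf object
provinces <- map_province() # provincial map
plot(sf::st_geometry(provinces))
}
}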
|
/man/alimap.Rd
|
no_license
|
swcyo/alimap
|
R
| false | true | 512 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alimap.R
\docType{package}
\name{alimap}
\alias{alimap}
\alias{alimap-package}
\title{alimap: Chinese Maps from Aliyun}
\description{
A package to supply Chinese maps (sf objects)
from aliyun (http://datav.aliyun.com/tools/atlas).
}
\section{How to get the maps }{
map_nation() : get the national map of China.
map_province() : get the provincial map of China.
map_prefecture_city() : get the map of prefecture cities of China.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SGV.R
\name{nimble_sparse_tcrossprod}
\alias{nimble_sparse_tcrossprod}
\title{nimble_sparse_tcrossprod}
\usage{
nimble_sparse_tcrossprod(i, j, x, subset)
}
\arguments{
\item{i}{Vector of row indices.}
\item{j}{Vector of column indices.}
\item{x}{Vector of values in the matrix.}
\item{subset}{Optional vector of rows to include in the calculation.}
}
\description{
nimble_sparse_tcrossprod
}
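% Illustration of the (i, j, x) triplet convention assumed here; the dense
% equivalent is shown for comparison and the call itself is a sketch.
\examples{
\dontrun{
i <- c(1, 1, 2); j <- c(1, 3, 2); x <- c(2, 1, 4)
A <- matrix(0, 2, 3); A[cbind(i, j)] <- x
tcrossprod(A) # dense result for comparison
nimble_sparse_tcrossprod(i, j, x) # sparse version of the same product
}
}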
|
/man/nimble_sparse_tcrossprod.Rd
|
no_license
|
cran/BayesNSGP
|
R
| false | true | 473 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SGV.R
\name{nimble_sparse_tcrossprod}
\alias{nimble_sparse_tcrossprod}
\title{nimble_sparse_tcrossprod}
\usage{
nimble_sparse_tcrossprod(i, j, x, subset)
}
\arguments{
\item{i}{Vector of row indices.}
\item{j}{Vector of column indices.}
\item{x}{Vector of values in the matrix.}
\item{subset}{Optional vector of rows to include in the calculation.}
}
\description{
nimble_sparse_tcrossprod
}
|
options(digits = 3)
library(tidyverse)
library(dslabs)
library(dplyr)
set.seed(28, sample.kind = "Rounding")
n<-1000
z<-qnorm(0.05)
mu <- (-1117250)
se <- 580994.333
l <- (-150000)
p <- 0.015
#Calculate the premium required for a 5% chance of losing money given n=1000 loans, probability of death p=0.015, and loss per claim l=-150000. Save this premium as x for use in further questions.
#x <- -l*( n*p - z*sqrt(n*p*(1-p)))/ ( n*(1-p) + z*sqrt(n*p*(1-p)))
#What is the expected profit per policy at this rate?
#expected_profit <- l*p + x*(1-p)
#expected_profit
#What is the expected profit over 1,000 policies?
#n*expected_profit
B <- 10000
pr_profit <- replicate(B, {
pr_losing <- sample( c(3268.063103, (-150000)), n,
prob=c(1-p, p), replace = TRUE)
sum(pr_losing)
})
mean(pr_profit) # expected value of the profit over n loans
mean(pr_profit<0)
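#Sanity check (sketch): CLT approximation of the same loss probability at this premium
x <- 3268.063103 # premium used in the sampling model above
mu_total <- n*(l*p + x*(1-p)) # expected total profit over n policies
se_total <- sqrt(n)*abs(x-l)*sqrt(p*(1-p)) # standard error of the total profit
pnorm(0, mu_total, se_total) # should be close to 0.05 by construction of x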
|
/Probability/bigshort 5abc.R
|
no_license
|
asajini/data-viz-R
|
R
| false | false | 893 |
r
|
options(digits = 3)
library(tidyverse)
library(dslabs)
library(dplyr)
set.seed(28, sample.kind = "Rounding")
n<-1000
z<-qnorm(0.05)
mu <- (-1117250)
se <- 580994.333
l <- (-150000)
p <- 0.015
#Calculate the premium required for a 5% chance of losing money given n=1000 loans, probability of death p=0.015, and loss per claim l=-150000. Save this premium as x for use in further questions.
#x <- -l*( n*p - z*sqrt(n*p*(1-p)))/ ( n*(1-p) + z*sqrt(n*p*(1-p)))
#What is the expected profit per policy at this rate?
#expected_profit <- l*p + x*(1-p)
#expected_profit
#What is the expected profit over 1,000 policies?
#n*expected_profit
B <- 10000
pr_profit <- replicate(B, {
pr_losing <- sample( c(3268.063103, (-150000)), n,
prob=c(1-p, p), replace = TRUE)
sum(pr_losing)
})
mean(pr_profit) # expected value of the profit over n loans
mean(pr_profit<0)
|
# Simulate Godfrey Thomson's "sampling model" of mental abilities, and perform
# factor analysis on the resulting test scores.
# MASS package used to make random multivariate normal vectors
require(MASS)
# Simulate the Thomson model
# Follow Thomson's original sampling-without-replacement scheme
# Pick a random number in 1:a for the number of shared abilities for each test
# Then draw a sample-without-replacement of that size from 1:a; those are the
# shared abilities summed in that test.
# Specific variance of each test is also random; draw a number in 1:q, and
# sum that many independent normals, with the same parameters as the
# abilities.
# Inputs: number of testees, number of tests, number of shared abilities, number
# of specific abilities per test, mean of each ability, sd of each ability
# Calls: mvrnorm from library MASS (multivariate random normal generator)
# Outputs: matrix of test loadings on to general abilities, vector of number of
# specific abilities per test, matrix of abilities-by-testees, matrix of
# general+specific scores by testees, raw data (including measurement noise)
rthomson <- function(n,d,a,q,ability.mean=0,ability.sd=1) {
# Using incomprehensible parameter names is bad
# number of testees = n
# number of tests = d
# number of shared abilities = a
# max. number of specific abilities per test = q
# assign abilities to tests
general.per.test = floor(runif(d,min=0,max=a)) + 1
specifics.per.test = floor(runif(d,min=0,max=q)) + 1
# Define the matrix assigning abilities to tests
general.to.tests = matrix(0,a,d)
# The use of a for loop here is maybe a little slower than some sort
# of vectorization, but it's sanity-preserving; so is using the temporary
# variable "abilties" to hold the sample.
for (i in 1:d) {
abilities = sample(1:a,size=general.per.test[i],replace=FALSE)
general.to.tests[abilities,i] = 1
}
# Covariance matrix of the general abilities
sigma = matrix(0,a,a)
diag(sigma) = (ability.sd)^2
mu=rep(ability.mean,a)
x = mvrnorm(n,mu,sigma) # person-by-abilities matrix of abilities
# The "general" part of the tests
general.tests = x %*% general.to.tests
specific.tests = matrix(0,n,d)
noisy.tests = matrix(0,n,d)
# Each test gets its own specific abilities, which are independent for each
# person
# Again, I could probably vectorize, but d is small and this is saner
for (i in 1:d) {
# Each test has specifics.per.test disturbances, each of which has the
# given sd; since these are all independent their variances add
j = specifics.per.test[i]
specifics = rnorm(n,mean=ability.mean*j,sd=ability.sd*sqrt(j))
specific.tests[,i] = general.tests[,i] + specifics
# Finally, for extra realism, some mean-zero trial-to-trial noise, so
# that if we re-use this combination of general and specific ability
# scores, we won't get the exact same test scores twice
noises = rnorm(n,mean=0,sd=ability.sd)
noisy.tests[,i] = specific.tests[,i] + noises
}
tm = list(data=noisy.tests,general.ability.pattern = general.to.tests,
numbers.of.specifics = specifics.per.test,
ability.matrix = x, specific.tests = specific.tests)
return(tm)
}
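# Usage sketch: simulate test scores and run the factor analysis promised in the
# header (factanal() from base R); the parameter values here are illustrative only.
tm <- rthomson(n = 500, d = 11, a = 8, q = 5)
fa.fit <- factanal(tm$data, factors = 1)
print(fa.fit$loadings) # a single general factor typically loads on every test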
|
/Advanced_Data_Analysis_with_Elem_POV_Shalizi/fa/thomson-model.R
|
no_license
|
burakbayramli/books
|
R
| false | false | 3,370 |
r
|
# Simulate Godfrey Thomson's "sampling model" of mental abilities, and perform
# factor analysis on the resulting test scores.
# MASS package used to make random multivariate normal vectors
require(MASS)
# Simulate the Thomson model
# Follow Thomson's original sampling-without-replacement scheme
# Pick a random number in 1:a for the number of shared abilities for each test
# Then draw a sample-without-replacement of that size from 1:a; those are the
# shared abilities summed in that test.
# Specific variance of each test is also random; draw a number in 1:q, and
# sum that many independent normals, with the same parameters as the
# abilities.
# Inputs: number of testees, number of tests, number of shared abilities, number
# of specific abilities per test, mean of each ability, sd of each ability
# Calls: mvrnorm from library MASS (multivariate random normal generator)
# Outputs: matrix of test loadings on to general abilities, vector of number of
# specific abilities per test, matrix of abilities-by-testees, matrix of
# general+specific scores by testees, raw data (including measurement noise)
rthomson <- function(n,d,a,q,ability.mean=0,ability.sd=1) {
# Using incomprehensible parameter names is bad
# number of testees = n
# number of tests = d
# number of shared abilities = a
# max. number of specific abilities per test = q
# assign abilities to tests
general.per.test = floor(runif(d,min=0,max=a)) + 1
specifics.per.test = floor(runif(d,min=0,max=q)) + 1
# Define the matrix assigning abilities to tests
general.to.tests = matrix(0,a,d)
# The use of a for loop here is maybe a little slower than some sort
# of vectorization, but it's sanity-preserving; so is using the temporary
# variable "abilties" to hold the sample.
for (i in 1:d) {
abilities = sample(1:a,size=general.per.test[i],replace=FALSE)
general.to.tests[abilities,i] = 1
}
# Covariance matrix of the general abilities
sigma = matrix(0,a,a)
diag(sigma) = (ability.sd)^2
mu=rep(ability.mean,a)
x = mvrnorm(n,mu,sigma) # person-by-abilities matrix of abilities
# The "general" part of the tests
general.tests = x %*% general.to.tests
specific.tests = matrix(0,n,d)
noisy.tests = matrix(0,n,d)
# Each test gets its own specific abilities, which are independent for each
# person
# Again, I could probably vectorize, but d is small and this is saner
for (i in 1:d) {
# Each test has specifics.per.test disturbances, each of which has the
# given sd; since these are all independent their variances add
j = specifics.per.test[i]
specifics = rnorm(n,mean=ability.mean*j,sd=ability.sd*sqrt(j))
specific.tests[,i] = general.tests[,i] + specifics
# Finally, for extra realism, some mean-zero trial-to-trial noise, so
# that if we re-use this combination of general and specific ability
# scores, we won't get the exact same test scores twice
noises = rnorm(n,mean=0,sd=ability.sd)
noisy.tests[,i] = specific.tests[,i] + noises
}
tm = list(data=noisy.tests,general.ability.pattern = general.to.tests,
numbers.of.specifics = specifics.per.test,
ability.matrix = x, specific.tests = specific.tests)
return(tm)
}
|
# quantmod introduction
#install.packages("quantmod") #Install the quantmod library
library("quantmod") #Load the quantmod Library
stockData <- new.env() #Make a new environment for quantmod to store data in
tickers <- c("FAST","WFC") #Define the tickers we are interested in
#Download the stock history (for all tickers)
getSymbols(tickers, env = stockData, src = "yahoo")
StockPrices <- get("FAST", envir = stockData)
#Use head to show first six rows of matrix
head(StockPrices)
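#A possible next step (sketch): chart the series and compute daily returns
chartSeries(StockPrices, name = "FAST") #quantmod's built-in OHLC chart
dailyReturns <- dailyReturn(Cl(StockPrices)) #returns computed from the closing price
head(dailyReturns)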
|
/QuantmodIntro.R
|
no_license
|
zhy031311/StockAnalysis
|
R
| false | false | 488 |
r
|
# quantmod introduction
#install.packages("quantmod") #Install the quantmod library
library("quantmod") #Load the quantmod Library
stockData <- new.env() #Make a new environment for quantmod to store data in
tickers <- c("FAST","WFC") #Define the tickers we are interested in
#Download the stock history (for all tickers)
getSymbols(tickers, env = stockData, src = "yahoo")
StockPrices <- get("FAST", envir = stockData)
#Use head to show first six rows of matrix
head(StockPrices)
|
#' Get all raw metadata from SRA
#'
#' Get all raw metadata from SRA associated with SRA accessions numbers
#'
#' @param run_accession A vector with run accessions
#' @param sample_accession A vector with sample accessions
#' @param experiment_accession A vector with experiment accessions
#' @param study_accession A vector with study accessions
#' @param submission_accession A vector with submission accessions
#' @param platform Sequencing platform, e.g "ILLUMINA"
#' @param db_conn Connection to the sqlite database
#' @return A dataframe with all metadata associated with the filters provided to
#' the function.
#'
#' @export
#'
#' @examples
#' library('srametadata')
#'
#' sqlfile <- file.path('.', 'SRAmetadb.sqlite')
#'
#' # Create connection
#' conn <- dbConnect(SQLite(),sqlfile)
#'
#' get_raw_metadata(run_accession = c("DRR023427", "SRR946066", "ERR204978"),
#' experiment_accession = "SRX329581",
#' sample_accession = c("SRS465599","DRS014276"),
#' study_accession = "SRP028344",
#' submission_accession = "SRA096347",
#' platform = "ILLUMINA",
#' db_conn = conn)
#'
#' get_raw_metadata(experiment_accession = "SRX329581",
#' db_conn = conn)
#'
#' get_raw_metadata(
#' run_accession = c("DRR023427", "SRR946066", "ERR204978"),
#' db_conn = conn)
#'
#' get_raw_metadata(
#' sample_accession = c("SRS465599","DRS014276"),
#' db_conn = conn)
#'
#' all_illumina_sra <- get_raw_metadata(platform = "ILLUMINA", db_conn = conn)
get_raw_metadata <- function(run_accession = NULL,
sample_accession = NULL,
experiment_accession = NULL,
study_accession = NULL,
submission_accession = NULL,
platform = NULL,
db_conn){
md <- list(
run_accession = run_accession,
sample_accession = sample_accession,
experiment_accession = experiment_accession,
study_accession = study_accession,
submission_accession = submission_accession
) %>% lapply(str_trim)
labs <- names(md)
md <- lapply(seq_along(md), function(i){
if(length(md[[i]])){
query <- paste0("SELECT ", paste(names(md), collapse = ", ") ,
" FROM sra WHERE ", names(md)[[i]] ,
" IN ('", paste(md[[i]], collapse = "', '"), "');")
dbGetQuery(db_conn, query)
}
}
)
names(md) <- labs
ids <- lapply(md, length) %>% unlist() %>% sum()
if(length(platform) & ids){
platform <- str_trim(platform) %>% str_to_upper()
runacc <- Reduce(intersect,lapply(md,"[[",1))
query <- paste0("SELECT ", paste(labs, collapse = ", ") ," FROM sra WHERE
platform IN ('", paste(platform,
collapse = "', '"), "')
AND run_accession IN ('",
paste(runacc, collapse = "', '"), "');")
md$platform <- dbGetQuery(db_conn, query)
}else if(length(platform) & !ids){
platform <- str_trim(platform) %>% str_to_upper()
query <- paste0("SELECT * FROM sra WHERE
platform IN ('", paste(platform,
collapse = "', '"), "');")
md$platform <- dbGetQuery(db_conn, query)
return(md$platform)
}
runacc <- unique(Reduce(union,lapply(md,"[[",1)))
query <- paste0("SELECT * FROM sra WHERE
run_accession IN ('", paste(runacc,
collapse = "', '"), "');")
md <- dbGetQuery(db_conn, query)
if(nrow(md)){
return(md)
}else{
return(NA)
}
}
|
/R/get_raw_metadata.R
|
no_license
|
joseah/srametadata
|
R
| false | false | 3,943 |
r
|
#' Get all raw metadata from SRA
#'
#' Get all raw metadata from SRA associated with SRA accessions numbers
#'
#' @param run_accession A vector with run accessions
#' @param sample_accession A vector with sample accessions
#' @param experiment_accession A vector with experiment accessions
#' @param study_accession A vector with study accessions
#' @param submission_accession A vector with submission accessions
#' @param platform Sequencing platform, e.g "ILLUMINA"
#' @param db_conn Connection to the sqlite database
#' @return A dataframe with all metadata associated with the filters provided to
#' the function.
#'
#' @export
#'
#' @examples
#' library('srametadata')
#'
#' sqlfile <- file.path('.', 'SRAmetadb.sqlite')
#'
#' # Create connection
#' conn <- dbConnect(SQLite(),sqlfile)
#'
#' get_raw_metadata(run_accession = c("DRR023427", "SRR946066", "ERR204978"),
#' experiment_accession = "SRX329581",
#' sample_accession = c("SRS465599","DRS014276"),
#' study_accession = "SRP028344",
#' submission_accession = "SRA096347",
#' platform = "ILLUMINA",
#' db_conn = conn)
#'
#' get_raw_metadata(experiment_accession = "SRX329581",
#' db_conn = conn)
#'
#' get_raw_metadata(
#' run_accession = c("DRR023427", "SRR946066", "ERR204978"),
#' db_conn = conn)
#'
#' get_raw_metadata(
#' sample_accession = c("SRS465599","DRS014276"),
#' db_conn = conn)
#'
#' all_illumina_sra <- get_raw_metadata(platform = "ILLUMINA", db_conn = conn)
get_raw_metadata <- function(run_accession = NULL,
sample_accession = NULL,
experiment_accession = NULL,
study_accession = NULL,
submission_accession = NULL,
platform = NULL,
db_conn){
md <- list(
run_accession = run_accession,
sample_accession = sample_accession,
experiment_accession = experiment_accession,
study_accession = study_accession,
submission_accession = submission_accession
) %>% lapply(str_trim)
labs <- names(md)
md <- lapply(seq_along(md), function(i){
if(length(md[[i]])){
query <- paste0("SELECT ", paste(names(md), collapse = ", ") ,
" FROM sra WHERE ", names(md)[[i]] ,
" IN ('", paste(md[[i]], collapse = "', '"), "');")
dbGetQuery(db_conn, query)
}
}
)
names(md) <- labs
ids <- lapply(md, length) %>% unlist() %>% sum()
if(length(platform) & ids){
platform <- str_trim(platform) %>% str_to_upper()
runacc <- Reduce(intersect,lapply(md,"[[",1))
query <- paste0("SELECT ", paste(labs, collapse = ", ") ," FROM sra WHERE
platform IN ('", paste(platform,
collapse = "', '"), "')
AND run_accession IN ('",
paste(runacc, collapse = "', '"), "');")
md$platform <- dbGetQuery(db_conn, query)
}else if(length(platform) & !ids){
platform <- str_trim(platform) %>% str_to_upper()
query <- paste0("SELECT * FROM sra WHERE
platform IN ('", paste(platform,
collapse = "', '"), "');")
md$platform <- dbGetQuery(db_conn, query)
return(md$platform)
}
runacc <- unique(Reduce(union,lapply(md,"[[",1)))
query <- paste0("SELECT * FROM sra WHERE
run_accession IN ('", paste(runacc,
collapse = "', '"), "');")
md <- dbGetQuery(db_conn, query)
if(nrow(md)){
return(md)
}else{
return(NA)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_markers_fn.R
\name{find_markers}
\alias{find_markers}
\title{Find marker measurements for each group.}
\usage{
find_markers(Y, pure_samples, data_type = NULL, gamma = NULL,
method = "eta")
}
\arguments{
\item{Y}{the expression data set.}
\item{pure_samples}{List of pure samples for each group.}
\item{data_type}{A string indicating the data type, used to choose a pre-estimated gamma value. Current support for probe-level microarray as ``microarray-probe'', gene-level microarray as ``microarray-gene'' or rna-seq as ``rna-seq''.}
\item{gamma}{The sensitivity tuning parameter if we want to over-ride the choice made by ``data_type``.}
\item{method}{Method to select markers.}
}
\value{
List with two elements. ``L'' contains the ranked markers for each group and ``V'' the corresponding values of the ranking method (lower is better).
}
\description{
Find marker measurements for each group.
}
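% A usage sketch; the object shapes assumed here (measurements in columns of Y,
% pure_samples as row indices per group) are illustrative, not definitive.
\examples{
\dontrun{
Y <- matrix(rnorm(40), nrow = 8, ncol = 5)
pure_samples <- list(groupA = 1:3, groupB = 4:6)
mk <- find_markers(Y, pure_samples, data_type = "rna-seq")
mk$L # ranked markers per group
mk$V # corresponding ranking values (lower is better)
}
}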
|
/lib_deconv/man/find_markers.Rd
|
no_license
|
gjhunt/deconvolution
|
R
| false | true | 1,013 |
rd
|
## DATA VISUALIZATION IN R WORKSHOP
## Center for Research Data and Digital Scholarship
## University of Colorado, Boulder
## N. Chardon
## 11 Oct 2017
## Aims:
## 1. Consider what is sensible to plot
## 2. Practice making figures with base package
## 3. Practice making figures with lattice package
## 4. Practice making figures with ggplot2 package
## 5. Save figures in different formats
## Using this script:
## 1. The symbol '#' denotes comments.
## If in the same line as a command, these comments describe what the command executes.
## 2. All sections labeled 'PRACTICE' are meant to be completed on your own.
## 3. The corresponding voiceover to this script is available on OSF (https://osf.io/6jb9t/)
## Disclaimer: This script was written by Nathalie Chardon and reflects her coding style.
## The University of Colorado, Boulder takes no responsibility for the content.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # WELCOME! # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Introduce yourself to your neighbor(s)
# What kinds of graphics are used in your field and created by your colleagues?
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # GETTING STARTED # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # INSTALL & LOAD PACKAGES # # ---------------------------------------------------------------------
# lattice package
install.packages('lattice') #only run this once to store package to hard drive
library(lattice)
# ggplot2 package
install.packages('ggplot2') #only run this once to store package to hard drive
library(ggplot2)
# # LOAD DATA # # ----------------------------------------------------------------------------------
data(mtcars)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # 1. WHAT TO PLOT # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # DATA # # ---------------------------------------------------------------------------------------
# What is in your data?
head(mtcars) #displays first 6 rows of dataframe
?mtcars #documentation for this data, useful to create for own data as separate text file
summary(mtcars) #statistical summary of each column
# How is your data organized?
colnames(mtcars) #column names
str(mtcars) #structure: make sure your numeric variables are numeric!
dim(mtcars) #rows, columns
# # QUESTIONS # # -----------------------------------------------------------------------------------
# What are some questions you could answer with this data?
#
#
# EX1: Does cylinder (cyl) number affect miles/gallon (mpg)?
# EX2: Does horsepower (hp) affect 1/4 mile time (qsec)?
# Think about sensible relationships between your data before plotting.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # 2. BASE # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # DISTRIBUTION OF DATA: HISTOGRAMS # # ------------------------------------------------------------
hist(mtcars$mpg) #create histogram
hist(mtcars$mpg, breaks=20) #define splits
# Create histograms of horsepower and 1/4 mile time with different splits
#
#
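# One possible solution (the number of breaks is an illustrative choice):
hist(mtcars$hp, breaks = 10)
hist(mtcars$qsec, breaks = 15)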
# Plot aesthetics
hist(mtcars$mpg,
main = 'Histogram of miles/gallon', #title (note the quotation marks)
xlab = 'Miles/gallon', #x axis label
col = 'lightblue') #color fill
# NOTE: These aesthetics commands are the same in other plot types.
# Each subsequent command MUST follow a comma.
# Change the plot aesthetics of your horsepower or 1/4 mile time histograms
#
#
# # DATA RELATIONSHIPS: SCATTERPLOTS # # -----------------------------------------------------------
plot(qsec ~ hp, data = mtcars) #dependent variable ~ independent variable, data = your DF
# Adding regression line to plot
lin_mod <- lm(qsec ~ hp, data = mtcars) #linear model for relationship that looks promising
summary(lin_mod) #check model output for significance, r-squared, etc.
abline(lin_mod) #adds regression line to current plot (usually only sensible if p-value < 0.05)
# Aesthetics: plot point color, symbol, and size
plot(qsec ~ hp, data = mtcars,
col = 'violet', #changes color, default = 'black'
pch = 18, #changes symbol, default = 1
cex = 1.5) #changes point size, default = 1
# Aesthetics: regression line color, type, and width
abline(lin_mod, #adds regression line to current plot
col = 'blue', #changes color, default = 'black'
lty = 4, #changes line type, default = 1
lwd = 1.5) #changes line width, default = 1
# Create a scatterplot with one of your relationships of interest
#
#
# Add title, x- and y-axis labels, and change the color, symbol, and size of your points
#
#
# Add a regression line to your plot
#
#
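# One possible solution to the three prompts above (variables are an illustrative choice):
plot(mpg ~ wt, data = mtcars,
     main = 'Effect of weight on MPG', #title
     xlab = 'Weight (1000 lbs)', ylab = 'Miles/gallon', #axis labels
     col = 'darkgreen', pch = 16, cex = 1.2) #point color, symbol, size
abline(lm(mpg ~ wt, data = mtcars), col = 'red', lwd = 2) #regression line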
# # DATA RELATIONSHIPS: BOXPLOTS # # ---------------------------------------------------------------
# Boxplots: better with discrete or categorical measurements in independent variables
boxplot(mpg ~ cyl, data = mtcars)
# # SETTING DEFAULT GRAPHICAL PARAMETERS # # --------------------------------------------------------
# NOTE: These settings can also be specified within a plotting function to affect only that plot.
# Aesthetics
par(cex = 2, #changes size of plotting text and symbols
cex.axis = 1.2, #changes axis text size
cex.lab = 1.4, #changes label text size
cex.main = 1.3, #changes text title size
col = 'violet', #changes plotting color
col.axis = 'grey', #changes axes colors
col.lab = 'darkblue', #changes x and y label colors
col.main = 'darkblue', #changes title color
bg = 'grey') #changes background color
# NOTE: The default axis and label size in R is too small for publication figures.
# Run one of your plots above and notice how it has changed according to the par settings
# Multi-panel plots
par(mfrow = c(2, 2), #rows, columns for your panels
mar = c(4.5, 5, 2, 0.5)) #defines width for lower, left, upper, and right margins
# NOTE: Multi-panel plot with default margin settings will have too much white space.
# Run 4 of your plots above and notice how they'll create a panel
# Return to default graphical parameters
dev.off() #turns off device and clears plots
# # PRACTICE 1 # # ---------------------------------------------------------------------------------
# Set the default parameters to create a 2 row panel plot.
# HINT: This plot will have the rows, columns = 2,1.
#
#
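# One possible solution (margins are an illustrative choice):
par(mfrow = c(2, 1), mar = c(4.5, 5, 2, 0.5))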
# Create a scatterplot and boxplot for two different data relationships
#
#
# Add titles and axes labels, and use the aesthetic commands above to change other plot aspects.
# Go to <http://www.statmethods.net/advgraphs/parameters.html> for list of all par options
#
#
# TIP 1: Modify and use the function mtext() to add labels to your panels
mtext('A', 3, adj=0, cex=1.5) #run ?mtext for further arguments to this function
# TIP 2: Plotting multiple boxplots for the same variable
boxplot(mpg ~ cyl, data = mtcars[which(mtcars$am == 0),], #data subsetted by transmission type
        main = 'Effect of cylinder number and transmission type on MPG', #title
xlab = 'Number of cylinders', #x-axis label
ylab = 'Miles per gallon', #y-axis label
ylim = range(mtcars$mpg), #set y-axis limits with range of data for adequate plotting space
boxwex = 0.1) #set box width
boxplot(mpg ~ cyl, data = mtcars[which(mtcars$am == 1),], #data subsetted by transmission type
add = T, #adds to current boxplot
col = 'grey', #specify different color for new gear number
boxwex = .1, #same box width as above
at = c(1.2, 2.2, 3.2), #draw boxes at specified 1:n
axes = F) #don't draw new axes
# TIP 3: Adding legends
legend('topright', #location of legend
legend = c('Automatic Transmission', 'Manual Transmission'), #legend text
pch = c(0, 15), #symbols to match text above
col = c('black', 'grey')) #colors to match symbols and text above
# NOTE: The order of legend text, symbol, and color must match!
# # PLOT ACTIONS # # -------------------------------------------------------------------------------
# Zoom plot to separate window
# Export plot manually
# Export plot with commands
setwd('~/Desktop/') #set working directory to folder of your choice
setwd('c:/Users/.../Desktop/') #Windows: replace ... with your username
pdf('fig1.pdf') #function AND file extension indicates in what format to save plot in
boxplot(mpg ~ cyl, data = mtcars) #create plot
dev.off() #indicates that you are done plotting
# Other formats: replace 'pdf' line of code above with one of the following...
png('fig1.png') #saves plot as PNG
jpeg('fig1.jpg') #saves plot as JPEG
postscript('fig1.eps') #saves plot as EPS
# NOTE: PDF and EPS are vectorized, whereas PNG and JPEG are rasterized.
# Vectorized images: created out of many paths, so quality is higher
# Raster images: created out of pixels, so quality is lower
# Save one of your plots with commands
#
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # LATTICE # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # ONE VARIABLE PLOTS # # -------------------------------------------------------------------------
densityplot(~ mpg, data = mtcars) #displays density of data points
# # TWO VARIABLE PLOTS # # -------------------------------------------------------------------------
dotplot(rownames(mtcars) ~ mpg, data = mtcars) #dotplot to visualize all observations
# NOTE: Need to give your data rows names for the above to work, but can use any y variable here.
# # THREE+ VARIABLE PLOTS # # ----------------------------------------------------------------------
# All relationships
splom(mtcars) #scatterplot matrix
# Trellis graphs: display relationships between variables conditioned on one or more variables
xyplot(mpg ~ cyl | am, data = mtcars) #plots relationship x ~ y | based on another variable
# Plot effect of transmission type on MPG, split by number of cylinders
#
#
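# One possible solution:
xyplot(mpg ~ am | factor(cyl), data = mtcars)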
xyplot(mpg ~ cyl | am, groups = carb, data = mtcars) #additionally split by groups
# Plot effect of cylinder on MPG, split by carburetor and transmission in different colors
#
#
# # PLOT AESTHETICS # # ----------------------------------------------------------------------------
# Modified with update()
trell_plot <- xyplot(mpg ~ cyl | am, groups = carb, data = mtcars) #create plot object
# Run the following and notice how your plot changes
update(trell_plot,
main = 'Effect of cylinder and carburetors on MPG by transmission type', #add title
       xlab = 'Cylinders', ylab = 'Miles per Gallon', #add axis labels
auto.key = T) #add legend
update(trell_plot,
auto.key = list(space = 'right')) #add legend right of plot
update(trell_plot,
auto.key = list(title = 'Carburetors', #add legend title
cex = 0.5)) #change legend size
# Choose any one of your plots and:
# 1. Add a title, x and y axes labels, and a legend;
# 2. Then save it as a PDF or JPEG
#
#
#
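# One possible solution (illustrative file name; print() ensures the lattice object renders when the script is sourced):
pdf('trellis_mpg.pdf')
print(update(trell_plot,
             main = 'Effect of cylinder and carburetors on MPG by transmission type',
             xlab = 'Cylinders', ylab = 'Miles per Gallon',
             auto.key = list(title = 'Carburetors', space = 'right')))
dev.off()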
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # GGPLOT2 # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # Grammar of Graphics: build graphs from a few necessary components # # --------------------------
# 1. data set (data)
# 2. aesthetics that represent data (aes)
# 3. visual marks to represent data points (geoms)
# (4. coordinate system)
# NOTE: ggplot() does not have defaults, so everything must be specified somewhere.
# 1. Create a plot with DATA and 2. AES
myplot <- ggplot(data = mtcars, aes(x = hp, y = mpg)) #saving as object facilitates adding layers
# Look at myplot: what do you notice?
# 3. Add GEOM layer
myplot + #plus symbol at end of line for new layer below
geom_point() #plot data as points
# Create a ggplot to illustrate one of your relationships of interest
#
#
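# One possible solution (variables are an illustrative choice):
ggplot(data = mtcars, aes(x = wt, y = mpg)) +
  geom_point()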
# # PLOT AESTHETICS: DATA POINTS # # ---------------------------------------------------------------
# Color of data points
myplot +
geom_point(color = 'blue', #adds color to points
size = 4) #increases size of points
# Transparency to avoid overplotting
myplot +
geom_point(alpha = 0.5) #adds transparency to points
# Conditioning on a third variable
myplot +
geom_point( aes(color = cyl) ) #color by another variable with aes()
myplot +
geom_point( aes(size = cyl) ) #size of points by another variable
# Enlarge the points of myplot and condition the data points on cylinder by color to make myplot2
# HINT: all arguments go inside geom_point()
#
#
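# One possible solution (the point size is an illustrative choice):
myplot2 <- myplot +
  geom_point(aes(color = cyl), size = 4)
myplot2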
# # PLOT AESTHETICS: LABELS # # --------------------------------------------------------------------
myplot2 +
ggtitle('Effect of horsepower on MPG by cylinder') +
xlab('Horsepower') +
ylab('Miles per Gallon')
# OR indicate all labels at once:
myplot2 +
labs(title = 'Effect of horsepower on MPG by cylinder', #title
x = 'Horsepower', y = 'Miles per Gallon') #x and y axes labels
# Create an object myplot3 with the above plot
#
#
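# One possible solution:
myplot3 <- myplot2 +
  labs(title = 'Effect of horsepower on MPG by cylinder',
       x = 'Horsepower', y = 'Miles per Gallon')
myplot3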
# # PLOT AESTHETICS: LEGEND # # --------------------------------------------------------------------
# Remove legend
myplot3 +
theme(legend.position = 'none') #no legends
# Edit legend
myplot3 +
theme(legend.position = 'left') + #can be 'bottom', 'top', 'left' or 'right'
labs(color = 'Cylinder') #change legend title
# # PRACTICE 2 # # ---------------------------------------------------------------------------------
# Change legend colors to discrete: need to change variable to factor (numeric gives gradient)
ggplot(data = mtcars, aes(x = hp, y = mpg,
group = factor(cyl), #change variable to factor
colour = factor(cyl))) + #specify color with grouping variable used above
geom_point() #no need to specify anything here
# # PLOT THEMES # # --------------------------------------------------------------------------------
# Default themes
myplot3 +
theme_classic()
myplot3 +
theme_bw()
# Customize your own theme
mytheme <- theme(axis.text.x = element_text(colour = 'black', size = 25), #x axis text size
axis.text.y = element_text(colour = 'black', size = 25), #y axis text size
axis.title.x = element_text(size = 28), #x axis label size
                 axis.title.y = element_text(size = 28), #y axis label size
plot.title = element_text(size = 30, #title size
hjust = 0.5)) #align title to center
myplot3 + mytheme #plot now has specified theme
# Create your own theme
#
#
# Create an object myplot4 with myplot3 and your new theme
#
#
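# One possible solution:
myplot4 <- myplot3 + mytheme
myplot4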
# # PRACTICE # # -----------------------------------------------------------------------------------
# Set a plot theme for all plots
theme_set(theme_classic()) #sets theme for future plots
myplot3 #this now plots with set theme
# NOTE: Once theme is set, there is no neat way to return to ggplot default graphical parameters
# # SAVE PLOT # # ----------------------------------------------------------------------------------
ggsave(myplot4, #plot to be saved
file='hpVmpg.pdf') #can save in many other image formats (e.g. .eps, .jpeg, .png)
# Save one of your plots to the Desktop
#
#
#NOTE: Set your working directory again if changed
setwd('~/Desktop/') #set working directory to folder of your choice
setwd('c:/Users/.../Desktop/') #Windows: replace ... with your username
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # RESOURCES # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # CU BOULDER # #
# Center for Research Data and Digital Scholarship
# Hacky Hours: Wednesdays 4-6PM (Norlin E206)
# Events: < http://www.colorado.edu/crdds/events >
# Listserv: < https://lists.colorado.edu/sympa/subscribe/crdds-news >
# Workshop voiceovers and scripts: < https://osf.io/6jb9t/ >
# Laboratory for Interdisciplinary Statistical Analysis
# <http://www.colorado.edu/lab/lisa/resources>
# # ONLINE SUPPORT # #
# Parts of this workshop reference Quick-R, Statmethod, and Data Carpentry:
# <http://www.statmethods.net/graphs/index.html>
# <http://www.statmethods.net/advgraphs/trellis.html>
# <http://www.datacarpentry.org/R-ecology-lesson/04-visualization-ggplot2.html>
# Graphical Parameters cheatsheet
# <http://www.statmethods.net/advgraphs/parameters.html>
# Data Visualization with ggplot2 Cheat Sheet:
# <https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf>
# Other Cheat Sheets:
# <https://www.rstudio.com/resources/cheatsheets/>
# Data Carpentry
# <http://www.datacarpentry.org/R-ecology-lesson/04-visualization-ggplot2.html>
# R manuals by CRAN
# <https://cran.r-project.org/manuals.html>
# Basic Reference Card
# <https://cran.r-project.org/doc/contrib/Short-refcard.pdf>
# R for Beginners (Emmanuel Paradis)
# <https://cran.r-project.org/doc/contrib/Paradis-rdebuts_en.pdf>
# The R Guide (W. J. Owen)
# <https://cran.r-project.org/doc/contrib/Owen-TheRGuide.pdf>
# An Introduction to R (Longhow Lam)
# <https://cran.r-project.org/doc/contrib/Lam-IntroductionToR_LHL.pdf>
# Cookbook for R
# <http://www.cookbook-r.com/>
# Advanced R (Hadley Wickham)
# <http://adv-r.had.co.nz/>
# rseek: search most online R documentation and discussion forums
# <http://rseek.org/>
# The R Inferno: useful for trouble shooting errors
# <http://www.burns-stat.com/documents/books/the-r-inferno/>
# Google: endless blogs, posted Q & A, tutorials, references guides where you're often
# directed to sites such as Stackoverflow, Crossvalidated, and the R-help mailing list.
# # ONLINE TUTORIALS # #
# YouTube R channel
# <https://www.youtube.com/user/TheLearnR>
# R Programming in Coursera
# <https://www.coursera.org/learn/r-programming>
# Various R videos
# <http://jeromyanglim.blogspot.co.uk/2010/05/videos-on-data-analysis-with-r.html>
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
/data_vis_R.R
|
no_license
|
mstefferson/Courses_IntroR
|
R
| false | false | 19,158 |
r
|
leerTablaUI <- function(id) {
ns <- NS(id)
tagList(
)
}
leerTablaServer <- function(id, nombre_carpeta, alumno_elegido) {
moduleServer(id, function(input, output, session) {
reactive({
file <- file.path("data", nombre_carpeta, alumno_elegido())
read_csv(paste0(file, ".csv"))
})
})
}
leerTablaApp <- function(){
ui <- fluidPage(
selectInput("elegir", "Elije", choices = c("alumno1", "alumno2", "alumno3", "alumno4", "alumno5")),
uiOutput("tabla")
)
server <- function(input, output, session) {
nombre_carpeta <- "tarea1/aliEstrategico"
# alumno_elegido <- reactive("alumno1")
output$tabla <- renderUI({
leerTablaServer("myTestId", nombre_carpeta, reactive(input$elegir))() %>%
flextable() %>%
htmltools_value()
})
}
shinyApp(ui, server)
}
# leerTablaApp()
|
/R/leerTabla.R
|
no_license
|
calderonsamuel/cursoGI
|
R
| false | false | 854 |
r
|
## Functions for creating a special matrix that can store an inverted matrix,
## and for getting the inverted matrix as a result
## Creates a special matrix, which is really a list containing function to
## set/get the value of the matrix, set/get the value of the cached inverted matrix
makeCacheMatrix <- function(x = matrix()) {
c <- NULL
set <- function(y) {
x <<- y
c <<- NULL
}
get <- function() x
setcache <- function(cache) c <<- cache
getcache <- function() c
list(set = set, get = get,
setcache = setcache,
getcache = getcache)
}
## Returns an inversed matrix (takes the cached matrix if already computed)
cacheSolve <- function(x, ...) {
c <- x$getcache()
if(!is.null(c)) {
message("getting cached data")
return(c)
}
data <- x$get()
c <- solve(data, ...)
x$setcache(c)
c
}
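## Quick illustrative check of the two functions (example values; safe to remove)
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m)  # computes the inverse and stores it in the cache
cacheSolve(m)  # prints "getting cached data" and returns the cached inverse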
|
/cachematrix.R
|
no_license
|
raffael-grasberger/ProgrammingAssignment2
|
R
| false | false | 894 |
r
|
suppressPackageStartupMessages(library(R.utils))
args <- commandArgs(trailingOnly = TRUE, asValues = TRUE)
combine <-
function(ss, si, m2, mu, vd, id_t, id_n){
## combine
library(VariantCombiner)
v1a <- readVcf(ss)
v1a <- v1a[fixed(v1a)$FILTER == "PASS"]
v1b <- readVcf(si)
v1b <- v1b[fixed(v1b)$FILTER == "PASS"]
s1a <- strelka_snv(v1a)
s1b <- strelka_indel(v1b)
## strelka2
v_s <- SomaticCombiner(s1a, s1b, sources = c("strelka2", "strelka2"),
GENO = c(GT = 1, DP = 1, AD = 1),
id_t = id_t, id_n = id_n)
## mutect2
m2v <- readVcf(m2)
## m2v <- m2v[fixed(m2v)$FILTER == "PASS"]
v_m <- SomaticCombiner(m2v, v_s, source = c("mutect2", "strelka2"),
GENO = c(GT = 1, DP = 1, AD = 1),
id_t = id_t, id_n = id_n)
## muse
mu1 <- readVcf(mu)
mu1 <- mu1[fixed(mu1)$FILTER == "PASS"]
v_m <- SomaticCombiner(v_m, mu1, source = c("", "muse"),
GENO = c(GT = 1, DP = 1, AD = 1),
id_t = id_t, id_n = id_n)
## vardict
vd1 <- readVcf(vd)
vd1 <- vd1[info(vd1)$STATUS == "StrongSomatic" & fixed(vd1)$FILTER == "PASS"]
vd1 <- vd1[!info(vd1)$TYPE %in% c("DEL", "DUP", "INV")]
v_m <- SomaticCombiner(v_m, vd1, source = c("", "vardict"),
GENO = c(GT = 1, DP = 1, AD = 1),
id_t = id_t, id_n = id_n)
writeVcf(v_m, paste0(id_t, "_", id_n, "_strelka2_mutect2_muse_vardict.vcf"))
}
do.call(combine, args)
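## Illustrative direct call of combine() from R (hypothetical file names and sample IDs):
## combine(ss = "strelka_snvs.vcf", si = "strelka_indels.vcf", m2 = "mutect2.vcf",
##         mu = "muse.vcf", vd = "vardict.vcf", id_t = "TUMOR01", id_n = "NORMAL01")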
|
/cwl/SomaticCaller_mouse/combine.R
|
no_license
|
hubentu/RcwlRecipes
|
R
| false | false | 1,571 |
r
|
context("test-derive_var_trtedtm")
test_that("TRTEDTM variable is added", {
adsl <- tibble::tibble(STUDYID = "STUDY", USUBJID = 1:3)
ex <- tibble::tribble(
~USUBJID, ~EXENDTC, ~EXSEQ, ~EXDOSE, ~EXTRT,
1L, "2020-01-01", 1, 12, "ACTIVE",
1L, "2020-02-03", 2, 9, "ACTIVE",
2L, "2020-01-02", 1, 0, "PLACEBO",
3L, "2020-03-13", 1, 14, "ACTIVE",
3L, "2020-03-21", 2, 0, "ACTIVE"
)
expected_output <- mutate(
adsl,
TRTEDTM = as_iso_dtm(c(
"2020-02-03T23:59:59",
"2020-01-02T23:59:59",
"2020-03-13T23:59:59"
))
)
actual_output <- derive_var_trtedtm(adsl, dataset_ex = ex)
expect_dfs_equal(
base = expected_output,
compare = actual_output,
keys = "USUBJID"
)
})
|
/tests/testthat/test-derive_var_trtedtm.R
|
no_license
|
rajkboddu/admiral
|
R
| false | false | 739 |
r
|
#Various tools for a first exploratory analysis of the VMS data
VMS <- read.csv('jittered_VMS.csv',sep=',')
IDs <- length(unique(VMS[,2]))
#segment the data by vessel ID and -> get what is a season, essentially
#for each i in IDs
#season$i = data.frame([VMS[which(VMS[,2] == i)],])
################################################################3
# Legacy cleaning
VMS_1 <- VMS[VMS$vessel_id==1,]
VMS_2 <- VMS[VMS$vessel_id==2,]
VMS_3 <- VMS[VMS$vessel_id==3,]
# Split on vessel id
#order by date, once segmented by vessel id
VMS_1 <- VMS_1[order(VMS_1$Date_Time),]
VMS_2 <- VMS_2[order(VMS_2$Date_Time),]
VMS_3 <- VMS_3[order(VMS_3$Date_Time),]
VMS_1_stats <- stats(VMS_1$Latitude,VMS_1$Longitude,VMS_1$Date_Time,VMS_1$onland,4)
VMS_2_stats <- stats(VMS_2$Latitude,VMS_2$Longitude,VMS_2$Date_Time,VMS_2$onland,4)
VMS_3_stats <- stats(VMS_3$Latitude,VMS_3$Longitude,VMS_3$Date_Time,VMS_3$onland,4)
#Isolate the indices of the times when the onland indicator is on
onland <- which(VMS_1$onland == 1)
# Choose the indices where the fisher is not continuously on land
jumpsonland <- onland[which(diff(onland) != 1)]
lapply(jumpsonland,printDist)
# Extract viable speeds
# plot(VMS_1$Avg_Speed[(VMS_1$Avg_Speed > 0.4) & (VMS_1$onland == 0) & (VMS_1$Avg_Speed < 50)])
# plot(VMS_1$Avg_Speed[(VMS_1$Avg_Speed > 0.4) & (VMS_1$onland == 0) & (VMS_1$Avg_Speed < 50)])
#using the lubridate package to extract hour from POSIX
VMS_1_offland <- VMS_1[which(VMS_1$onland == 0),]
VMS_1_offland$hour <- hour(VMS_1_offland$Date_Time)
awake <- subset(VMS_1_offland, VMS_1_offland$hour > 5)
#more cleaning
awake <- awake[complete.cases(awake),] #drop rows with missing values
awake <- awake[(awake$Avg_Speed < 50),]
speeds <- as.numeric(VMS_1$Avg_Speed)
plot(cumsum(as.numeric(speeds[1:2800])),xlab="Measurement #",ylab="Cumulative Speed from VMS data")
plot(cumsum(VMS_1$Avg_Speed[2000:7500]),xlab="index",ylab="Cumulative Sum of speed",main="Vessel 3, overview of cumulative speed", col=as.factor(VMS_1$onland))
VMS_1_split <- split_VMS(VMS_1,split)
### check for missing values
####################
####### NOTE: should properly be wrapped in a function with rm or not as a flag
for (i in 2:length(VMS_1_split)) {
df <- VMS_1_split[[i]]
# check by column
for (j in 1:dim(df)[2]) {
derp <- which(is.na(df[,j])) # check each column for missing values
if (length(derp) > 0) {
cat("Missing at iteration", i, ", column: ", j, ": ", derp,"\n") # print for debugging purposes
VMS_1_split[[i]] <- df[-derp,] # Remove the offending row
#################################
# NOTE: It looks like the missing values are mostly headings, which could potentially be calculated from position data
# TODO - more proper missing data
}
}
}
# iterate through again and check that everything was removed
which_isna_iterate <- function(df_list) {
  for (i in 2:length(df_list)) {
    df <- df_list[[i]]
    # check by column
    for (j in 1:dim(df)[2]) {
      derp <- which(is.na(df[,j])) # check each column for missing values
      if (length(derp) > 0) {
        cat("Missing at iteration", i, ", column: ", j, ": ", derp,"\n") # print for debugging purposes
      }
    }
  }
}
which_isna_iterate(VMS_1_split)
# After splitting: check that each trip has enough data points, arbitrarily set to 20
VMS_1_indices <- sapply(VMS_1_split, function(x) any(dim(x)[1] > 20) )
VMS_2_indices <- sapply(VMS_2_split, function(x) any(dim(x)[1] > 20) )
VMS_3_indices <- sapply(VMS_3_split, function(x) any(dim(x)[1] > 20) )
########################
# Compute the cubic hermite spline interpolation for each trip and write to a list of
# such data frames
# CRmInterpolate is modified catmull-rom from Russo, Parisi, Cataudella, 2011
#
# resolution: # minutes between observations
resolution <- 10
interpolated_list <- list()
for (i in 2:length(VMS_1_split)) {
df <- VMS_1_split[[i]]
interpolated_list[[i]] <- CRmInterpolate(df$Longitude,df$Latitude,df$Avg_Direction,df$Avg_Speed,10,df$Date_Time)
#print(i) #print for diagnostic purposes
}
stats_list <- list()
for (i in 2:length(interpolated_list)) {
df <- interpolated_list[[i]]
# if (df != NULL) {
stats_list[[i]] <- stats_interp(df[,1],df[,2],10,4)
# }
#print(i) #print for diagnostic purposes
}
# first element is data frame composed of the consecutively 0 speed states
# need to index with double square brackets
stats_list <- lapply(interpolated_list, plystats)
#################
#################
################# MISCELLANEOUS OLD TESTING CODE
output <- clustCombi(awake$Avg_Speed)
night <- (notconsec_VMS1$hour > 21 | notconsec_VMS1$hour < 5)
notconsec_day_VMS1 <- subset(notconsec_VMS1,notconsec_VMS1$night == FALSE)
plot(cumsum(notconsec_day_VMS1$Avg_Speed[1:500]), col=as.factor(notconsec_day_VMS1$night))
test_bcpa <- CRmInterpolate(VMS_1_split[[2]]$Longitude, VMS_1_split[[2]]$Latitude, VMS_1_split[[2]]$Avg_Direction, VMS_1_split[[2]]$Avg_Speed,10,VMS_1_split[[2]]$Date_Time)
stats_BCPA(test_bcpa[,3],test_bcpa[,4])
stats_BCPA_VMS(complex(re=VMS_1_split_lt[[35]]$Longitude,im=VMS_1_split_lt[[35]]$Latitude),VMS_1_split_lt[[35]]$Date_Time,30)
# # BCPA TESTING
HSMM_interp_raw <- CRmInterpolate(raw_data_HSMM_interp$Longitude,raw_data_HSMM_interp$Latitude,raw_data_HSMM_interp$Avg_Direction,raw_data_HSMM_interp$Avg_Speed,10,raw_data_HSMM_interp$Date_Time)
HSMM_interp_stats <- stats_BCPA_regular_notrain(complex(re=HSMM_interp_raw[,1],im=HSMM_interp_raw[,2]),HSMM_interp_raw[,3],50)
sum_stats
HSMM <- hsmm.viterbi(sum_stats)
|
/VMS_ETL_properlyabstracted.R
|
no_license
|
angelazhou/VMS-analysis
|
R
| false | false | 5,457 |
r
|
library(DnE)
### Name: is.pois
### Title: is.pois
### Aliases: is.pois
### Keywords: ~kwd1 ~kwd2
### ** Examples
require(stats)
examplecheck<-rpois(100,10)
is.pois(examplecheck,0.05)
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
|
/data/genthat_extracted_code/DnE/examples/is.pois.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 328 |
r
|
test_func1 <- function() {
try({
func <- get('boring_function', globalenv())
t1 <- identical(func(9), 9)
t2 <- identical(func(4), 4)
t3 <- identical(func(0), 0)
ok <- all(t1, t2, t3)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_func2 <- function() {
try({
func <- get('my_mean', globalenv())
t1 <- identical(func(9), mean(9))
t2 <- identical(func(1:10), mean(1:10))
t3 <- identical(func(c(-5, -2, 4, 10)), mean(c(-5, -2, 4, 10)))
ok <- all(t1, t2, t3)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_func3 <- function() {
try({
func <- get('remainder', globalenv())
t1 <- identical(func(9, 4), 9 %% 4)
t2 <- identical(func(divisor = 5, num = 2), 2 %% 5)
t3 <- identical(func(5), 5 %% 2)
ok <- all(t1, t2, t3)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_func4 <- function() {
try({
func <- get('evaluate', globalenv())
t1 <- identical(func(sum, c(2, 4, 7)), 13)
t2 <- identical(func(median, c(9, 200, 100)), 100)
t3 <- identical(func(floor, 12.1), 12)
ok <- all(t1, t2, t3)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_func5 <- function() {
try({
func <- get('telegram', globalenv())
t1 <- identical(func("Good", "morning"), "START Good morning STOP")
t2 <- identical(func("hello", "there", "sir"), "START hello there sir STOP")
t3 <- identical(func(), "START STOP")
ok <- all(t1, t2, t3)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_func6 <- function() {
try({
func <- get('mad_libs', globalenv())
t1 <- identical(func(place = "Baltimore", adjective = "smelly", noun = "Roger Peng statue"), "News from Baltimore today where smelly students took to the streets in protest of the new Roger Peng statue being installed on campus.")
t2 <- identical(func(place = "Washington", adjective = "angry", noun = "Shake Shack"), "News from Washington today where angry students took to the streets in protest of the new Shake Shack being installed on campus.")
ok <- all(t1, t2)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_func7 <- function() {
try({
func <- get('%p%', globalenv())
t1 <- identical(func("Good", "job!"), "Good job!")
t2 <- identical(func("one", func("two", "three")), "one two three")
ok <- all(t1, t2)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
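# Hypothetical reference answers (assumed for illustration, not taken from the
# course files) that would pass the remaining checks above; they make explicit
# what each test expects of the learner's function.
boring_function <- function(x) x
my_mean <- function(my_vector) sum(my_vector) / length(my_vector)
remainder <- function(num, divisor = 2) num %% divisor
telegram <- function(...) paste("START", ..., "STOP")
mad_libs <- function(...) {
  args <- list(...)
  paste("News from", args[["place"]], "today where", args[["adjective"]],
        "students took to the streets in protest of the new", args[["noun"]],
        "being installed on campus.")
}
"%p%" <- function(left, right) paste(left, right)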
test_eval1 <- function(){
try({
e <- get("e", parent.frame())
expr <- e$expr
t1 <- identical(expr[[3]], 6)
expr[[3]] <- 7
t2 <- identical(eval(expr), 8)
ok <- all(t1, t2)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_eval2 <- function(){
try({
e <- get("e", parent.frame())
expr <- e$expr
t1 <- identical(expr[[3]], quote(c(8, 4, 0)))
t2 <- identical(expr[[1]], quote(evaluate))
expr[[3]] <- c(5, 6)
t3 <- identical(eval(expr), 5)
ok <- all(t1, t2, t3)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
test_eval3 <- function(){
try({
e <- get("e", parent.frame())
expr <- e$expr
t1 <- identical(expr[[3]], quote(c(8, 4, 0)))
t2 <- identical(expr[[1]], quote(evaluate))
expr[[3]] <- c(5, 6)
t3 <- identical(eval(expr), 6)
ok <- all(t1, t2, t3)
}, silent = TRUE)
exists('ok') && isTRUE(ok)
}
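# Rough sketch (assumed, runnable outside swirl) of the mechanism the
# test_eval* checks rely on: swirl stores the learner's last expression in
# e$expr as an unevaluated call, whose parts can be inspected and replaced
# before re-evaluation. Uses the illustrative evaluate() defined earlier.
e <- new.env()
e$expr <- quote(evaluate(function(x) { x + 1 }, 6))  # stand-in for a user's answer
expr <- e$expr
expr[[1]]        # the call's function: evaluate
expr[[3]]        # its second argument: 6
expr[[3]] <- 7   # rewrite that argument inside the call
eval(expr)       # re-run the edited call; with the evaluate() above this gives 8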
#################################################################################
notify <- function() {
e <- get("e", parent.frame())
if(e$val == "No") return(TRUE)
good <- FALSE
while(!good) {
# Get info
name <- readline_clean("What is your full name? ")
address <- readline_clean("What is the email address of the person you'd like to notify? ")
# Repeat back to them
message("\nDoes everything look good?\n")
message("Your name: ", name, "\n", "Send to: ", address)
yn <- select.list(c("Yes", "No"), graphics = FALSE)
if(yn == "Yes") good <- TRUE
}
# Get course and lesson names
course_name <- attr(e$les, "course_name")
lesson_name <- attr(e$les, "lesson_name")
subject <- paste("[autoConfirmation]:", name, "just completed", course_name, "-", lesson_name)
key1 = "fYtov.VcDdOCbap1SK9hQ34Pu5ln;JIiRU8,T2mBjzrEX0NqFg6ZyxLWeGwsHk7AM "
key2 = "1VCyaPDBEuZIe5HOvxTk7MholKi WRFqN6rmAJ8S3j.pLt;Y4fGdbn2Q,szXgwc09U"
sysInfo <- Sys.info()
info <- sprintf('Name: %s; date: %s; user: %s; sysname: %s',
name, Sys.time(), sysInfo["user"], sysInfo["sysname"])
code <- chartr(key1, key2, info)
body <- sprintf("This is a confirmation that %s just completed %s - %s. Date: %s. Code: %s", name, course_name, lesson_name, Sys.time(), code)
# Send email
swirl:::email(address, subject, body)
hrule()
message("I just tried to create a new email with the following info:\n")
message("To: ", address)
message("Subject: ", subject)
message("Body: ", body)
message("\nIf it didn't work, you can send the same email manually.")
hrule()
# Return TRUE to satisfy swirl and return to course menu
TRUE
}
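# Note on the key1/key2 pair above: chartr() substitutes each character of its
# first argument with the character at the same position in its second, so the
# encoding is reversible by swapping the keys. A minimal sketch with toy keys
# (illustrative only, not the keys used above):
k1 <- "abc123 "
k2 <- "123abc "
msg <- "abc 321"
enc <- chartr(k1, k2, msg)   # "123 cba"
chartr(k2, k1, enc)          # decodes back to "abc 321"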
readline_clean <- function(prompt = "") {
wrapped <- strwrap(prompt, width = getOption("width") - 2)
mes <- stringr::str_c("| ", wrapped, collapse = "\n")
message(mes)
readline()
}
hrule <- function() {
message("\n", paste0(rep("#", getOption("width") - 2), collapse = ""), "\n")
}
|
/R_Programming/Functions/customTests.R
|
no_license
|
aludwin1/swirl
|
R
| false | false | 5,284 |
r
|