| column | dtype | values |
|---|---|---|
| content | large_string | lengths 0–6.46M |
| path | large_string | lengths 3–331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5–125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0–6.46M |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in RtmpoUNtsI/file20625b64abda
\name{makeParamInfo}
\alias{makeParamInfo}
\title{Make an object of information about a model-parameter pairing for getParam. Used internally}
\usage{
makeParamInfo(model, node, param)
}
\arguments{
\item{model}{A model such as returned by \link{nimbleModel}.}
\item{node}{A character string naming a stochastic node, such as "mu", "beta[2]", or "eta[1:3, 2]"}
\item{param}{A character string naming a parameter of the distribution followed by node, such as "mean", "rate", "lambda", or whatever parameter names are relevant for the distribution of the node.}
}
\description{
Creates a simple getParam_info object, which has a list with a paramID and a type
}
\details{
This is used internally by \link{getParam}. It is not intended for direct use by a user or even a nimbleFunction programmer.
}
path: /packages/nimble/man/makeParamInfo.Rd | license_type: no_license | repo_name: peterasujan/nimble | language: R | is_vendor: false | is_generated: false | length_bytes: 908 | extension: rd
%%
%% WARNING! DO NOT EDIT!
%% This file is automatically generated from qsolve.R
%%
\name{qsolve}
\alias{inla.qsolve}
\alias{qsolve}
\title{Solves linear SPD systems}
\description{This routine uses the GMRFLib implementation
to solve linear systems with an SPD matrix.}
\usage{
inla.qsolve(Q, B, reordering = inla.reorderings(), method = c("solve", "forward", "backward"))
}
\arguments{
\item{Q}{An SPD matrix, either as a (dense) matrix or a sparse matrix.}
\item{B}{The right-hand side matrix, either as a (dense) matrix or a sparse matrix.}
\item{reordering}{The type of reordering algorithm to be used for \code{TAUCS};
either one of the names listed in \code{inla.reorderings()}
or the output from \code{inla.qreordering(Q)}.
The default is "auto" which try several reordering
algorithm and use the best one for this particular matrix (using the TAUCS library).}
\item{method}{The system to solve, one of \code{"solve"},
\code{"forward"} or \code{"backward"}. Let \code{Q = L L^T},
where \code{L} is lower triangular
(the Cholesky triangle), then \code{method="solve"} solves \code{L L^T X = B} or
equivalently \code{Q X = B}, \code{method="forward"} solves \code{L X = B}, and
\code{method="backward"} solves \code{L^T X = B}. }
}
\value{
\code{inla.qsolve} returns a matrix \code{X},
which is the solution of \code{Q X = B}, \code{L X = B} or \code{L^T X = B}
depending on the value of \code{method}.
}
\author{Havard Rue \email{hrue@r-inla.org}}
\examples{
n = 10
nb <- n-1
QQ = matrix(rnorm(n^2), n, n)
QQ <- QQ \%*\% t(QQ)
Q = inla.as.sparse(QQ)
B = matrix(rnorm(n*nb), n, nb)
X = inla.qsolve(Q, B, method = "solve")
XX = inla.qsolve(Q, B, method = "solve", reordering = inla.qreordering(Q))
print(paste("err solve1", sum(abs( Q \%*\% X - B))))
print(paste("err solve2", sum(abs( Q \%*\% XX - B))))
## The forward and backward solves are tricky: after permutation, with Q = LL', L is
## lower triangular, but L in the original ordering is not lower triangular. If the rhs is iid
## noise, this is not important. To control the reordering, the 'taucs' library must be
## used.
inla.setOption(smtp = 'taucs')
## case 1. use the matrix as is, no reordering
r <- "identity"
L = t(chol(Q))
X = inla.qsolve(Q, B, method = "forward", reordering = r)
XX = inla.qsolve(Q, B, method = "backward", reordering = r)
print(paste("err forward ", sum(abs(L \%*\% X - B))))
print(paste("err backward", sum(abs(t(L) \%*\% XX - B))))
## case 2. use a reordering from the library
r <- inla.qreordering(Q)
im <- r$ireordering
m <- r$reordering
print(cbind(idx = 1:n, m, im) )
Qr <- Q[im, im]
L = t(chol(Qr))[m, m]
X = inla.qsolve(Q, B, method = "forward", reordering = r)
XX = inla.qsolve(Q, B, method = "backward", reordering = r)
print(paste("err forward ", sum(abs( L \%*\% X - B))))
print(paste("err backward", sum(abs( t(L) \%*\% XX - B))))
}
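For reference, the three method options documented above correspond to standard Cholesky operations; below is a minimal base-R sketch on a small dense SPD matrix (illustrative only; inla.qsolve itself operates on the reordered sparse factor through GMRFLib):

# Minimal base-R sketch of the three 'method' options on a small dense SPD matrix.
n <- 5
Q <- crossprod(matrix(rnorm(n^2), n))   # a dense SPD matrix
B <- matrix(rnorm(n * 2), n, 2)
L <- t(chol(Q))                         # lower Cholesky triangle, Q = L %*% t(L)
X_solve    <- solve(Q, B)               # method = "solve":    Q X = B
X_forward  <- forwardsolve(L, B)        # method = "forward":  L X = B
X_backward <- backsolve(t(L), B)        # method = "backward": L^T X = B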
path: /man/qsolve.Rd | license_type: no_license | repo_name: jdsimkin04/shinyinla | language: R | is_vendor: false | is_generated: false | length_bytes: 3,055 | extension: rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/greyzone.R
\name{greyzone}
\alias{greyzone}
\title{Function for the determination of a grey zone for ordinal diagnostic and
screening tests}
\usage{
greyzone(
ref,
test,
pretest.prob = NULL,
criterion.values = c(0.05, 0.95),
return.all = F,
precision = 3
)
}
\arguments{
\item{ref}{The reference standard. A column in a data frame or a vector
indicating the reference or gold standard. The reference standard must be
coded either as 0 (absence of the condition) or 1 (presence of the
condition).}
\item{test}{The numeric test scores under evaluation. When
\code{mean(test[ref == 0]) > mean(test[ref == 1])} it is assumed that
higher test scores indicate presence of the condition, otherwise that lower
test scores indicate presence of the condition.}
\item{pretest.prob}{The pre-test probability to be used. When
NULL, the prevalence found in the sample is used.}
\item{criterion.values}{The minimum desired values for respectively the
negative and positive post-test probability.}
\item{return.all}{Default = FALSE. When TRUE, the full table of all results
is returned.}
\item{precision}{Default = 3. Precision used for comparison of the criterion
values and the post-test probabilities.}
}
\value{
The function returns the lower and upper value of the range of test
scores that are considered 'grey' or inconclusive. Only smaller or larger
values are considered for a decision. When return.all = TRUE the full table
of the results is returned.
}
\description{
Function for the determination of a grey zone for ordinal diagnostic and
screening tests
}
\details{
This function implements the method proposed by Coste et al. (2003). The current
implementation only handles ordinal test values. This function uses all
possible test scores as dichotomous thresholds to calculate Se, Sp,
positive and negative likelihood ratios and post-test probabilities. The
likelihood ratios are calculated for the accumulated densities of the test
scores and indicate the levels of seriousness of the disease for all
possible dichotomous thresholds. It therefore uses a cumulative
interpretation of the Likelihood Ratios and posttest probabilities. If a
test has test scores 1 to 5 (with 5 indicating the largest probability of
the disease), Se, positive LR and positive posttest probabilities of the
greyzone function use dichotomous thresholds that concern test results >=
1, >= 2, >= 3, >= 4 and >= 5, while Sp, negative LR and negative posttest
probabilities concern test results <= 1, <= 2, <= 3, <= 4 and <= 5. Please
note that in these examples values <= 1 respectively <= 5 concern all
possible test values and have by definition a dichotomous Sensitivity of 1.
Please note that the definition of a grey zone deviates from the definition
of an uncertain interval.
The decision criteria are the required values of post-test probabilities.
This has changed in version 0.7. In earlier versions the selected value was
the one closest to the criterion, which could produce invalid results. These
post-test probabilities of accumulated test scores may require a value over
0.99 or even 0.999 (or under 0.01 or 0.001) to confirm or exclude the
presence of a target disease. Only tests of the highest quality can reach
such criteria. The default criterion values are .05 and .95 for
respectively a negative and positive classification, which may be
sufficient for use by clinicians or Public Health professionals for a first
classification whether a target disease may be present or not (Coste et
al., 2003).
As such the cumulative likelihood ratios differ from the Interval
Likelihood Ratios (see \code{\link{RPV}}), as proposed by Sonis (1999).
These likelihood ratios are calculated for each given interval of test
scores separately and use their densities. In contrast to the greyzone
method, Interval Likelihood ratios and interval posttest probabilities
concern the separate intervals, that is in this example, the separate score
1 to 5. Interval likelihood ratios assign a specific value to each level of
abnormality, and this value is used to calculate the posttest probabilities
of disease for each given level of a test (Sonis, 1999). These post-test
probabilities differ strongly from the cumulative post-test probabilities
and criterion values can be much lower, especially when diseases are life
threatening and low-cost treatments are available. See Sonis (1999) for
further discussion of the interval interpretation.
}
\examples{
ref=c(rep(0, 250), rep(1, 250))
test = c(rep(1:5, c(90,75,50,34,1)), c(rep(1:5, c(10,25,50,65,100))))
addmargins(table(ref, test))
greyzone(ref, test, ret=TRUE, criterion.values=c(.1, .9))
test = c(rep(14:31, c(0,0,0,0,0,0,3,3,5,7,10,20,30,40,50,24,10,10)),
rep(14:31, c(1,0,0,0,0,0,1,4,4,9, 6,13, 8, 6, 5, 4, 0, 0)))
ref = c(rep(0, 212), rep(1, 61))
barplotMD(ref, test)
addmargins(table(ref, test))
greyzone(ref, test, ret=TRUE, crit=c(.1,.9))
}
\references{
{ Coste, J., Jourdain, P., & Pouchot, J. (2006). A gray zone
assigned to inconclusive results of quantitative diagnostic tests:
application to the use of brain natriuretic peptide for diagnosis of heart
failure in acute dyspneic patients. Clinical Chemistry, 52(12), 2229-2235.
Coste, J., & Pouchot, J. (2003). A grey zone for quantitative diagnostic
and screening tests. International Journal of Epidemiology, 32(2), 304-313.
Sonis, J. (1999). How to use and interpret interval likelihood ratios.
Family Medicine, 31, 432-437. }
}
\seealso{
\code{\link{RPV}}
}
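The post-test probabilities described in the details follow the odds form of Bayes' theorem; a minimal sketch with illustrative numbers (not taken from the help page):

# Post-test probability from a pre-test probability and a likelihood ratio,
# via the odds form of Bayes' theorem (illustrative values only).
posttest_prob <- function(pretest_prob, LR) {
  pre_odds  <- pretest_prob / (1 - pretest_prob)
  post_odds <- pre_odds * LR
  post_odds / (1 + post_odds)
}
posttest_prob(0.25, 8.0)   # with LR+ = 8, the positive post-test probability
posttest_prob(0.25, 0.1)   # with LR- = 0.1, the negative post-test probability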
path: /man/greyzone.Rd | license_type: no_license | repo_name: HansLandsheer/UncertainInterval | language: R | is_vendor: false | is_generated: true | length_bytes: 5,551 | extension: rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lifecycle-retired.R
\name{switch_type}
\alias{switch_type}
\alias{coerce_type}
\alias{switch_class}
\alias{coerce_class}
\title{Dispatch on base types}
\usage{
switch_type(.x, ...)
coerce_type(.x, .to, ...)
switch_class(.x, ...)
coerce_class(.x, .to, ...)
}
\arguments{
\item{.x}{An object from which to dispatch.}
\item{...}{Named clauses. The names should be types as returned by
\code{\link[=type_of]{type_of()}}.}
\item{.to}{This is useful when you switchpatch within a coercing
function. If supplied, this should be a string indicating the
target type. A catch-all clause is then added to signal an error
stating the conversion failure. This type is prettified unless
\code{.to} inherits from the S3 class \code{"AsIs"} (see \code{\link[base:AsIs]{base::I()}}).}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#soft-deprecated}{\figure{lifecycle-soft-deprecated.svg}{options: alt='[Soft-deprecated]'}}}{\strong{[Soft-deprecated]}}
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
\code{switch_type()} is equivalent to
\code{\link[base]{switch}(\link{type_of}(x, ...))}, while
\code{switch_class()} switchpatches based on \code{class(x)}. The \code{coerce_}
versions are intended for type conversion and provide a standard
error message when conversion fails.
}
\examples{
switch_type(3L,
double = "foo",
integer = "bar",
"default"
)
# Use the coerce_ version to get standardised error handling when no
# type matches:
to_chr <- function(x) {
coerce_type(x, "a chr",
integer = as.character(x),
double = as.character(x)
)
}
to_chr(3L)
# Strings have their own type:
switch_type("str",
character = "foo",
string = "bar",
"default"
)
# Use a fallthrough clause if you need to dispatch on all character
# vectors, including strings:
switch_type("str",
string = ,
character = "foo",
"default"
)
# special and builtin functions are treated as primitive, since
# there is usually no reason to treat them differently:
switch_type(base::list,
primitive = "foo",
"default"
)
switch_type(base::`$`,
primitive = "foo",
"default"
)
# closures are not primitives:
switch_type(rlang::switch_type,
primitive = "foo",
"default"
)
}
\keyword{internal}
path: /man/switch_type.Rd | license_type: permissive | repo_name: seankross/rlang | language: R | is_vendor: false | is_generated: true | length_bytes: 2,434 | extension: rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{forstmann}
\alias{forstmann}
\title{Forstmann et al.'s data.}
\format{A data frame with 2502 rows and 4 variables:
\describe{
\item{subject}{subject, integer ID for each subject}
\item{rt}{rt, reaction time for each trial as a double}
\item{correct}{correct, integer code for correct or incorrect response}
\item{condition}{condition, integer code for speed or accuracy condition}
}}
\source{
\url{https://www.jneurosci.org/content/35/6/2476}
}
\usage{
forstmann
}
\description{
A dataset containing the speed or accuracy manipulation, subject IDs and
reaction times from the ...
}
\keyword{datasets}
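A short sketch of how the documented columns might be summarised, assuming the package that ships forstmann is attached so the dataset is available:

# Summarising the documented columns (column names taken from the \format block above).
str(forstmann)                                    # 2502 rows, 4 variables
table(forstmann$condition, forstmann$correct)     # correctness by speed/accuracy condition
aggregate(rt ~ condition, data = forstmann, FUN = mean)   # mean reaction time per condition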
path: /man/forstmann.Rd | license_type: no_license | repo_name: JPCAV/samplers | language: R | is_vendor: false | is_generated: true | length_bytes: 716 | extension: rd
# makeCacheMatrix is a function that returns a list of functions
# * setMatrix & getMatrix set and get the value of a matrix
# * cacheInverse & get Inverse set & get the cached value (inverse of the matrix)
# Creating function makeCacheMatrix
makeCacheMatrix <- function(x = matrix()) {
# initially nothing is cached, setting it to NULL
cache <- NULL
# Storing matrix
setMatrix <- function(mat) {
x <<- mat
# Flushing any cached values
cache <<- NULL
}
# Getting the stored matrix
getMatrix <- function() x
# Sending the given argument to CACHE
cacheInverse <- function(solve) {
cache <<- solve
}
# Getting the cached value
getInverse <- function() {
cache
}
# return a list. Each named element of the list is a function
list(setMatrix = setMatrix,
getMatrix = getMatrix,
cacheInverse = cacheInverse,
getInverse = getInverse
)
}
# This function is used to Calculate the inverse of a matrix
# created using the makeCacheMatrix function.
cacheSolve <- function(x, ...) {
# Getting value from CACHE
inverse <- x$getInverse()
# If exist inverse value, return it, else calculate and store in CACHE
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$getMatrix()
inverse <- solve(data, ...)
x$cacheInverse(inverse)
# returning the inverse
inverse
}
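A brief usage sketch for the two functions above, assuming an invertible input matrix:

# Usage sketch: the second cacheSolve() call returns the cached inverse.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
inv1 <- cacheSolve(m)      # computes the inverse and stores it in the cache
inv2 <- cacheSolve(m)      # prints "getting cached data" and reuses the cache
identical(inv1, inv2)      # TRUE
m$setMatrix(matrix(c(1, 2, 3, 4), 2, 2))   # storing a new matrix flushes the cache
cacheSolve(m)              # recomputes the inverse for the new matrix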
path: /cachematrix.R | license_type: no_license | repo_name: 0daven/ProgrammingAssignment2 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,576 | extension: r
library(horseshoe)
### Name: horseshoe
### Title: Function to implement the horseshoe shrinkage prior in Bayesian
### linear regression
### Aliases: horseshoe
### ** Examples
## Not run:
##D #In this example, there are no relevant predictors
##D #20 observations, 30 predictors (betas)
##D y <- rnorm(20)
##D X <- matrix(rnorm(20*30) , 20)
##D res <- horseshoe(y, X, method.tau = "truncatedCauchy", method.sigma = "Jeffreys")
##D
##D plot(y, X%*%res$BetaHat) #plot predicted values against the observed data
##D res$TauHat #posterior mean of tau
##D HS.var.select(res, y, method = "intervals") #selected betas
##D #Ideally, none of the betas is selected (all zeros)
##D #Plot the credible intervals
##D library(Hmisc)
##D xYplot(Cbind(res$BetaHat, res$LeftCI, res$RightCI) ~ 1:30)
## End(Not run)
## Not run:
##D #The horseshoe applied to the sparse normal means problem
##D # (note that HS.normal.means is much faster in this case)
##D X <- diag(100)
##D beta <- c(rep(0, 80), rep(8, 20))
##D y <- beta + rnorm(100)
##D res2 <- horseshoe(y, X, method.tau = "truncatedCauchy", method.sigma = "Jeffreys")
##D #Plot predicted values against the observed data (signals in blue)
##D plot(y, X%*%res2$BetaHat, col = c(rep("black", 80), rep("blue", 20)))
##D res2$TauHat #posterior mean of tau
##D HS.var.select(res2, y, method = "intervals") #selected betas
##D #Ideally, the final 20 predictors are selected
##D #Plot the credible intervals
##D library(Hmisc)
##D xYplot(Cbind(res2$BetaHat, res2$LeftCI, res2$RightCI) ~ 1:100)
## End(Not run)
path: /data/genthat_extracted_code/horseshoe/examples/horseshoe.Rd.R | license_type: no_license | repo_name: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 1,554 | extension: r
library("readr")
library("dplyr")
files = list.files("gsalink_weather/", "*.csv")
head(files)
for (f in files) {
print(f)
df = readr::read_csv(paste0("gsalink_weather/", f))
df %>%
dplyr::mutate(`00`=wt_temperatureFhour, `15`=wt_temperatureFhour,
`30`=wt_temperatureFhour, `45`=wt_temperatureFhour) %>%
dplyr::select(-`wt_temperatureFhour`) %>%
tidyr::gather(min, temperature, `00`:`45`) %>%
dplyr::arrange(date, hour, min) %>%
readr::write_csv(paste0("gsalink_weather_15min/", f))
}
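The hourly-to-15-minute reshape can be checked on a toy input; a minimal sketch with a made-up one-row data frame (column names as in the script above):

# Toy check of the reshape: one hourly record becomes four 15-minute records
# carrying the same temperature (the input row here is made up).
library("dplyr")
toy = data.frame(date = "2018-01-01", hour = 0, wt_temperatureFhour = 41.5)
toy %>%
  dplyr::mutate(`00`=wt_temperatureFhour, `15`=wt_temperatureFhour,
                `30`=wt_temperatureFhour, `45`=wt_temperatureFhour) %>%
  dplyr::select(-`wt_temperatureFhour`) %>%
  tidyr::gather(min, temperature, `00`:`45`) %>%
  dplyr::arrange(date, hour, min)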
path: /faultDetectionSkySpark/data-raw/weather_to_15min.R | license_type: no_license | repo_name: yujiex/gsa_2018 | language: R | is_vendor: false | is_generated: false | length_bytes: 531 | extension: r
library("readr")
library("dplyr")
files = list.files("gsalink_weather/", "*.csv")
head(files)
for (f in files) {
print(f)
df = readr::read_csv(paste0("gsalink_weather/", f))
df %>%
dplyr::mutate(`00`=wt_temperatureFhour, `15`=wt_temperatureFhour,
`30`=wt_temperatureFhour, `45`=wt_temperatureFhour) %>%
dplyr::select(-`wt_temperatureFhour`) %>%
tidyr::gather(min, temperature, `00`:`45`) %>%
dplyr::arrange(date, hour, min) %>%
readr::write_csv(paste0("gsalink_weather_15min/", f))
}
|
constr.student <- function(id = "Jane Doe", oceny = rep(2,10), ...){
s <- structure(list(id,oceny), class = "student")
s
}
valid.student <- function(s, ...){
nazwy <- c('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10')
stopifnot(is.character(s[[1]]))
stopifnot(length(s[[2]]) == 10)
  stopifnot(is.numeric(s[[2]]))
stopifnot( ( sort( names(s[[2]]) ) == sort(nazwy)))
}
o <- c(rep(4,5), rep(4.5,5))
names(o) <- c('p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10')
HZ <- constr.student("Hanna Zdulska", o)
valid.student(HZ)
mean.student <- function(s, ...){
mean(s[[2]])
}
mean.student(HZ)
path: /Wyjsciowki/W1/gr1/ZdulskaHanna/hz.R | license_type: no_license | repo_name: Kaketo/2020Z-ProgramowanieWR | language: R | is_vendor: false | is_generated: false | length_bytes: 633 | extension: r
## EMPTY THE ENVIRONMENT
rm(list = ls())
## SET UP FOCAL DRIVES
os <- .Platform$OS.type
if (os=="windows") {
ADDRESS <-"ADDRESS"
ADDRESS <-"ADDRESS"
ADDRESS <-"ADDRESS"
} else {
ADDRESS <-"ADDRESS"
ADDRESS <-paste0("ADDRESS/", Sys.info()[7], "/")
ADDRESS <-"ADDRESS"
}
## LOAD FUNCTIONS
library(msm)
library(data.table)
library(ggplot2)
## SOURCE MR-BRT
repo_dir <- paste0(ADDRESS, "FILEPATH")
source(paste0(repo_dir, "run_mr_brt_function.R"))
source(paste0(repo_dir, "cov_info_function.R"))
source(paste0(repo_dir, "check_for_outputs_function.R"))
source(paste0(repo_dir, "load_mr_brt_outputs_function.R"))
source(paste0(repo_dir, "predict_mr_brt_function.R"))
source(paste0(repo_dir, "check_for_preds_function.R"))
source(paste0(repo_dir, "load_mr_brt_preds_function.R"))
source(paste0(repo_dir, "plot_mr_brt_function.R"))
#############################################################################################
### HELPER OBJECTS AND FUNCTIONS ###
#############################################################################################
## HELPER OBJECTS
date <- "2020_01_22"
decomp_step <- "iterative"
input_dir <- paste0(ADDRESS, "FILEPATH")
adj_data_dir <- paste0(ADDRESS, "FILEPATH")
brt_out_dir <- paste0(ADDRESS, "FILEPATH")
## CREATE DIRECTORIES
ifelse(!dir.exists(brt_out_dir), dir.create(brt_out_dir), FALSE)
ifelse(!dir.exists(adj_data_dir), dir.create(adj_data_dir), FALSE)
ifelse(!dir.exists(input_dir), dir.create(input_dir), FALSE)
## HELPER FUNCTION
draw_summaries <- function(x, new_col, cols_sum, se = F){
x[, (paste0(new_col, "_lower")) := apply(.SD, 1, quantile, probs = .025, na.rm =T), .SDcols = (cols_sum)]
x[, (paste0(new_col, "_mean")) := rowMeans(.SD), .SDcols = (cols_sum)]
x[, (paste0(new_col, "_upper")) := apply(.SD, 1, quantile, probs = .975, na.rm =T), .SDcols = (cols_sum)]
if (se == T) x[, paste0(new_col, "_se") := (get(paste0(new_col, "_upper"))-get(paste0(new_col, "_lower")))/3.92]
if (se == T) x[get(paste0(new_col, "_se")) == 0, paste0(new_col, "_se") := apply(.SD, 1, sd, na.rm=T), .SDcols = (cols_sum)]
x <- x[, !cols_sum, with = F]
return(x)
}
#############################################################################################
### RUN MR-BRT FOR SMEAR / BACT POS ###
#############################################################################################
## GET DATA FOR REFERENCE V SMEAR
all <- fread(paste0(input_dir, "smear_crosswalk_sex_split_input.csv"))
ref_smear <- all[, !c("culture_mean", "culture_se")]
ref_smear <- ref_smear[complete.cases(ref_smear)]
## COMPUTE RATIO AND SE
ref_smear[, both := NULL]
ref_smear[, ratio := smear_mean / ref_mean]
ref_smear[, ratio_se := sqrt(ratio*((ref_se^2/ref_mean^2)+(smear_se^2/smear_mean^2)))]
## LOG TRANSFORMATIONS
ref_smear[, ratio_log := log(ratio)]
ref_smear$ratio_se_log <- sapply(1:nrow(ref_smear), function(i) {
ratio_i <- ref_smear[i, "ratio"]
ratio_se_i <- ref_smear[i, "ratio_se"]
deltamethod(~log(x1), ratio_i, ratio_se_i^2)
})
## CREATE COV MATRIX
covariates <- c("male", "age")
covs1 <- list()
for (nm in covariates) covs1 <- append(covs1, list(cov_info(nm, "X")))
## FIT THE MODEL
fit1 <- run_mr_brt(
output_dir = brt_out_dir,
model_label = "smear_ref_covs_10p",
data = ref_smear,
covs = covs1,
mean_var = "ratio_log",
se_var = "ratio_se_log",
method = "trim_maxL",
study_id = "nid",
trim_pct = 0.15,
overwrite_previous = TRUE
)
## CHECK FOR OUTPUTS
check_for_outputs(fit1)
#############################################################################################
### BEGIN PREDICTIONS ###
#############################################################################################
## GET DATA THAT WILL BE USED FOR PREDICTIONS
orig_dt <- fread(paste0(adj_data_dir, "sex_split_data.csv"))
unadj_dt <- orig_dt[cv_diag_smear == 1]
## CREATE DUMMIES FOR PREDICTIONS
unadj_dt[, age := (age_start+age_end)/2]
unadj_dt[sex == "Male", male := 1][is.na(male), male := 0]
## START PREDICTION
pred1 <- predict_mr_brt(fit1, newdata = unadj_dt, write_draws = T)
## CHECK FOR PREDICTIONS
check_for_preds(pred1)
pred_object <- load_mr_brt_preds(pred1)
## GET PREDICTION DRAWS
preds <- as.data.table(pred_object$model_summaries)
draws <- as.data.table(pred_object$model_draws)
## COMPUTE MEAN AND CI OF PREDICTION DRAWS
pred_summaries <- copy(draws)
pred_summaries <- draw_summaries(pred_summaries, "pred", paste0("draw_", 0:999), T)
## MERGE PREDICTIONS TO DATA
pred_summaries <- pred_summaries[, .(pred_mean, pred_se)]
unadj_dt <- cbind(unadj_dt, pred_summaries)
## COMPUTE PLOT
plot_mr_brt(fit1, continuous_vars = "age", dose_vars = "age")
#############################################################################################
### ADJUST DATA ###
#############################################################################################
## DO COMPUTATIONS IN LOG SPACE
unadj_dt[is.na(standard_error), standard_error := ((upper - lower) / 3.92)]
unadj_dt[, log_mean := log(mean)]
unadj_dt$log_se <- sapply(1:nrow(unadj_dt), function(i) {
mean_i <- unadj_dt[i, mean]
se_i <- unadj_dt[i, standard_error]
deltamethod(~log(x1), mean_i, se_i^2)
})
## MAKE ADJUSTMENT
unadj_dt[, adj_log_mean := log_mean - pred_mean]
unadj_dt[, adj_log_se := sqrt(pred_se^2 + log_se^2)]
unadj_dt[, adj_mean := exp(adj_log_mean)]
unadj_dt$adj_se <- sapply(1:nrow(unadj_dt), function(i) {
mean_i <- unadj_dt[i, adj_log_mean]
se_i <- unadj_dt[i, adj_log_se]
deltamethod(~exp(x1), mean_i, se_i^2)
})
## OVERWRITE VALUES WITH ADJUSTED VALUES FOR SMEAR POS
unadj_dt[, mean := adj_mean]
unadj_dt[, standard_error := adj_se]
unadj_dt[, lower := mean - 1.96*standard_error]
unadj_dt[, upper := mean + 1.96*standard_error]
unadj_dt[lower < 0, lower := 0]
unadj_dt[upper > 1, upper := 1]
## PLOT ADJUSTED VALUES
ggplot(data=unadj_dt, aes(x=exp(log_mean), y=mean))+
geom_point(size = 2.50, alpha = 0.30) +
geom_abline(intercept = 0, slope = 1) + theme_bw() +
labs(x = "Unadjusted prevalence", y = "Adjusted prevalence") +
geom_errorbar(aes(ymin = lower, ymax=upper), show.legend = F, size = 0.5, alpha = 0.45) +
theme(legend.position = "bottom", legend.text=element_text(size=11),
axis.title = element_text(size=14), axis.text = element_text(size = 11))
#############################################################################################
### FORMAT ###
#############################################################################################
## FORMAT
adj_dt <- copy(unadj_dt)
adj_dt[, uncertainty_type := "Standard error"]
adj_dt[, cases := NA][, sample_size := NA][, orig_mean := exp(log_mean)]
adj_dt[, `:=` (age = NULL, male = NULL, pred_mean = NULL, pred_se = NULL, log_mean = NULL)]
adj_dt[, `:=` (log_se = NULL, adj_log_mean = NULL, adj_log_se = NULL, adj_mean = NULL, adj_se = NULL)]
## LEAVE NOTE
adj_dt[, note_modeler := paste0(note_modeler, "; Adjusted to level of bacteriologically confirmed using ratio from MR-BRT")]
adj_dt[substr(note_modeler, start = 1, stop = 1) == ";", note_modeler := gsub("; ", "", note_modeler)]
## APPEND ADJUSTED DATA
new <- orig_dt[cv_diag_smear == 0]
new[, orig_mean := mean]
new <- rbind(new, adj_dt)
new <- new[order(seq)]
## SAVE
write.csv(new, paste0(adj_data_dir, "smear_adj_data.csv"), row.names = F, na = "")
#############################################################################################
### DONE ###
#############################################################################################
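For reference, the deltamethod() calls used above reduce to the familiar first-order approximations for the log and exp transforms; a small sketch with illustrative numbers (not taken from the data):

# First-order meaning of the deltamethod() calls above (illustrative numbers only).
library(msm)
m  <- 0.25                                   # an example prevalence estimate
se <- 0.04                                   # its standard error
deltamethod(~ log(x1), m, se^2)              # SE of log(mean)
se / m                                       # first-order equivalent: se / mean
se_log <- se / m
deltamethod(~ exp(x1), log(m), se_log^2)     # SE back on the natural scale
m * se_log                                   # first-order equivalent: mean * se_log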
path: /gbd_2019/nonfatal_code/tb/tb_prep/02b_smear_adj.R | license_type: no_license | repo_name: Nermin-Ghith/ihme-modeling | language: R | is_vendor: false | is_generated: false | length_bytes: 7,986 | extension: r
path: /comparativa.R | license_type: no_license | repo_name: alvamat/TFG | language: R | is_vendor: false | is_generated: false | length_bytes: 10,026 | extension: r
nof1.ordinal.simulation <- function(Base.size = 100, Treat.size = 100, alpha = 0, beta_A = -0.1, beta_B = -0.3, cut = c(0.5,1,1.5,2), ncat = 5){
Treat <- rep("baseline", Base.size)
Treat <- c(Treat, rep(c("B", "A", "B", "A"), each = Treat.size))
nobs <- length(Treat)
Y <- mu <- rep(NA, nobs)
q <- matrix(0, nrow = length(Treat), ncol = ncat - 1)
p <- matrix(0, nrow = nobs, ncol = ncat)
for(i in 1:nobs){
mu[i] <- alpha
if(Treat[i] == "A"){
mu[i] <- mu[i] + beta_A
} else if (Treat[i] == "B"){
mu[i] <- mu[i] + beta_B
}
for(r in 1:(ncat-1)){
q[i,r] <- inv_logit(mu[i] - cut[r])
}
p[i,1] <- 1 - q[i,1]
for(r in 2:(ncat-1)){
p[i,r] <- q[i,r-1] - q[i,r]
}
p[i,ncat] <- q[i,(ncat-1)]
Y[i] <- sample(1:ncat, size = 1, prob = p[i,])
}
list(Y = Y, Treat = Treat)
}
nof1.binomial.simulation <- function(Base.size = 14, Treat.size = 56, alpha = 0.5, beta_A = -0.1, beta_B = -0.05){
Treat <- rep("baseline", Base.size)
Treat <- c(Treat, rep(c("B", "A", "B", "A"), each = Treat.size))
nobs <- length(Treat)
Y <- mu <- p <- rep(NA, nobs)
for(i in 1:nobs){
mu[i] <- alpha
if(Treat[i] == "A"){
mu[i] <- mu[i] + beta_A
} else if (Treat[i] == "B"){
mu[i] <- mu[i] + beta_B
}
Y[i] <- rbinom(1,1, inv_logit(mu[i]))
}
Time <- 1:length(Treat)
nobs <- length(Time)
list(Y = Y, Treat = Treat)
}
nof1.poisson.simulation <- function(Base.size = 14, Treat.size = 56, alpha = 1, beta_A = -0.1, beta_B = -0.05){
Treat <- rep("baseline", Base.size)
Treat <- c(Treat, rep(c("B", "A", "B", "A"), each = Treat.size))
nobs <- length(Treat)
Y <- mu <- rep(NA, nobs)
for(i in 1:nobs){
mu[i] <- alpha
if(Treat[i] == "A"){
mu[i] <- mu[i] + beta_A
} else if (Treat[i] == "B"){
mu[i] <- mu[i] + beta_B
}
Y[i] <- rpois(1, exp(mu[i]))
}
list(Y = Y, Treat = Treat)
}
#' Normal data simulation
#'
#' Simulating sample normal data
#'
#' @export
nof1.normal.simulation <- function(Base.size = 2, Treat.size = 8, prec = 0.5, alpha = 50, beta_A = -3, beta_B = -1){
Treat <- rep("baseline", Base.size)
Treat <- c(Treat, rep(c("B", "A", "B", "A"), each = Treat.size))
nobs <- length(Treat)
Y <- mu <- rep(NA, nobs)
for(i in 1:nobs){
mu[i] <- alpha
if(Treat[i] == "A"){
mu[i] <- mu[i] + beta_A
} else if (Treat[i] == "B"){
mu[i] <- mu[i] + beta_B
}
Y[i] <- rnorm(1, mu[i], sqrt(1/prec))
}
list(Y = Y, Treat = Treat)
}
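A short usage sketch for the exported normal simulator above (the binomial and ordinal simulators additionally call an inv_logit() helper assumed to be defined elsewhere in the package):

# Usage sketch for nof1.normal.simulation(); returns a list with outcomes Y and
# the treatment sequence Treat (2 baseline + 4 blocks of 8 treatment observations).
set.seed(1)
sim <- nof1.normal.simulation(Base.size = 2, Treat.size = 8,
                              prec = 0.5, alpha = 50, beta_A = -3, beta_B = -1)
table(sim$Treat)                # counts per arm: A, B, baseline
tapply(sim$Y, sim$Treat, mean)  # sample means by treatment arm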
path: /R/nof1.simulation.R | license_type: no_license | repo_name: jiabei-yang/nof1ins | language: R | is_vendor: false | is_generated: false | length_bytes: 2,586 | extension: r
# importing all required library
required_library <- c('ggplot2', 'corrgram', 'corrplot', 'randomForest',
'caret', 'class', 'e1071', 'rpart', 'mlr','grid',
'DMwR','irace','usdm')
# checking for each library whether it is installed or not;
# if not installed, installing it first and then attaching it
for (lib in required_library){
if(!require(lib, character.only = TRUE))
{
install.packages(lib)
require(lib, character.only = TRUE)
}
}
# removing extra variable
rm(required_library,lib)
# Reading train and test csv file
# set working directory to the file location, uncomment below line and put full path
# setwd("full path to folder in which file is present")
churn_data_df <- read.csv("Train_data.csv")
test_data_df <- read.csv("Test_data.csv")
############################################
# #
# 2.1 Exploratory Data Analysis #
# #
############################################
###################################
# 2.1.1 understanding the data #
###################################
# Checking column names; R enforces syntactic names:
# every space is changed into a dot (.) and a column name cannot start with a number, etc.
colnames(churn_data_df) # look at column names, all changed as per R syntax
# checking datatypes of all columns
str(churn_data_df)
### checking numerical variables ###
# Checking numerical statistics of numerical columns (five-point summary + mean of each column)
summary(churn_data_df)
### Checking categorical variable ###
# unique values in each category
cat_col <- c('area.code','international.plan', 'voice.mail.plan','Churn')
lapply(churn_data_df[,c('state', cat_col)], function(feat) length(unique(feat)))
# counting of each unique values in categorical columns
lapply(churn_data_df[,cat_col], function(feature) table(feature))
###################################
# 2.1.2 Missing value analysis #
###################################
# checking missing values for each column and storing the counts in a dataframe with column names
missing_val <- data.frame(lapply(churn_data_df, function(feat) sum(is.na(feat))))
###################################
# 2.1.3 outlier analysis #
###################################
# removing phone number column and changing area code to category
churn_data_df$phone.number <- NULL
churn_data_df$area.code <- as.factor(churn_data_df$area.code)
test_data_df$phone.number <- NULL
test_data_df$area.code <- as.factor(test_data_df$area.code)
# taking out list of name of numerical columns in dataset
numeric_columns <- colnames(Filter(is.numeric, churn_data_df))
# box_plot function to plot boxplot of numerical columns
box_plot <- function(column, dataset){
ggplot(aes_string(x = 'Churn', y = column, fill = 'Churn'),
data = dataset)+
stat_boxplot(geom = 'errorbar', width = 0.5)+
geom_boxplot(outlier.size = 2, outlier.shape = 18)+
theme(legend.position = 'bottom')+
labs(y = gsub('\\.', ' ', column), x = "Churn")+
ggtitle(paste(" Box Plot :",gsub('\\.', ' ', column)))
}
# hist_plot function to plot histogram of numerical variable
hist_plot <- function(column, dataset){
ggplot(aes_string(column), data = dataset)+
geom_histogram(aes(y=..density..), fill = 'skyblue2')+
geom_density()+
labs(x = gsub('\\.', ' ', column))+
ggtitle(paste(" Histogram :",gsub('\\.', ' ', column)))
}
# calling box_plot function and storing all plots in a list
all_box_plots <- lapply(numeric_columns,box_plot, dataset = churn_data_df)
# calling hist_plot function and storing all plots in a list
all_hist_plots <- lapply(numeric_columns,hist_plot, dataset = churn_data_df)
# Plotting Boxplot and histogram to analyse the data for three columns simultaneously
plot_in_grid <- function(f, s, t){
gridExtra::grid.arrange(all_box_plots[[f]],all_box_plots[[s]],all_box_plots[[t]],
all_hist_plots[[f]],all_hist_plots[[s]],all_hist_plots[[t]],ncol=3,nrow=2)
}
# plotting for day's minute, call and charges
plot_in_grid(3,4,5)
# plotting for evening's minute, call and charges
plot_in_grid(6,7,8)
# plotting for night's minute, call and charges
plot_in_grid(9, 10, 11)
# plotting for international's minute, call and charges
plot_in_grid(12, 13, 14)
# plotting for account length, voice mail message and customer service calls
plot_in_grid(1, 2, 15)
#####################
# outlier removing #
#####################
# Note: Considering both datasets, one with outliers and the other without outliers, for building models
# Reason explained in Project report
#
# name of dataset with outlier :- churn_data_df
# name of dataset without outlier:- churn_data_df_wo
churn_data_df_wo <- churn_data_df
# excluding the numeric columns for which we will not do the outlier removal process
numeric_columns1 <- numeric_columns[! numeric_columns %in% c("number.vmail.messages","number.customer.service.calls")]
for (i in numeric_columns1){
out_value = churn_data_df_wo[,i] [churn_data_df_wo[,i] %in% boxplot.stats(churn_data_df_wo[,i])$out]
churn_data_df_wo = churn_data_df_wo[which(!churn_data_df_wo[,i] %in% out_value),]
}
# Plotting again distribution and boxplot after outlier removal
# calling box_plot function and storing all plots in a list
# for churn_data_df_wo i.e. dataset without outliers
all_box_plots <- lapply(numeric_columns,box_plot, dataset = churn_data_df_wo)
# calling hist_plot function and storing all plots in a list
# for churn_data_df_wo i.e. dataset without outliers
all_hist_plots <- lapply(numeric_columns,hist_plot, dataset = churn_data_df_wo)
# plotting for day's minute, call and charges after outlier removal
plot_in_grid(3,4,5)
# plotting for evening's minute, call and charges after outlier removal
plot_in_grid(6,7,8)
# plotting for night's minute, call and charges after outlier removal
plot_in_grid(9, 10, 11)
# plotting for international's minute, call and charges after outlier removal
plot_in_grid(12, 13, 14)
# plotting for account length, voice mail message and customer service calls
# after outlier removal
plot_in_grid(1, 2, 15)
###################################
# 2.1.4 Feature Selection #
###################################
# correlation plot for numerical feature
corrgram(churn_data_df[,numeric_columns], order = FALSE,
upper.panel = panel.pie, text.panel = panel.txt,
main = "Correlation Plot for Churning data set")
# heatmap plot for numerical features
corrplot(cor(churn_data_df[,numeric_columns]), method = 'color', type = 'lower')
# getting categorical column
cat_col <- c('state', 'area.code','international.plan', 'voice.mail.plan')
# chi-square test of independence of each category with Churn column
for(i in cat_col){
print(i)
print(chisq.test(table(churn_data_df$Churn, churn_data_df[,i])))
}
# Now checking multicollinearity between international plan and voice mail plan
# by chi-sq test of independence
print(chisq.test(table(churn_data_df$international.plan,
churn_data_df$voice.mail.plan)))
# checking VIF factor for numeric columns
vif(churn_data_df[,numeric_columns])
# checking importance of feature in ranking using random forest
important_feat <- randomForest(Churn ~ ., data = churn_data_df,
ntree = 200, keep.forest = FALSE, importance = TRUE)
importance_feat_df <- data.frame(importance(important_feat, type = 1))
################################
# #
# Data After EDA #
# #
################################
# Dropping the state and area code columns, as the chi-sq tests showed they are
# not associated with the Churn column. Dropping total day minutes, total eve charge, total night charge
# and total intl charge, as these columns were found to be multicollinear with other columns
churn_data_df <- churn_data_df[, -c(1,3,7,12,15,18)]
churn_data_df_wo <- churn_data_df_wo[, -c(1,3,7,12,15,18)]
test_data_df <- test_data_df[, -c(1,3,7,12,15,18)]
# checking VIF factor for numeric columns after removal of multicollinear columns
numeric_columns <- colnames(Filter(is.numeric, churn_data_df))
vif(churn_data_df[,numeric_columns])
# changing levels of factor to 0 and 1
# no :- 0, yes:- 1
# false. :- 0, true. :- 1
category = c('international.plan', 'voice.mail.plan', 'Churn')
for (i in category){
levels(churn_data_df[,i]) <- c(0,1)
levels(churn_data_df_wo[,i]) <- c(0,1)
levels(test_data_df[,i]) <- c(0,1)
}
############################################
# #
# #
# 2.2.2 Building Classification models #
# #
# #
############################################
################################################
# K-fold CV accuracy score calculator method #
################################################
### Creating function which will calculate the K-fold CV accuracy
model.K_fold.accuracy <- function(classifier, data){
# creating 10 folds of data
ten_folds = createFolds(data$Churn, k = 10)
  # lapply will produce one accuracy measure for each of the 10 test folds
ten_cv = lapply(ten_folds, function(fold) {
training_fold = data[-fold, ]
test_fold = data[fold, ]
    # replacing the classifier's data with the current training fold
    classifier$data = training_fold
    # predicting on the test fold
    # for logistic regression "glm" we get the prediction as a probability
    # changing probability to class
    if(class(classifier)[1] == "glm"){
      y_prob = predict(classifier, type = 'response', newdata = test_fold[-14])
      y_pred = ifelse(y_prob>0.5, 1, 0)
    } else if(class(classifier)[1] == 'rpart'){
      y_pred = predict(classifier, newdata = test_fold[-14], type ='class')
    } else{
      y_pred = predict(classifier, newdata = test_fold[-14])
}
# creating confusion matrix
cm = table(test_fold[, 14], y_pred)
    # calculating accuracy: correct predictions divided by total observations
accuracy = (cm[1,1] + cm[2,2]) / (cm[1,1] + cm[1,2] + cm[2,1] + cm[2,2])
return(accuracy)
})
# returning mean of all accuracy which we got from lapply function result
return(mean(as.numeric(ten_cv)))
}
###########################################################
# Function Predicting result on test data set of a model #
# And returning confusion matrix #
###########################################################
churn.predict <- function(classifier, data){
if(class(classifier)[1] == 'glm'){
    churn_prob <- predict(classifier, newdata = data[,-14], type = 'response')
churn_prediction <- ifelse(churn_prob >= 0.5, 1, 0)
} else if(class(classifier)[1] == 'rpart'){
churn_prediction = predict(classifier, data[,-14], type ='class')
} else{
churn_prediction = predict(classifier, data[,-14])
}
cm = confusionMatrix(table(data$Churn, churn_prediction))
return(cm)
}
#########################
# Logistic Regression #
#########################
# logistic regression on dataset churn_data_df with outliers
churn_classifier <- glm(formula = Churn ~ ., family = binomial,
data = churn_data_df)
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K -fold accuracy of Logistic regression model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df)
k_fold_accuracy
# Now checking on the dataset without outliers
churn_classifier <- glm(formula = Churn ~ ., family = binomial,
data = churn_data_df_wo)
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K -fold accuracy of Logistic regression model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df_wo)
k_fold_accuracy
#########################
# KNN #
#########################
# predicting on dataset with outliers i.e. churn_data_df
churn_prediction <- knn(train = churn_data_df[,-14], test = test_data_df[,-14],
cl = churn_data_df$Churn, k = 5, prob = TRUE)
confusionMatrix(table(test_data_df$Churn, churn_prediction))
# predicting on dataset without outliers i.e. churn_data_df_wo
churn_prediction <- knn(train = churn_data_df_wo[,-14], test = test_data_df[,-14],
cl = churn_data_df_wo$Churn, k = 5, prob = TRUE)
confusionMatrix(table(test_data_df$Churn, churn_prediction))
#########################
# Naive Bayes #
#########################
# Building model on dataset with outliers i.e. churn_data_df
churn_classifier <- naiveBayes(x = churn_data_df[,-14], y =churn_data_df[,14])
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K -fold accuracy of Naive Bayes model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df)
k_fold_accuracy
# building model on dataset without outliers i.e. churn_data_df_wo
churn_classifier <- naiveBayes(x = churn_data_df_wo[,-14], y =churn_data_df_wo[,14])
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K -fold accuracy of Naive Bayes model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df_wo)
k_fold_accuracy
#########################
# Decision Tree #
#########################
# building model on dataset with outliers i.e. churn_data_df
churn_classifier <- rpart(formula = Churn ~ ., data = churn_data_df)
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K -fold accuracy of Decision Tree model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df)
k_fold_accuracy
# building model on dataset without outliers i.e. churn_data_df_wo
churn_classifier <- rpart(formula = Churn ~ ., data = churn_data_df_wo)
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K -fold accuracy of Decision Tree model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df_wo)
k_fold_accuracy
#########################
# Random Forest #
#########################
# building model on dataset with outliers i.e. churn_data_df
churn_classifier <- randomForest(formula = Churn ~ ., data = churn_data_df,
ntree = 500)
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K-fold accuracy of Random Forest model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df)
k_fold_accuracy
# building model on dataset without outliers i.e. churn_data_df_wo
churn_classifier <- randomForest(formula = Churn ~ ., data = churn_data_df_wo,
ntree = 500)
cm <- churn.predict(churn_classifier, test_data_df)
cm
# K-fold accuracy of Random Forest model
k_fold_accuracy <- model.K_fold.accuracy(churn_classifier, churn_data_df_wo)
k_fold_accuracy
############################################
############################################
# #
# #
# Hyperparameter tuning #
# #
# #
############################################
#########################################
# #
# tuning decision tree for both dataset #
# churn_data_df and churn_data_df_wo #
# #
#########################################
# we will tune the best two models from above, i.e. Decision Tree and Random Forest
# for tuning we will use mlr package and its methods
tune.Decision.Tree <- function(learner, paramset, dataset){
# creating task for train
train_task = makeClassifTask(data = dataset, target = 'Churn')
# setting 10 fold cross validation
cv = makeResampleDesc("CV", iters = 10)
grid_control = makeTuneControlGrid()
# tuning parameter
tune_param = tuneParams(learner = learner, resampling = cv, task = train_task,
par.set = paramset, control = grid_control, measures = acc)
return(tune_param)
}
# tuning decision tree classifier for churn_data_df i.e. whole dataset
# making learner tree
learner = makeLearner("classif.rpart", predict.type = 'response')
# setting params range
param_set <- makeParamSet(
makeIntegerParam("minsplit", lower = 10, upper = 50),
makeIntegerParam("minbucket", lower = 5, upper = 50),
makeNumericParam("cp", lower = 0.001, upper = 0.2)
)
tuned_param <- tune.Decision.Tree(learner, param_set, churn_data_df)
# building decision tree model based on tuned param with mlr package
set_param <- setHyperPars(learner, par.vals = tuned_param$x)
train_task <- makeClassifTask(data = churn_data_df, target = 'Churn')
test_task <- makeClassifTask(data = test_data_df, target = 'Churn')
# training model
train_model <- train(set_param, train_task)
# predicting on test data
pred <- predict(train_model, test_task)
y_pred = pred[["data"]][["response"]]
# confusion matrix
cm = table(test_data_df[, 14], y_pred)
cm
# tuning decision tree like above for churn_data_df_wo i.e. without outliers
tuned_param <- tune.Decision.Tree(learner, param_set, churn_data_df_wo)
set_param <- setHyperPars(learner, par.vals = tuned_param$x)
train_task_wo <- makeClassifTask(data = churn_data_df_wo, target = 'Churn')
train_model <- train(set_param, train_task_wo)
pred <- predict(train_model, test_task)
y_pred = pred[["data"]][["response"]]
cm = table(test_data_df[, 14], y_pred)
cm
#########################################
# #
# tuning random forest for both dataset #
# churn_data_df and churn_data_df_wo #
# #
#########################################
# tuning random forest for churn_data_df
train_task = makeClassifTask(data = churn_data_df, target = 'Churn')
test_task <- makeClassifTask(data = test_data_df, target = 'Churn')
# making learner
rfLearner <- makeLearner("classif.randomForest", predict.type = 'response',
par.vals = list(ntree = 200, mtry = 3))
rfLearner$par.vals <- c(rfLearner$par.vals, list(importance = TRUE))
rf_param_set <- makeParamSet(
makeIntegerParam("ntree",lower = 600, upper = 800),
makeIntegerParam("mtry", lower = 3, upper = 10),
makeIntegerParam("nodesize", lower = 10, upper = 50))
ctrl <- makeTuneControlIrace(maxExperiments = 200L)
cv <- makeResampleDesc("CV",iters = 3L)
rf_tuned_param <- tuneParams(learner = rfLearner, resampling = cv, task=train_task,
par.set = rf_param_set, control = ctrl, measures = acc)
# making model on tuned parameter
rf_set_param <- setHyperPars(rfLearner, par.vals = rf_tuned_param$x)
rf_train_model <- train(rf_set_param, train_task)
pred <- predict(rf_train_model, test_task)
y_pred <- pred$data$response
cm <- table(test_data_df[, 14], y_pred)
cm
###################
# tuning random forest for churn_data_df_wo
train_task_wo <- makeClassifTask(data = churn_data_df_wo, target = 'Churn')
# tuning model
rf_tuned_param <- tuneParams(learner = rfLearner, resampling = cv, task=train_task_wo,
par.set = rf_param_set, control = ctrl, measures = acc)
# making model on tuned parameter
rf_set_param <- setHyperPars(rfLearner, par.vals = rf_tuned_param$x)
rf_train_model <- train(rf_set_param, train_task_wo)
pred <- predict(rf_train_model, test_task)
y_pred <- pred$data$response
cm <- table(test_data_df[, 14], y_pred)
cm
############ Alternative way for hyperparameter tuning using Caret #############
# tuning decision tree
control <- trainControl(method="repeatedcv", number=10, repeats=3)
churn_model <- caret::train(Churn ~., data = churn_data_df, method = 'rpart', trControl = control)
churn_model$bestTune
y_pred <- predict(churn_model, test_data_df)
confusionMatrix(test_data_df$Churn, y_pred)
# Note: Different methods are available in caret, each tuning different parameters;
# see the caret documentation for the available options
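# Illustrative sketch (not part of the original analysis): instead of caret's default
# search, a user-defined grid can be supplied via tuneGrid; for method = 'rpart' the
# tunable parameter is cp. The names rpart_grid and churn_model_grid are hypothetical.
rpart_grid <- expand.grid(cp = seq(0.001, 0.05, by = 0.005))
churn_model_grid <- caret::train(Churn ~., data = churn_data_df, method = 'rpart',
                                 trControl = control, tuneGrid = rpart_grid)
churn_model_grid$bestTune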
# tuning Random Forest
control <- trainControl(method="repeatedcv", number=10, repeats=3)
churn_model <- caret::train(Churn ~., data = churn_data_df, method = 'rf', trControl = control)
churn_model$bestTune
y_pred <- predict(churn_model, test_data_df)
confusionMatrix(test_data_df$Churn, y_pred)
# for random forest, too, several methods are available; see the documentation
############################################
# #
# #
# SMOTE (Oversampling) #
# Balancing Target #
# #
############################################
# with the help of the caret package we can apply SMOTE directly by selecting
# sampling = 'smote' in trainControl
ctrl <- trainControl(method = 'repeatedcv', number = 10,repeats = 10,
sampling = 'smote')
set.seed(1)
# smote on churn_data_df and applying on randomForest
rf_model_smote <- caret::train(Churn ~ ., data = churn_data_df, method = 'rf',
preProcess = c("scale", "center"),trControl = ctrl)
churn.predict(rf_model_smote, test_data_df)
# smote on churn_data_df_wo and applying on randomForest
rf_model_smote_wo <- caret::train(Churn ~ ., data = churn_data_df_wo, method = 'rf',
preProcess = c("scale", "center"),trControl = ctrl)
churn.predict(rf_model_smote_wo, test_data_df)
|
/churn_prediction.R
|
no_license
|
mohitsharma294/churn_prediction
|
R
| false | false | 21,884 |
r
|
library(arulesSequences)
### Name: sequencerules-class
### Title: Class "sequencerules" - Collections of Sequential Rules
### Aliases: sequencerules-class coerce,sequencerules,list-method
### coerce,sequencerules,data.frame-method
### coerce,sequencerules,sequences-method generatingItemsets
### coverage,sequencerules-method
### coverage,sequencerules,ANY,missing-method
### is.redundant,sequencerules-method summary,sequencerules-method
### show,sequencerules-method show,summary.sequencerules-method
### summary.sequencerules-class summary,sequencerules-method
### Keywords: classes
### ** Examples
## continue example
example(ruleInduction, package = "arulesSequences")
cbind(as(r2, "data.frame"),
coverage = coverage(r2))
## coerce to sequences
as(as(r2, "sequences"), "data.frame")
## find redundant rules
is.redundant(r2, measure = "lift")
|
/data/genthat_extracted_code/arulesSequences/examples/sequencerules-class.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 877 |
r
|
complete <- function(directory, id = 1:332) {
  # directory: folder containing the monitor files ("001.csv" ... "332.csv")
  # id: integer vector of monitor ids to summarise
  setwd(directory)
  nobs <- NULL
  for(i in id) {
    # zero-pad the monitor id to three characters to match the file names
    i_char <- as.character(i)
    if(nchar(i_char) != 3) {
      zero <- paste(rep("0", times=3-nchar(i_char)), collapse="")
      i_char <- paste(zero, i_char, sep = "")
    }
    # read the monitor file and count its complete (non-missing) rows
    input <- read.csv(paste(i_char, ".csv", sep=""))
    nobs <- c(nobs, sum(complete.cases(input)))
  }
  # one row per monitor: its id and the number of complete observations
  data.frame(id = id, nobs = nobs)
}
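# Example usage (hypothetical directory name): assuming the monitor files
# "001.csv" ... "332.csv" live in a folder called "specdata", this returns the
# number of complete cases for monitors 1 to 3:
# complete("specdata", 1:3)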
|
/DataScienceSpecialization/02_R_Programming/complete.R
|
no_license
|
dongyuanwu/SelfImprovement
|
R
| false | false | 457 |
r
|
# Author: Sternophant
# Date: 2015-08-17
# Description:
# Step 1 - Read raw data
# Step 2 - Add descriptive variable names
# Step 3 - Merge data
# Step 4 - Extract mean() and std() variables
# Step 5 - Create a second, independent tidy data set
# with the average of each variable for each activity and each subject
library(dplyr)
# Step 1 - Read raw data
# -----------------------------------------------------------------------------
# Features and activity_labels
activity_labels_path <- file.path("UCI HAR Dataset", "activity_labels.txt")
activity_labels <- read.table(file = activity_labels_path,
colClasses = c("numeric", "character"))
features_path <- file.path("UCI HAR Dataset", "features.txt")
features <- read.table(file = features_path,
colClasses = "character")
# Test raw data
subject_test_path <- file.path("UCI HAR Dataset","test", "subject_test.txt")
subject_test <- read.table(file = subject_test_path,
colClasses = "numeric")
X_test_path <- file.path("UCI HAR Dataset","test", "X_test.txt")
X_test <- read.table(file = X_test_path,
colClasses = "numeric")
y_test_path <- file.path("UCI HAR Dataset","test", "y_test.txt")
y_test <- read.table(file = y_test_path,
colClasses = "numeric")
# Train raw data
subject_train_path <- file.path("UCI HAR Dataset","train", "subject_train.txt")
subject_train <- read.table(file = subject_train_path,
colClasses = "numeric")
X_train_path <- file.path("UCI HAR Dataset","train", "X_train.txt")
X_train <- read.table(file = X_train_path,
colClasses = "numeric")
y_train_path <- file.path("UCI HAR Dataset","train", "y_train.txt")
y_train <- read.table(file = y_train_path,
colClasses = "numeric")
# Remove unneeded variables from the environment
rm(activity_labels_path,
features_path,
subject_test_path,
X_test_path,
y_test_path,
subject_train_path,
X_train_path,
y_train_path)
# -----------------------------------------------------------------------------
# Step 2 - Add descriptive variable names
# -----------------------------------------------------------------------------
colnames(activity_labels) <- c("activity", "activity_label")
colnames(subject_test) <- "subject"
colnames(subject_train) <- "subject"
colnames(X_test) <- features[ , 2]
colnames(X_train) <- features[ , 2]
colnames(y_test) <- "activity"
colnames(y_train) <- "activity"
# -----------------------------------------------------------------------------
# Step 3 - Merge data
# -----------------------------------------------------------------------------
# Add activity_labels
y_test <- left_join(y_test, activity_labels)
y_train <- left_join(y_train, activity_labels)
# Merge columns of "subject_*", "y_*", and "X_* seperately for test and train data
test <- bind_cols(subject_test, y_test, X_test)
train <- bind_cols(subject_train, y_train, X_train)
# Merge test and train data
data <- rbind(test, train)
# Remove duplicate data from workspace
rm(activity_labels, features,
subject_test, y_test, X_test,
subject_train, y_train, X_train, test, train)
# -----------------------------------------------------------------------------
# Step 4 - Extract mean() and std() variables
# -----------------------------------------------------------------------------
index_subject_activity <- c(1:3)
index_mean <- grep("mean()", names(data), fixed = T)
index_std <- grep("std()", names(data), fixed = T)
index_columns <- c(index_subject_activity, index_mean, index_std)
index_columns <- sort(index_columns)
data_mean_std <- data[ , index_columns] %>% arrange(subject, activity)
# Remove unneeded variables from the environment
rm(index_subject_activity,
index_mean,
index_std,
index_columns)
# -----------------------------------------------------------------------------
# Step 5 - Create a second, independent tidy data set
# with the average of each variable for each activity and each subject
# -----------------------------------------------------------------------------
averaged_data_mean_std <- data_mean_std %>%
group_by(subject, activity, activity_label) %>%
summarise_each(funs(mean))
names(averaged_data_mean_std)[4:69] <- paste("mean_of_",
names(averaged_data_mean_std)[4:69],
sep = "")
# -----------------------------------------------------------------------------
|
/run_analysis.R
|
no_license
|
sternophant/getting_and_cleaning_data_course_project
|
R
| false | false | 4,725 |
r
|
##' @include guiComponents.R
##' Label class
setClass("gLabel",
contains="guiComponent",
prototype=prototype(new("guiComponent"))
)
##' constructor for label widget
##'
##' @param text character. Text for label. Coerced to character and pasted together with newlines
##' @param markup logical. For some toolkits, one can specify marked up text.
##' @param editable logical. For some toolkits, label can be edited
##' when this is \code{TRUE}. Generally found to be an unexpected
##' interface for users, so use is discouraged.
##' @param handler function. For some toolkits, this handler will be
##' called when the label is clicked on. In general, labels are
##' expected to be static objects so this use is discouraged.
##' @param action passed to \code{handler}, when called
##' @param container parent container
##' @param ... generally ignored
##' @param toolkit underlying toolkit. Usually not specified
##' @return \code{gLabel} object to manipulate and creates widget on screen
##' @export
##' @examples
##' w <- gwindow()
##' g <- ggroup(container=w, horizontal=FALSE)
##' l1 <- glabel("Some label", container=g)
##' l2 <- glabel(c("pasted with", "new lines"), container=g)
##' svalue(l1) <- "New text for some label")
##' svalue(l1)
glabel = function(
text= "", markup = FALSE, editable = FALSE, handler = NULL,
action = NULL, container = NULL,
..., toolkit=guiToolkit()) {
## collapse if more than one line
text <- paste(text, collapse="\n")
widget = .glabel(toolkit,
text= text, markup = markup, editable = editable, handler = handler,
action = action, container = container,
...)
obj = new("gLabel",widget=widget,toolkit=toolkit)
return(obj)
}
##' glabel generic for toolkit
setGeneric(".glabel",function(toolkit,
text= "", markup = FALSE, editable = FALSE, handler = NULL,
action = NULL, container = NULL,
...) standardGeneric(".glabel"))
##' svalue<- generic
##'
##' Ensure value is character vector. Pastes values together by collapsing with a new line.
##' @use_svalue_otherwise
setReplaceMethod("svalue",signature(obj="gLabel"),
function(obj, index=NULL, ...,value) {
## enforce that value is character
value <- paste(as.character(value), collapse="\n")
callNextMethod()
})
|
/R/glabel.R
|
no_license
|
plantarum/gWidgets
|
R
| false | false | 2,445 |
r
|
CURD_DIR ?=../../../
INC_CUDA_DIRS ?=-I$(P4ROOT)/sw/gpgpu/samples/common/inc
CURD_FLAGS=-I$(CURD_DIR) $(INC_CUDA_DIRS) -L$(CURD_DIR) $(CURD_DIR)/race_detection.o -v -keep -dr -rdc=true
.PHONY: all clean run cudarace
all:
nvcc $(CURD_FLAGS) -O -arch sm_35 -I ../../common/inc/ -cudart=shared hashtable.cu ../../common/lib/libcutil_x86_64.a -o hashtable
nvcc $(CURD_FLAGS) -lineinfo -O -arch sm_35 -I ../../common/inc/ -cudart=shared hashtable.cu ../../common/lib/libcutil_x86_64.a -o hashtable_linfo
cuobjdump -ptx hashtable > hashtable.ptx
clean:
rm -f hashtable
run:
bash ./run
cudarace:
bash ./cudarace
|
/benchmarks/gpu-tm/hashtable/Makefile.rd
|
no_license
|
sljiaa/curd-llvm
|
R
| false | false | 617 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.R
\name{download}
\alias{download}
\title{Download File}
\usage{
download(url, dir = ".", mode = "wb", chmod = file_ext(url) == "",
destfile = file.path(dir, basename(url)), quiet = TRUE, ...)
}
\arguments{
\item{url}{URL of file to download.}
\item{dir}{directory to download to.}
\item{mode}{download mode, see details.}
\item{chmod}{whether to set execute permission (default is \code{TRUE} if
file has no filename extension).}
\item{destfile}{destination path and filename (optional, overrides
\code{dir}).}
\item{quiet}{whether to suppress messages.}
\item{\dots}{passed to \code{download.file}.}
}
\description{
Download a file in binary mode, e.g. a model executable.
}
\details{
With the default mode \code{"wb"} the file is downloaded in binary mode (see
\code{\link{download.file}}), to prevent R from adding \verb{^M} at line
ends. This is particularly relevant for Windows model executables, while the
\code{chmod} switch is useful when downloading Linux executables.
This function can be convenient for downloading any file, including text
files. Data files in CSV or other text format can also be read directly into
memory using \code{read.table}, \code{read.taf} or similar functions, without
writing to the file system.
}
\examples{
\dontrun{
url <- paste0("https://github.com/ices-taf/2015_had-iceg/raw/master/",
"bootstrap/initial/software/catageysa.exe")
download(url)
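# A hypothetical illustration (placeholder URL and directory): because "mymodel"
# has no filename extension, the default 'chmod' rule marks the file as executable
download("https://example.com/models/mymodel", dir = "software")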
}
}
\seealso{
\code{\link{read.taf}} reads a TAF table into a data frame.
\code{\link{icesTAF-package}} gives an overview of the package.
}
|
/man/download.Rd
|
no_license
|
alko989/icesTAF
|
R
| false | true | 1,640 |
rd
|
#' Recursively partition haplotype matrix
#'
#' This function recursively partitions the SNVs in the window around the focal SNV.
#'
#'
#' This function makes two clades based on \code{splotSNV}. For each partition, update \code{splitSNV} and use
#' it to define subclades. Then, continue recursive partitioning until each partition has only one haplotype,
#' or there are no more SNVs to consider.
#'
#'
#' @param hapmat A hapMat object with SNVs ordered according to ancestry.
#' @param splitSNV The index of the SNV where the haplotype matrix from \code{\link{orderSNVs}} is partitioned.
#'
#' @keywords internal
#'
#' @return A nested partition of haplotypes, implemented as a list of nodes, each with two child nodes.
#'
#' @seealso \code{\link{makeDend}}, \code{\link{newNode}}, \code{\link{noVariation}}
#'
#' @examples
#'
#' \dontshow{
#'
#' data(ex_hapMatSmall_data)
#'
#' # First, select a window of SNVs about a focal SNV.
#' SNV_win <- selectWindow(hapMat = ex_hapMatSmall_data,
#' focalSNV = 10, minWindow = 1)
#'
#' # Then order SNVs in the window.
#' ordHapmat <- orderSNVs(snvWin = SNV_win)
#'
#' # Recursively partition haplotype matrix.
#' partitions <- makePartition(hapmat = ordHapmat, splitSNV = 1)
#'
#' }
#'
makePartition = function(hapmat, splitSNV) {
if(nrow(hapmat)==1 || splitSNV>ncol(hapmat)){
# Then we are done splitting, either because the clade is
    # a single haplotype (nrow(hapmat) == 1) or because we've
# run out of SNVs (splitSNV>ncol(hapmat)). Return the
# haplotypes in hapmat as a leaf node.
return(newNode(hapmat))
}
# If not, find the next SNV to split on. To split on a SNV, there
# must be variation. Keep searching for a SNV as long as there is
# no variation in the current SNV.
while(splitSNV <= ncol(hapmat) && noVariation(hapmat[, splitSNV])) {
splitSNV = splitSNV + 1
}
# We may have found a SNV to split on, or we may have hit
# the end of hapMat without finding a SNV to split on.
if(splitSNV > ncol(hapmat)){
# Couldn't find a SNV to split on; return hapmat as a leaf node.
return(newNode(hapmat))
}
# Otherwise, we've found a SNV to split on, so split into clades.
# The following call is to R's subset(), applied to a matrix,
# **not** subsetHapMat() applied to a hapMat object.
clade1 = subset(hapmat, hapmat[, splitSNV] == 1)
child1 = makePartition(clade1, splitSNV+1)
clade0 = subset(hapmat,hapmat[, splitSNV] == 0)
child0 = makePartition(clade0, splitSNV+1)
return(newNode(hapmat, child1, child0, depth = nrow(hapmat)-1))
}
#---------------------------------------------------------------#
#' Create a list of child nodes
#'
#' This function creates a pair of child nodes for a parent node.
#'
#' @param hapmat The hapMat object with columns ordered by ancestry.
#' @param child1 The child node from splitting on the mutant allele at the next SNV in the ordered
#' neighborhood.
#' @param child0 The child node from splitting on the non-mutant allele at the next SNV in the ordered
#' neighborhood.
#'
#' @keywords internal
#' @seealso \code{\link{makePartition}}
#'
#' @examples
#'
#' \dontshow{
#'
#' data(ex_hapMatSmall_data)
#'
#' # First, select a window of SNVs about a focal SNV.
#' SNV_win <- selectWindow(hapMat = ex_hapMatSmall_data,
#' focalSNV = 10, minWindow = 1)
#'
#' # Then order SNVs in the window.
#' ordHapmat <- orderSNVs(snvWin = SNV_win)
#'
#' # Create a list of child nodes.
#' chldNodes <- newNode(hapmat = ordHapmat)
#'
#' }
#'
newNode = function(hapmat, child1 = NULL, child0 = NULL, depth = 0) {
return(list(haps = rownames(hapmat), child1 = child1, child0 = child0, depth = depth))
}
#---------------------------------------------------------------#
#' Check the variation in a SNV
#'
#' This function checks the variation in a specified SNV and is applied during the recursive partitioning.
#'
#' @param snv A SNV to check for variation among haplotypes.
#'
#' @return Logical: TRUE if there is no variation in the SNV; FALSE otherwise.
#' @keywords internal
#' @seealso \code{\link{makePartition}}
#'
#' @examples
#'
#' \dontshow{
#'
#' data(ex_hapMatSmall_data)
#'
#' # Check the variation in a SNV.
#' noVariation(ex_hapMatSmall_data$hapmat[,1])
#'
#' }
#'
noVariation = function(snv) {
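  # sd() of a constant vector is 0, so a zero standard deviation means the
  # SNV shows no variation among the haplotypes.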
if(sd(snv) == 0) return(TRUE) else return(FALSE)
}
#' Convert a list data structure to Newick format
#'
#' This function traverses the dendrogram to build the character string that represents the dendrogram in
#' Newick format.
#'
#'
#' @param dend A list of nodes that represents the nested partition of haplotypes.
#' @param sep A separator for haplotype names in the same clade. See the arguments in
#' \code{\link{reconstructPP}}.
#'
#' @return A character string in Newick format.
#' @keywords internal
#' @seealso \code{\link{makeDend}}
#'
#' @examples
#'
#' \dontshow{
#'
#' data(ex_hapMatSmall_data)
#'
#' # First, select a window of SNVs about a focal SNV.
#' SNV_win <- selectWindow(hapMat = ex_hapMatSmall_data,
#' focalSNV = 10, minWindow = 1)
#'
#' # Then order SNVs in the window.
#' ordHapmat <- orderSNVs(snvWin = SNV_win)
#'
#' # Recursively partition haplotype matrix.
#' partitions <- makePartition(hapmat = ordHapmat, splitSNV = 1)
#'
#' # Dendrogram in Newick format.
#' newickDend <- dendToNewick(dend = partitions, sep = "-")
#'
#' }
#'
dendToNewick = function(dend, sep = "-"){
# Arguments:
  # dend is the nested-partition list (e.g. the output of makePartition)
# sep is a character string to separate haplotype names for
# tips comprised of multiple haplotypes (e.g, if a
# tip contained haplotypes C and D, the tip would
# appear as C-D in the Newick string).
dendStr = makeNewickRec(dend, sep)
# Now just append a ";" to mark the end of the dend
  return(paste(dendStr, ";", sep = ""))
}
#' Build the character string of nodes and haplotypes in Newick format
#'
#'
#' @param node A node of the tree (dend), as produced by \code{\link{makePartition}}.
#' @param sep A separator for haplotype names in the same clade. See the arguments in
#' \code{\link{reconstructPP}}.
#'
#' @keywords internal
#'
#' @examples
#'
#' \dontshow{
#'
#' data(ex_hapMatSmall_data)
#'
#' # First, select a window of SNVs about a focal SNV.
#' SNV_win <- selectWindow(hapMat = ex_hapMatSmall_data,
#' focalSNV = 10, minWindow = 1)
#'
#' # Then order SNVs in the window.
#' ordHapmat <- orderSNVs(snvWin = SNV_win)
#'
#' # Recursively partition haplotype matrix.
#' partitions <- makePartition(hapmat = ordHapmat, splitSNV = 1)
#'
#' dendStrng <- makeNewickRec(node = partitions, sep = "-")
#'
#' }
#'
makeNewickRec = function(node,sep) {
# leaf nodes have two NULL children, internal nodes have
# two non-NULL children.
if(!is.null(node$child1)) {
#internal -- get strings from children and parse as
# ( child1:len1 , child0:len2 )
# where len1 is length of branch between node and child1
    # and len2 is the length of the branch between node and child0.
len1 <- node$depth - node$child1$depth
child1Str = makeNewickRec(node$child1,sep)
len2 <- node$depth - node$child0$depth
child0Str = makeNewickRec(node$child0,sep)
return(paste("(",child1Str,":",len1,",",child0Str,":",len2,")",sep=""))
}else{
# leaf -- just return my haplotype label
# If there are multiple labels in this leaf, separate with "sep"
return(paste(node$haps,collapse=sep))
}
}
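#---------------------------------------------------------------#
# A minimal sketch (not exported code) illustrating the dend -> Newick
# conversion above; the hypothetical nodes below follow the structure
# created by newNode().
# leafA <- list(haps = "A", child1 = NULL, child0 = NULL, depth = 0)
# leafB <- list(haps = "B", child1 = NULL, child0 = NULL, depth = 0)
# root  <- list(haps = c("A", "B"), child1 = leafA, child0 = leafB, depth = 1)
# dendToNewick(root)   # expected to return "(A:1,B:1);"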
|
/R/make_dend_utility_functions.R
|
no_license
|
cbhagya/perfectphyloR
|
R
| false | false | 7,576 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sir.R
\name{run_model}
\alias{run_model}
\title{run stochastic SIR model}
\usage{
run_model(
tt,
nsim = 100,
beta = 0.2,
sigma = 0.1,
S_ini = 1000,
I_ini = 1,
plot = FALSE
)
}
\arguments{
\item{tt}{vector of times}
\item{nsim}{number of realisations to include}
\item{beta}{rate of infection}
\item{sigma}{rate of recovery}
\item{S_ini}{initial number susceptible}
\item{I_ini}{initial number infected}
\item{plot}{boolean, optionally plot results}
}
\description{
runs the stochastic SIR model for given times and parameters
}
|
/man/run_model.Rd
|
no_license
|
hillalex/sir
|
R
| false | true | 626 |
rd
|
|
#' Regularized Generalized Canonical Correlation Analysis (RGCCA) is a
#' generalization of regularized canonical correlation analysis to three or more
#' sets of variables.
#' @details
#' Consider \eqn{J} matrices
#' \eqn{\mathbf{X_1}, \mathbf{X_2}, ..., \mathbf{X_J}}{X1, X2, ..., XJ} that
#' represent \eqn{J} sets of variables observed on the
#' same set of \eqn{n} individuals. The matrices
#' \eqn{\mathbf{X_1}, \mathbf{X_2}, ..., \mathbf{X_J}}{X1, X2, ..., XJ}
#' must have the same number of rows, but may (and usually will) have different
#' numbers of columns. The aim of RGCCA is to study the relationships between
#' these \eqn{J} blocks of variables. It constitutes a general framework for
#' many multi-block data analysis methods. It combines the power of multi-block
#' data analysis methods (maximization of well identified criteria) and the
#' flexibility of PLS path modeling (the researcher decides which blocks are
#' connected and which are not). Hence, the use of RGCCA requires the
#' construction (user specified) of a design matrix,
#' (\eqn{\mathbf{connection}}{connection}), that characterizes the connections
#' between blocks. Each element of the (symmetric) design matrix
#' \eqn{\mathbf{connection} = (c_{jk})}{connection = (c_jk)} is non-negative,
#' usually equal to 1
#' if block \eqn{j} and block \eqn{k} are connected, and 0 otherwise. The
#' objective is to find a stationary point of the RGCCA optimization
#' problem. The function rgccad() implements a globally convergent algorithm
#' (i.e. monotone convergence that hits at convergence a stationary point).
#' Moreover, depending on the dimensionality of each block
#' \eqn{\mathbf{X}_j}{Xj},
#' \eqn{j = 1, \ldots, J}{j = 1, ..., J}, the primal (when \eqn{n > p_j})
#' algorithm or
#' the dual (when \eqn{n < p_j}) algorithm is used (see Tenenhaus et al. 2015).
#' Moreover, using a deflation strategy, rgccad() can compute several RGCCA
#' block components (specified by ncomp) for each block. Using deflation, within
#' each block, block components are guaranteed to be orthogonal. The so-called
#' symmetric deflation is considered in this implementation, i.e. each block is
#' deflated with respect to its own component. It should be noted that the
#' numbers of components per block can differ from one block to another.
#' The rgcca() function can handle missing values using a NIPALS type algorithm
#' (non-linear iterative partial least squares algorithm) as described in
#' (Tenenhaus et al, 2005).
#' @inheritParams rgcca
#' @param na.rm If TRUE, runs rgcca only on available data.
#' @param disjunction If TRUE, the response block is a one-hot encoded
#' qualitative block.
#' @return \item{Y}{A list of \eqn{J} elements. Each element of the list is a
#' matrix that contains the RGCCA block components for the corresponding block.}
#' @return \item{a}{A list of \eqn{J} elements. Each element of the list \eqn{a}
#' is a matrix of block weight vectors for the corresponding block.}
#' @return \item{astar}{A list of \eqn{J} elements. Each column of astar[[j]] is
#' a vector such that Y[[j]][, h] = blocks[[j]] \%*\% astar[[j]][, h].}
#' @return \item{tau}{Either a \eqn{1 \times J}{1 x J} vector or a
#' \eqn{\mathrm{max}(ncomp) \times J}{max(ncomp) x J} matrix containing the
#' values of the regularization parameters. tau varies from 0
#' (maximizing the correlation) to 1 (maximizing the covariance).
#' If tau = "optimal" the regularization parameters are estimated for each
#' block and each dimension using the Schafer and Strimmer (2005) analytical
#' formula. If tau is a \eqn{1 \times J}{1 x J} vector, tau[j] is identical
#' across the dimensions of block \eqn{\mathbf{X}_j}{Xj}. If tau is a matrix,
#' tau[k, j] is associated with \eqn{\mathbf{X}_{jk}}{Xjk} (\eqn{k}th residual
#' matrix for block \eqn{j}). tau can be also estimated using
#' \link{rgcca_permutation}.}
#' @return \item{crit}{A list of length max(ncomp). Each vector of
#' the list is related to one specific deflation stage and reports the values
#' of the criterion for this stage across iterations.}
#' @return \item{primal_dual}{A \eqn{1 \times J}{1 x J} vector that contains the
#' formulation ("primal" or "dual") applied to each of the \eqn{J} blocks within
#' the RGCCA algorithm.}
#' @references Tenenhaus M., Tenenhaus A. and Groenen P. J. (2017). Regularized
#' generalized canonical correlation analysis: a framework for sequential
#' multiblock component methods. Psychometrika, 82(3), 737-777.
#' @references Tenenhaus A., Philippe C. and Frouin, V. (2015). Kernel
#' generalized canonical correlation analysis. Computational Statistics and
#' Data Analysis, 90, 114-131.
#' @references Tenenhaus A. and Tenenhaus M., (2011). Regularized Generalized
#' Canonical Correlation Analysis, Psychometrika, Vol. 76, Nr 2, pp 257-284.
#' @references Schafer J. and Strimmer K. (2005). A shrinkage approach to
#' large-scale covariance matrix estimation and implications for functional
#' genomics. Statist. Appl. Genet. Mol. Biol. 4:32.
#' @title Regularized Generalized Canonical Correlation Analysis (RGCCA)
#' @examples
#' #############
#' # Example 1 #
#' #############
#' data(Russett)
#' X_agric <- as.matrix(Russett[, c("gini", "farm", "rent")])
#' X_ind <- as.matrix(Russett[, c("gnpr", "labo")])
#' X_polit <- as.matrix(Russett[, c("demostab", "dictator")])
#' blocks <- list(X_agric, X_ind, X_polit)
#' # Define the design matrix (output = connection)
#' connection <- matrix(c(0, 0, 1, 0, 0, 1, 1, 1, 0), 3, 3)
#' fit.rgcca <- rgccad(blocks, connection,
#' tau = c(1, 1, 1),
#' scheme = "factorial"
#' )
#' lab <- as.vector(apply(Russett[, 9:11], 1, which.max))
#' plot(fit.rgcca$Y[[1]], fit.rgcca$Y[[2]],
#' col = "white",
#' xlab = "Y1 (Agric. inequality)", ylab = "Y2 (Industrial Development)"
#' )
#'
#' ############################################
#' # Example 2: RGCCA and multiple components #
#' ############################################
#' ############################
#' # plot(y1, y2) for (RGCCA) #
#' ############################
#' fit.rgcca <- rgccad(blocks, connection,
#' tau = rep(1, 3), ncomp = c(2, 2, 1),
#' scheme = "factorial", verbose = TRUE
#' )
#' layout(t(1:2))
#' plot(fit.rgcca$Y[[1]][, 1], fit.rgcca$Y[[2]][, 1],
#' col = "white",
#' xlab = "Y1 (Agric. inequality)", ylab = "Y2 (Industrial Development)",
#' main = "Factorial plan of RGCCA"
#' )
#' plot(fit.rgcca$Y[[1]][, 1], fit.rgcca$Y[[1]][, 2],
#' col = "white",
#' xlab = "Y1 (Agric. inequality)", ylab = "Y2 (Agric. inequality)",
#' main = "Factorial plan of RGCCA"
#' )
#'
#' ######################################
#' # example 3: RGCCA and leave one out #
#' ######################################
#' Ytest <- matrix(0, 47, 3)
#' fit.rgcca <- rgccad(blocks, connection,
#' tau = rep(1, 3), ncomp = rep(1, 3),
#' scheme = "factorial", verbose = TRUE
#' )
#' for (i in 1:nrow(Russett)) {
#' B <- lapply(blocks, function(x) x[-i, ])
#' B <- lapply(B, scale)
#'
#' resB <- rgccad(B, connection,
#' tau = rep(1, 3), scheme = "factorial",
#' verbose = FALSE
#' )
#' # look for potential conflicting sign among components within
#' # the loo loop.
#' for (k in 1:length(B)) {
#' if (cor(fit.rgcca$a[[k]], resB$a[[k]]) >= 0) {
#' resB$a[[k]] <- resB$a[[k]]
#' } else {
#' resB$a[[k]] <- -resB$a[[k]]
#' }
#' }
#' Btest <- lapply(blocks, function(x) x[i, ])
#' Btest[[1]] <- (Btest[[1]] - attr(B[[1]], "scaled:center")) /
#' (attr(B[[1]], "scaled:scale"))
#' Btest[[2]] <- (Btest[[2]] - attr(B[[2]], "scaled:center")) /
#' (attr(B[[2]], "scaled:scale"))
#' Btest[[3]] <- (Btest[[3]] - attr(B[[3]], "scaled:center")) /
#' (attr(B[[3]], "scaled:scale"))
#' Ytest[i, 1] <- Btest[[1]] %*% resB$a[[1]]
#' Ytest[i, 2] <- Btest[[2]] %*% resB$a[[2]]
#' Ytest[i, 3] <- Btest[[3]] %*% resB$a[[3]]
#' }
#' lab <- apply(Russett[, 9:11], 1, which.max)
#' plot(fit.rgcca$Y[[1]], fit.rgcca$Y[[2]],
#' col = "white",
#' xlab = "Y1 (Agric. inequality)", ylab = "Y2 (Ind. Development)"
#' )
#' @noRd
rgccad <- function(blocks, connection = 1 - diag(length(blocks)),
tau = rep(1, length(blocks)),
ncomp = rep(1, length(blocks)), scheme = "centroid",
init = "svd", bias = TRUE, tol = 1e-08, verbose = TRUE,
na.rm = TRUE, superblock = FALSE,
response = NULL, disjunction = NULL,
n_iter_max = 1000, comp_orth = TRUE) {
if (verbose) {
scheme_str <- ifelse(is(scheme, "function"), "user-defined", scheme)
cat(
"Computation of the RGCCA block components based on the",
scheme_str, "scheme \n"
)
tau_str <- ifelse(
is.numeric(tau),
"Shrinkage intensity parameters are chosen manually \n",
"Optimal shrinkage intensity parameters are estimated \n"
)
cat(tau_str)
}
##### Initialization #####
# ndefl number of deflation per block
ndefl <- ncomp - 1
N <- max(ndefl)
J <- length(blocks)
pjs <- vapply(blocks, NCOL, FUN.VALUE = 1L)
nb_ind <- NROW(blocks[[1]])
crit <- list()
R <- blocks
a <- lapply(seq(J), function(b) c())
Y <- lapply(seq(J), function(b) c())
if (superblock && comp_orth) {
P <- c()
} else {
P <- lapply(seq(J), function(b) c())
}
# Whether primal or dual
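  # (the dual formulation is used for blocks with more variables than individuals)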
primal_dual <- rep("primal", J)
primal_dual[which(nb_ind < pjs)] <- "dual"
# Save computed shrinkage parameter in a new variable
computed_tau <- tau
if (is.vector(tau)) {
computed_tau <- matrix(
rep(tau, N + 1),
      nrow = N + 1, ncol = J, byrow = TRUE
)
}
##### Computation of RGCCA components #####
for (n in seq(N + 1)) {
if (verbose) {
cat(paste0(
"Computation of the RGCCA block components #", n,
" is under progress...\n"
))
}
gcca_result <- rgccak(R, connection,
tau = computed_tau[n, ], scheme = scheme,
init = init, bias = bias, tol = tol,
verbose = verbose, na.rm = na.rm, n_iter_max = n_iter_max
)
# Store tau, crit
computed_tau[n, ] <- gcca_result$tau
crit[[n]] <- gcca_result$crit
# Store Y, a, factors and weights
a <- lapply(seq(J), function(b) cbind(a[[b]], gcca_result$a[[b]]))
Y <- lapply(seq(J), function(b) cbind(Y[[b]], gcca_result$Y[, b]))
# Deflation procedure
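    # (no deflation is needed after the last component; the loop exits just below)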
if (n == N + 1) break
defl_result <- deflate(gcca_result$a, gcca_result$Y, R, P, ndefl, n,
superblock, comp_orth, response, na.rm)
R <- defl_result$R
P <- defl_result$P
}
# If there is a superblock and weight vectors are orthogonal, it is possible
# to have non meaningful weights associated to blocks that have been set to
# zero by the deflation
if (superblock && !comp_orth) {
a <- lapply(a, function(x) {
if (ncol(x) > nrow(x)) {
x[, seq(nrow(x) + 1, ncol(x))] <- 0
}
return(x)
})
}
##### Generation of the output #####
if (N == 0) {
crit <- unlist(crit)
computed_tau <- as.numeric(computed_tau)
} else {
computed_tau <- apply(computed_tau, 2, as.numeric)
}
astar <- compute_astar(a, P, superblock, comp_orth, N)
out <- list(
Y = Y,
a = a,
astar = astar,
tau = computed_tau,
crit = crit, primal_dual = primal_dual
)
class(out) <- "rgccad"
return(out)
}
|
/R/rgccad.R
|
no_license
|
cran/RGCCA
|
R
| false | false | 11,649 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fin_to_ddh_keys.R
\name{fin_to_ddh_keys}
\alias{fin_to_ddh_keys}
\title{fin_to_ddh_keys
Extract specific metadata from the Finance API JSON response}
\usage{
fin_to_ddh_keys(metadata_in, metadata_out = fin2ddh::fin_placeholder,
lookup = fin2ddh::lookup)
}
\arguments{
\item{metadata_in}{list: The output of get_fin_datasets_metadata()}
\item{metadata_out}{list: Package object: fin_placeholder}
\item{lookup}{data.frame: Package object: fin2ddh::lookup}
}
\value{
list
}
\description{
fin_to_ddh_keys
Extract specific metadata from the Finance API JSON response
}
|
/man/fin_to_ddh_keys.Rd
|
no_license
|
PinkDiamond1/fin2ddh
|
R
| false | true | 646 |
rd
|
|
#install.packages("quantmod")
library("quantmod")
getSymbols(c("2330.tw","2317.tw"))
STK1 = get("2330.TW")
STK2 = get("2317.TW")
layout(matrix(c(1,1,2,2),nrow=2,ncol=2, byrow=TRUE))
#matrix
"
figure1 | figure1
figure2 | figure2
"
#another way is
layout(matrix(c(1,2),byrow=TRUE))
chartSeries(STK1,TA=NULL,layout = NULL) # TA=NULL suppresses the volume/TA sub-chart
chartSeries(STK2,TA=NULL,layout = NULL) # TA=NULL suppresses the volume/TA sub-chart
layout(matrix(c(1,2,3,4),nrow=2,ncol=2, byrow=FALSE))
#matrix
"
figure1 | figure3
figure2 | figure4
"
chartSeries(STK1,layout = NULL)
chartSeries(STK2,layout = NULL)
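# Note: layout.show(4) can be used to preview the 2 x 2 panel arrangement set
# up above, and layout(1) restores the default single-panel layout afterwards.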
|
/twochart.R
|
no_license
|
jerrychu888/RTrading
|
R
| false | false | 628 |
r
|
#install.packages("quantmod")
library("quantmod")
getSymbols(c("2330.tw","2317.tw"))
STK1 = get("2330.TW")
STK2 = get("2317.TW")
layout(matrix(c(1,1,2,2),nrow=2,ncol=2, byrow=TRUE))
#matrix
"
figure1 | figure1
figure2 | figure2
"
#another way is
layout(matrix(c(1,2),byrow=TRUE))
chartSeries(STK1,TA=NULL,layout = NULL) #TA is vol chart , vol is also a chart
chartSeries(STK2,TA=NULL,layout = NULL) #TA is vol chart , vol is also a chart
layout(matrix(c(1,2,3,4),nrow=2,ncol=2, byrow=FALSE))
#matrix
"
figure1 | figure3
figure2 | figure4
"
chartSeries(STK1,layout = NULL)
chartSeries(STK2,layout = NULL)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tempResponse.R
\name{tempResponse}
\alias{tempResponse}
\title{Calculation of climatic metrics from hourly temperature records}
\usage{
tempResponse(hourtemps, Start_JDay = 1, End_JDay = 366,
models = list(Chilling_Hours = Chilling_Hours, Utah_Chill_Units =
Utah_Model, Chill_Portions = Dynamic_Model, GDH = GDH),
misstolerance = 50, whole_record = FALSE, mean_out = FALSE)
}
\arguments{
\item{hourtemps}{a list of two elements, with element 'hourtemps' being a
dataframe of hourly temperatures (e.g. produced by stack_hourly_temps). This
data frame must have a column for Year, a column for JDay (Julian date, or
day of the year), a column for Hour and a column for Temp (hourly
temperature). The second (optional) element is QC, which is a data.frame
indicating completeness of the dataset. This is automatically produced by
stack_hourly_temps.}
\item{Start_JDay}{the start date (in Julian date, or day of the year) of the
period, for which chill and heat should be quantified.}
\item{End_JDay}{the end date (in Julian date, or day of the year) of the
period, for which chill and heat should be quantified.}
\item{models}{named list of models that should be applied to the hourly
temperature data. These should be functions that take as input a vector of
hourly temperatures. This defaults to the set of models provided by the
chilling function.}
\item{misstolerance}{maximum percentage of values for a given season that
can be missing without the record being removed from the output. Defaults to
50.}
\item{whole_record}{boolean parameter indicating whether the metrics should
be summed over the entire temperature record. If set to TRUE (default is
FALSE), then the function ignores the specified start and end dates and
simply returns the totals of each metric that accumulated over the entire
temperature record.}
\item{mean_out}{boolean parameter indicating whether the mean of the input
metric (e.g. temperature) should be returned in a column named "Input_mean".}
}
\value{
data frame showing totals for all specified models for the
respective periods for all seasons included in the temperature records.
Columns are Season, End_year (the year when the period ended) and Days (the
duration of the period), as well as one column per model, which receives the
same name as the function in the models list. If the weather input consisted
of a list with elements stack and QC, the output also contains columns from
QC that indicate the completeness of the weather record that the
calculations are based on.
}
\description{
Extension of the chilling function, which calculates four pre-defined
temperature-based metrics. This function has more flexibility, because it
allows specifying the models that should be calculated. These can be
selected from a small set of models provided with chillR, but they can also
be defined by the user. Precondition at the moment is that they require
hourly temperature only as inputs.
}
\details{
The function calculates the total of user-specified temperature-based
metrics over periods delineated by Start_JDay and End_JDay. Models for
calculating these metrics are provided in the models list, whose elements
are named functions that convert hourly temperature records into a
cumulative record of the climate metric of interest. The metric is then
added up cumulatively over the entire temperature record and then summarized
by season. Examples of functions that can be used are Chilling_Hours,
Utah_Model, Dynamic_Model and GDH. The custom_model function allows
customized simple weight-based models, which assign differential weights to
temperatures within certain intervals. See custom_model documentation for
details.
}
\examples{
weather<-fix_weather(KA_weather[which(KA_weather$Year>2006),])
hourtemps<-stack_hourly_temps(weather,latitude=50.4)
df=data.frame(
lower=c(-1000,1,2,3,4,5,6),
upper=c(1,2,3,4,5,6,1000),
weight=c(0,1,2,3,2,1,0))
custom<-function(x) step_model(x,df)
models<-list(Chilling_Hours=Chilling_Hours,Utah_Chill_Units=Utah_Model,Chill_Portions=
Dynamic_Model,GDH=GDH,custom=custom)
tempResponse(hourtemps,Start_JDay = 305,End_JDay = 60,models)
}
\references{
The chillR package:
Luedeling E, Kunz A and Blanke M, 2013. Identification of chilling and heat
requirements of cherry trees - a statistical approach. International Journal
of Biometeorology 57,679-689.
}
\author{
Eike Luedeling
}
\keyword{and}
\keyword{calculation}
\keyword{chill}
\keyword{heat}
|
/man/tempResponse.Rd
|
no_license
|
HawardKetoyoMsatsi/chillR
|
R
| false | true | 4,641 |
rd
|
|
library("isa2")
library("gplots")
library("RColorBrewer")
#### example 1 ##########################################
set.seed(1)
lamb <- isa.in.silico(num.rows = 100, num.cols = 100); lamb
normed.lamb <- isa.normalize(lamb[[1]]); normed.lamb ## gives the 2 matrices Ec & Er
row.seeds <- generate.seeds(length=nrow(lamb[[1]]), count=100)
isaresult <- isa.iterate(normed.lamb, thr.row=1, thr.col=1,
row.seeds=row.seeds); isaresult
isa.unique(normed.lamb, isaresult) ## merge similar modules
## draw heatmaps for both matrices
heatmap(as.matrix(normed.lamb$Er), col=brewer.pal(9, "Blues"))
heatmap(as.matrix(normed.lamb$Ec), col=brewer.pal(9, "Blues"))
#### example 2 (messy) #################################
random.matrix <- isa.in.silico(num.rows=3, num.cols=3); random.matrix
normed.random <- isa.normalize(random.matrix[[1]]); normed.random ## gives the 2 matrices Ec & Er
row.seeds2 <- generate.seeds(length=nrow(random.matrix[[1]]), count=10)
isaresult2 <- isa.iterate(normed.random, thr.row=1, thr.col=1,
row.seeds=row.seeds2); isaresult2
isa.unique(normed.random, isaresult2) ## merge similar modules
#images(random.matrix, isaresult2) ## ne fct pas
|
/old_files/isa_testing.R
|
no_license
|
Afanc/Expression
|
R
| false | false | 1,217 |
r
|
library("isa2")
library("gplots")
library("RColorBrewer")
#### example 1 ##########################################
set.seed(1)
lamb <- isa.in.silico(num.rows = 100, num.cols = 100); lamb
normed.lamb <- isa.normalize(lamb[[1]]); normed.lamb ## gives the 2 matrices Ec & Er
row.seeds <- generate.seeds(length=nrow(lamb[[1]]), count=100)
isaresult <- isa.iterate(normed.lamb, thr.row=1, thr.col=1,
row.seeds=row.seeds); isaresult
isa.unique(normed.lamb, isaresult) ## merge similar modules
## draw heatmaps for both matrices
heatmap(as.matrix(normed.lamb$Er), col=brewer.pal(9, "Blues"))
heatmap(as.matrix(normed.lamb$Ec), col=brewer.pal(9, "Blues"))
#### example 2 (foireux) #################################
random.matrix <- isa.in.silico(num.rows=3, num.cols=3); random.matrix
normed.random <- isa.normalize(random.matrix[[1]]); normed.random ## gives the 2 matrices Ec & Er
row.seeds2 <- generate.seeds(length=nrow(random.matrix[[1]]), count=10)
isaresult2 <- isa.iterate(normed.random, thr.row=1, thr.col=1,
row.seeds=row.seeds2); isaresult2
isa.unique(normed.random, isaresult2) ## merge similar modules
#images(random.matrix, isaresult2) ## ne fct pas
|
context("MeasureSurvHarrellC")
test_that("missing surv",{
lung2 = load_dataset("lung", "survival")
lung2$time[1] = NA
t = TaskSurv$new("s",backend = lung2, time="time",event="status")
  expect_true(anyMissing(t$truth()))
expect_true(is.na(cindex(t$truth(), 1)))
})
|
/tests/testthat/test_mlr_measures_MeasureSurvHarrellC.R
|
permissive
|
sands58/mlr3proba
|
R
| false | false | 263 |
r
|
context("MeasureSurvHarrellC")
test_that("missing surv",{
lung2 = load_dataset("lung", "survival")
lung2$time[1] = NA
t = TaskSurv$new("s",backend = lung2, time="time",event="status")
anyMissing(t$truth())
expect_true(is.na(cindex(t$truth(), 1)))
})
|
# R function script to read in data and clean up by removing missing
# or ineligible data
readingfiles <- function(){
#
fhfull = read.csv("Raw_data/finalfhspreadsheet_12thAug2016.csv", nrow = 258, stringsAsFactors=F)
total_study_full = fhfull
nparticipants = nrow(total_study_full)
assign("nparticipants",nparticipants,envir = .GlobalEnv)
colnames(total_study_full)
# check phenotype data match genotype data and then remove extra columns
total_study = total_study_full
#Date of births match?
if (total_study$Dob[nparticipants] == total_study$p.Dob[nparticipants]) {
message ("yey - DOBs match so we can remove the extra column from the phenotype data")
total_study$p.Dob <- NULL
} else {
message ("Boo - DOB in genotype and phenotype data do not match
so cannot remove this column until further investigation")
}
#convert to correct classes
total_study$Sequenom.Reported <- as.Date((total_study$Sequenom.Result), "%d/%m/%Y")
total_study$Dutch.score <- as.integer(total_study$Dutch.score)
total_study$Data.sent.to.NGS <-as.Date((total_study$Data.sent.to.NGS), "%d/%m/%Y")
total_study$MiSeq.reported.by.NGS <- as.Date((total_study$MiSeq.reported.by.NGS), "%d/%m/%Y")
#total_study$PHENOTYPE <- NULL
total_study$Dutch.score.1 <- as.integer(total_study$Dutch.score.1)
total_study$Family.No <-as.character(total_study$Family.No)
total_study$p.Family.No <-as.character(total_study$p.Family.No)
total_study$A_NoRels50.risk <- as.integer(total_study$A_NoRels50.risk)
total_study$A_NoRels25.risk <- as.integer(total_study$A_NoRels25.risk)
#Family number match?
if (total_study$Family.No[nparticipants] == total_study$p.Family.No[nparticipants]) {
message ("yey - Family numbers match so we can remove the extra column from the phenotype data")
total_study$p.Family.No <- NULL
} else {
message ("Boo - Family numbers in genotype and phenotype data do not match
so cannot remove this column until further investigation")
}
#do the Dutch scores match?
if(total_study$Dutch.score[nparticipants] == total_study$Dutch.score.1[nparticipants] ) {
message ("yey - Dutch scores match so we can remove the extra column from the phenotype data")
total_study$Dutch.score.1 <- NULL
} else {
message ("Boo - Dutch scores in genotype and phenotype data do not match
so cannot remove this column until further investigation")
}
#remove empty columns
total_study$a1 <- NULL
total_study$a1.1 <- NULL
total_study$a3 <- NULL
total_study$a4 <- NULL
total_study$a5 <- NULL
total_study$a5.1 <-NULL
total_study$Famhist_4 <-NULL
total_study$A_NoRels50.risk <- as.integer(total_study$A_NoRels50.risk)
total_study$A_NoRels25.risk <- as.integer(total_study$A_NoRels25.risk)
total_study$A_max <- as.integer(total_study$A_max)
total_study$B_max <- as.integer(total_study$B_max)
total_study$C_max <- as.integer(total_study$C_max)
total_study$D_max <- as.integer(total_study$D_max)
#
# remove erroneous "6" value in TenXan column
colno <- which(sapply(total_study$C_TendXan, function(x) any(x == "6")))
total_study$C_TendXan[colno] = "YES"
total_study$C_TendXan <- as.factor(total_study$C_TendXan)
#
# no dates of LDL have been added so remove this column
total_study$Date.of.LDL <- NULL
#
# Clean LDL intervals column
total_study$D_LDL_1 <- as.factor(total_study$D_LDL_1)
total_study$D_LDL_1[total_study$D_LDL_1 == 8] <- "YES"
total_study$D_LDL_1[total_study$D_LDL_1 == "UNKNOWN" |
total_study$D_LDL_1 == "UKNOWN" | total_study$D_LDL_1 == 0 ] <- "NO"
total_study$D_LDL_1 <- factor(total_study$D_LDL_1)
total_study$D_LDL_2 <- as.factor(total_study$D_LDL_2)
total_study$D_LDL_2[total_study$D_LDL_2 == 5] <- "YES"
total_study$D_LDL_2[total_study$D_LDL_2 == "UNKNOWN" |
total_study$D_LDL_2 == "UKNOWN" | total_study$D_LDL_2 == 0 ] <- "NO"
total_study$D_LDL_2 <- factor(total_study$D_LDL_2)
total_study$D_LDL_3 <- as.factor(total_study$D_LDL_3)
total_study$D_LDL_3[total_study$D_LDL_3 == 3] <- "YES"
total_study$D_LDL_3[total_study$D_LDL_3 == "UNKNOWN" |
total_study$D_LDL_3 == "UKNOWN" | total_study$D_LDL_3 == "UNKOWN"
| total_study$D_LDL_3 == "U" | total_study$D_LDL_3 == 0] <- "NO"
total_study$D_LDL_3 <- factor(total_study$D_LDL_3)
total_study$D_LDL_4 <- as.factor(total_study$D_LDL_4)
total_study$D_LDL_4[total_study$D_LDL_4 == "UNKNOWN" |
total_study$D_LDL_4 == "UKNOWN" | total_study$D_LDL_4 == "UNKOWN"
| total_study$D_LDL_4 == "U" | total_study$D_LDL_4 == 0] <- "NO"
total_study$D_LDL_4 <- factor(total_study$D_LDL_4)
total_study$LDL <- "Unknown"
total_study$LDL[total_study$D_LDL_1 == "YES"] <- "> 8.5"
total_study$LDL[total_study$D_LDL_2 == "YES"] <- "6.5 - 8.4"
total_study$LDL[total_study$D_LDL_3 == "YES"] <- "5.0 - 6.4"
total_study$LDL[total_study$D_LDL_4 == "YES"] <- "4.0 - 4.9"
total_study$LDL <- factor(total_study$LDL, levels= c("4.0 - 4.9", "5.0 - 6.4", "6.5 - 8.4", "> 8.5", "Unknown"))
# look to fasting lipid profile to help characterise LDL levels
# (LDLC is converted to numeric first so the comparisons below are numeric,
# and only rows whose LDL interval is still unknown are filled in)
total_study$LDLC <- as.numeric(total_study$LDLC)
unknown_ldl <- total_study$LDL == "Unknown" & !is.na(total_study$LDLC)
total_study$LDL[unknown_ldl & total_study$LDLC >= 4.0 & total_study$LDLC <= 4.9] <- "4.0 - 4.9"
total_study$LDL[unknown_ldl & total_study$LDLC >= 5.0 & total_study$LDLC <= 6.4] <- "5.0 - 6.4"
total_study$LDL[unknown_ldl & total_study$LDLC >= 6.5 & total_study$LDLC <= 8.4] <- "6.5 - 8.4"
total_study$LDL[unknown_ldl & total_study$LDLC >= 8.5] <- "> 8.5"
#create a new column with intervals for Dutch Score.
total_study$DLCN <- "Unknown"
#total_study$DLCN[total_study$Dutch.score < 2] <- "< 2"
total_study$DLCN[total_study$Dutch.score >= 2 & total_study$Dutch.score <= 6] <- "2 - 6"
total_study$DLCN[total_study$Dutch.score >=7 & total_study$Dutch.score <= 9] <- "7 - 9"
total_study$DLCN[total_study$Dutch.score >= 10 & total_study$Dutch.score <= 12] <- "10 - 12"
total_study$DLCN[total_study$Dutch.score >= 13 & total_study$Dutch.score <= 16] <- "13 - 16"
#total_study$DLCN <-as.factor(total_study$DLCN) #, levels = c("", "< 2", "2 - 6", "7 - 9", "10 - 12", "13 - 16"))
total_study$DLCN <-factor(total_study$DLCN, levels = c( "2 - 6", "7 - 9", "10 - 12", "13 - 16", "Unknown"))
# #total_study$TOTAL..DUTCH.SCORE. <- as.integer(total_study$TOTAL..DUTCH.SCORE)
#
  # #Dutch score check
# #A_max + B_max + C_max + D_max
total_study$HDLC <- as.numeric(total_study$HDLC)
total_study$TotalC <- as.numeric(total_study$TotalC)
total_study$Lipo <- as.numeric(total_study$Lipo)
total_study$Trigly <- as.numeric(total_study$Trigly)
total_study$LDLC <- as.numeric(total_study$LDLC)
  #remove erroneous "APOB" mutation in Sequenom Result column
colno <- which(sapply(total_study$Sequenom.Result, function(x) any(x == "APOB c.10580G>A p.(Arg3527Gln)")))
total_study$Sequenom.Result[colno] = "ABOB c.10580G>A p.(Arg3527Gln)"
# WHERE IS THE MISSING DATA WHICH ANN CAN HELP WITH?
total_study$Sequenom.Result <- as.factor(total_study$Sequenom.Result)
levels(total_study$Sequenom.Result)
missing_seq_result = subset(total_study,total_study$Sequenom.Result == "NOT DONE")
total_study$MiSeq.Result <- as.factor(total_study$MiSeq.Result)
levels(total_study$MiSeq.Result)
total_study$MiSeq.Result[(total_study$MiSeq.Result == "nmd")] <- "NMD"
total_study$MiSeq.required <- as.factor(total_study$MiSeq.required)
levels(total_study$MiSeq.required)
missing_Miseq_result = subset(total_study, total_study$MiSeq.required != "N" &
total_study$MiSeq.Result == "")
####### WRITE OUT CSV OF MISSING DATA ######
missing_data = rbind(missing_seq_result, missing_Miseq_result)
write.csv(missing_data, "output/missing_data.csv")
### ADDITIONAL COLUMN TO ALLOW FOR ALL POSSIBILITIES OF TESTING ####
total_study$Overall <- ""
total_study$Overall[(total_study$Sequenom.Result != "" & total_study$Sequenom.Result != "NMD" &
total_study$Sequenom.Result != "NOT DONE")] <- "Seq MD"
total_study$Overall[total_study$Sequenom.Result == "NMD" & total_study$MiSeq.required == "N"] <- "Seq NMD and no MiSeq"
total_study$Overall[total_study$Sequenom.Result == "NMD" & total_study$MiSeq.required != "N"] <- "Seq NMD and MDT referred"
total_study$Overall[total_study$Sequenom.Result == "NMD" &
total_study$MiSeq.Result != "NMD" & total_study$MiSeq.Result != ""] <- "Seq NMD and MiSeq MD"
total_study$Overall[ total_study$Sequenom.Result == "NMD" & total_study$MiSeq.Result == "NMD"] <- "Seq NMD and MiSeq NMD"
total_study$Overall <- as.factor(total_study$Overall)
## arrange levels in order wanted graphically ##
total_study$Overall <- factor(total_study$Overall, levels = c("", "Seq MD", "Seq NMD and no MiSeq", "Seq NMD and MDT referred",
"Seq NMD and MiSeq MD", "Seq NMD and MiSeq NMD"))
####### CALCULATE AGE ###########################
total_study$Dob <- as.Date((total_study$Dob), "%m/%d/%Y")
total_study$age <- age_calc(total_study$Dob, enddate = Sys.Date(), units = "years")
total_study_pos = subset(total_study, (total_study$Sequenom.Result != "NOT DONE" &
total_study$Sequenom.Result != "" & total_study$Sequenom.Result != "NMD") |
(total_study$MiSeq.Result != "NMD" & total_study$MiSeq.required == "done"))
assign("total_study_pos", total_study_pos, envir=.GlobalEnv)
nrow(total_study_pos)
#ageldl <- ggplot(total_study, aes(x = age, y = LDLC, colour = ))
#plot(total_study$age, total_study$LDLC)
########################################################################
assign("total_study",total_study,envir = .GlobalEnv)
}
|
/functions/read_clean.R
|
no_license
|
jallen70/FHfileshare
|
R
| false | false | 9,714 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HistDAWass-package.R
\docType{data}
\name{China_Month}
\alias{China_Month}
\title{A monthly climatic dataset of China}
\format{
a \code{MatH} object, a matrix of distributions.
}
\source{
raw data are available here: \url{https://cdiac.ess-dive.lbl.gov/ftp/tr055/}
}
\description{
A dataset with the distributions of some climatic variables collected for each month in 60 stations of China.
There are 168 collected variables, i.e. 14 climatic variables observed over 12 months. The 14 variables are the following:
mean station pressure (mb), mean temperature, mean maximum temperature, mean minimum temperature,
total precipitation (mm), sunshine duration (h), mean cloud amount (percentage of sky cover),
mean relative humidity (%), snow days (days with snow cover), dominant wind direction (degrees),
mean wind speed (m/s), dominant wind frequency (%), extreme maximum temperature (C),
extreme minimum temperature.
Use the command \code{ get.MatH.main.info(China_Month)} for rapid info.
}
\author{
Antonio Irpino, 2014-10-05
}
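\examples{
## quick overview of the matrix of distributions, as suggested in the
## description above (a minimal sketch; assumes the dataset is lazy-loaded)
get.MatH.main.info(China_Month)
}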
|
/man/China_Month.Rd
|
no_license
|
cran/HistDAWass
|
R
| false | true | 1,139 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MCPLDA.R
\name{wlda}
\alias{wlda}
\title{Implements weighted likelihood estimation for LDA}
\usage{
wlda(a, w)
}
\arguments{
\item{a}{is the data set}
\item{w}{is an indicator matrix for the K classes or, potentially, a weight matrix in which the fraction with which a sample belongs to a particular class is indicated}
}
\value{
m contains the means, p contains the class priors, iW contains the INVERTED within covariance matrix
}
\description{
Implements weighted likelihood estimation for LDA
}
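\examples{
## Illustrative sketch only: the toy matrices X and w below are made up for
## demonstration, and the direct call assumes wlda() is accessible.
\dontrun{
X <- rbind(matrix(rnorm(20), 10, 2), matrix(rnorm(20, mean = 2), 10, 2))
w <- cbind(rep(c(1, 0), each = 10), rep(c(0, 1), each = 10))
fit <- wlda(X, w)
str(fit) # means, class priors and the inverted within covariance matrix
}
}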
|
/man/wlda.Rd
|
no_license
|
jkrijthe/RSSL
|
R
| false | true | 578 |
rd
|
\name{check.network-methods}
\docType{methods}
%\alias{check.network-methods}
\alias{check.network,graph-method}
\alias{check.network,matrix-method}
\alias{check.network}
\title{ Graph checking }
\description{
Method to check the characteristics of a graph.
Checks whether its adjacency matrix is symmetric, whether it has NA, NaN or Inf values, and reports some minimal
statistics about nodes and edges.
}
\usage{
check.network(W, name="Network matrix")
}
\arguments{
\item{W}{
an object representing the graph to be checked
}
\item{name}{
a character vector that will be printed as heading
}
}
\value{
It returns a list of strings about the characteristics of the graph
}
\section{Methods}{
\describe{
\item{\code{signature(W = "graph")}}{
an object of the virtual class graph (hence including objects of class \code{\link[graph:graphAM-class]{graphAM}} and \code{\link[graph:graphNEL-class]{graphNEL}} from the package \pkg{graph})
}
\item{\code{signature(W = "matrix")}}{
a matrix representing the adjacency matrix of the graph
}
}}
\examples{
library(bionetdata);
data(DD.chem.data);
check.network(DD.chem.data);
W <- Prob.norm(DD.chem.data);
check.network(W, "prob. transition matrix");
\donttest{WL <- Laplacian.norm(DD.chem.data);
check.network(WL, "Laplacian norm. matrix");}
library(graph)
g1 = randomEGraph(LETTERS[1:15], edges = 40);
check.network(g1, "random graph");
}
\keyword{methods}
\keyword{utilities}
|
/man/check.network-methods.Rd
|
no_license
|
cran/NetPreProc
|
R
| false | false | 1,417 |
rd
|
library(RDCOMClient)
e <- COMCreate("Excel.Application")
e[["Visible"]] <- TRUE
e[["StandardFontSize"]]
e[["StartupPath"]]
e[["Path"]] #
e[["PathSeparator"]] # Check characters.
e[["StatusBar"]] # VARIANT returned.
e[["SheetsInNewWorkbook"]]
# Functions
e$CentimetersToPoints(1.)
e$CheckSpelling("Duncan")
e$CheckSpelling("Duncna")
e$Calculate()
e$CalculateFull()
####
require("RDCOMClient") || stop("You need to install the RDCOMClient package")
.COMInit()
e <- COMCreate("Excel.Application")
books <- e[["workbooks"]]
fn <- system.file("examples", "duncan.xls", package = "RDCOMClient")
fn <- gsub("/", "\\\\", fn)
print(fn)
b = books$open(fn)
sheets = b[["sheets"]]
mySheet = sheets$Item(as.integer(1))
e[["Visible"]] <- TRUE
r = mySheet[["Cells"]]
v <- r$Item(as.integer(1), as.integer(1))
v[["Value"]]
v <- r$Item(as.integer(1), as.integer(3))
v[["Value"]]
################
xls <- COMCreate("Excel.Application")
books <- xls[["Workbooks"]]
books$Open(gsub("/", "\\\\", system.file("examples", "duncan.xls", package = "RDCOMClient")))
wks = books$item(1)[["worksheets"]]$item(1)
r1 <- wks$Range("A1:C4") # okay
r1$Value()
## create Cell COMIDispatch object:
c1 <- wks$Cells(1,1) # okay
c1$Value()
c2 <- wks$Cells(3,4) # okay
c2$Value()
r2 <- wks$Range("A1", "C4") # okay
r3 <- wks$Range(c1, c2)
print(15, row.names = FALSE)
#
# Sys.time()
#
# formatC(0.987654321, digits=3)
|
/HighLevelExcelControlDemo.R
|
no_license
|
stanasa/Git
|
R
| false | false | 1,486 |
r
|
\name{GasolineYield}
\alias{GasolineYield}
\title{Estimation of Gasoline Yields from Crude Oil}
\description{
Operational data of the proportion of crude oil converted to gasoline after
distillation and fractionation.
}
\usage{data("GasolineYield")}
\format{
A data frame containing 32 observations on 6 variables.
\describe{
\item{yield}{proportion of crude oil converted to gasoline after distillation and fractionation.}
\item{gravity}{crude oil gravity (degrees API).}
\item{pressure}{vapor pressure of crude oil (lbf/in2).}
\item{temp10}{temperature (degrees F) at which 10 percent of crude oil has vaporized.}
\item{temp}{temperature (degrees F) at which all gasoline has vaporized.}
\item{batch}{factor indicating unique batch of conditions \code{gravity},
\code{pressure}, and \code{temp10}.}
}
}
\details{
This dataset was collected by Prater (1956); its dependent variable is the
proportion of crude oil converted to gasoline after distillation and fractionation. This dataset was
analyzed by Atkinson (1985), who used the linear regression model and noted that
there is ``indication that the error distribution is not quite symmetrical,
giving rise to some unduly large and small residuals'' (p. 60).
The dataset contains 32 observations on the response and on the independent
variables. It has been noted (Daniel and Wood, 1971, Chapter 8) that there are only
ten sets of values of the first three explanatory variables which correspond to
ten different crudes and were subjected to experimentally controlled distillation
conditions. These conditions are captured in variable \code{batch} and
the data were ordered according to the ascending order of \code{temp10}.
}
\source{
Taken from Prater (1956).
}
\references{
Atkinson, A.C. (1985).
\emph{Plots, Transformations and Regression: An Introduction to Graphical Methods of Diagnostic Regression Analysis}.
New York: Oxford University Press.
Cribari-Neto, F., and Zeileis, A. (2010). Beta Regression in R.
\emph{Journal of Statistical Software}, \bold{34}(2), 1--24.
\doi{10.18637/jss.v034.i02}
Daniel, C., and Wood, F.S. (1971).
\emph{Fitting Equations to Data}.
New York: John Wiley and Sons.
Ferrari, S.L.P., and Cribari-Neto, F. (2004).
Beta Regression for Modeling Rates and Proportions.
\emph{Journal of Applied Statistics}, \bold{31}(7), 799--815.
Prater, N.H. (1956).
Estimate Gasoline Yields from Crudes.
\emph{Petroleum Refiner}, \bold{35}(5), 236--238.
}
\seealso{\code{\link{betareg}}}
\examples{
## IGNORE_RDIFF_BEGIN
data("GasolineYield", package = "betareg")
gy1 <- betareg(yield ~ gravity + pressure + temp10 + temp, data = GasolineYield)
summary(gy1)
## Ferrari and Cribari-Neto (2004)
gy2 <- betareg(yield ~ batch + temp, data = GasolineYield)
## Table 1
summary(gy2)
## Figure 2
par(mfrow = c(3, 2))
plot(gy2, which = 1, type = "pearson", sub.caption = "")
plot(gy2, which = 1, type = "deviance", sub.caption = "")
plot(gy2, which = 5, type = "deviance", sub.caption = "")
plot(gy2, which = 4, type = "pearson", sub.caption = "")
plot(gy2, which = 2:3)
par(mfrow = c(1, 1))
## exclude 4th observation
gy2a <- update(gy2, subset = -4)
gy2a
summary(gy2a)
## IGNORE_RDIFF_END
}
\keyword{datasets}
|
/man/GasolineYield.Rd
|
no_license
|
cran/betareg
|
R
| false | false | 3,209 |
rd
|
library(readr)
library(tidyr)
library(dplyr)
library(edgeR)
library(ggplot2)
library(forcats)
library(FactoMineR)
##### Input Variables ####
# Set the directory
setwd("change_me")
# Set the input file. Input file is a csv of read counts for yeast clones.
# Each row is a unique yeast clone (besides the first row with column names).
# The first column contains protein names, the second column contains the
# barcode sequence associated with the yeast clone, the third column contains
# the counts from the pre-selection library, and the rest of the columns
# contain counts from post-selection libraries.
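# An illustrative layout (header names here are only an example; the script
# renames the first column to "Protein" and drops the barcode column):
#   Protein, Barcode,  PreLib, Post1, Post2
#   GeneA,   ACGTACGT, 1200,   3400,  150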
input_bc_file <- "change_me.csv"
# Set a name for the output folder
folder_name <- "change_me"
##### Import ####
# Read in the CSV file
bc_counts <- read_csv(input_bc_file, col_names = TRUE)
bc_counts_head <- read_csv(input_bc_file, n_max = 1, col_names = FALSE)
colnames(bc_counts)[1] <- "Protein"
# Remove the column containing barcode sequences
bc_counts <- bc_counts[,-2]
bc_counts_head <- bc_counts_head[,-2]
# Create vector of samples
samples <- unlist(bc_counts_head[-1])
# Create and move to output folder
dir.create(folder_name)
setwd(folder_name)
##### Collapse Barcodes ####
# Get a tibble of all the unique identified genes
genes <- unique(bc_counts[,1])
collapsed <- tibble()
barcode_num <- tibble()
# Looping over all the unique genes
for (i in 1:nrow(genes)){
# Take all the rows for the same protein
# Remove the column with the protein name
t <- bc_counts %>% subset(bc_counts$Protein == genes$Protein[i]) %>%
select(-Protein)
# Add the sum of all the columns (all the different
# barcodes for a given protein) to the "collapsed" array
  collapsed <- collapsed %>% bind_rows(colSums(t))
  # Record how many barcodes (rows) were collapsed for this protein; this
  # count feeds the clonal enrichment and barcode-number correction steps below
  barcode_num <- barcode_num %>%
    bind_rows(tibble(Protein = genes$Protein[i], Number = nrow(t)))
}
##### Calculating Total Enrichment ####
# function that iterates over the groups performing an exact test for difference between
# each group and group 1 (pre-library) and adding the logFC between each to a matrix
calcexp <- function(n){
exp <- n$genes
groups <- as.vector(unique(n$samples[,"group"]))
#creates a non-repetitive list of numeric groups present in analysis
for (i in 2:length(unique(samples))) {
et <- exactTest(n, pair = c(samples[1],unique(samples)[i]))
#conducts a pairwise test comparing iterative samples to the first sample
exp <- cbind(exp, et$table[,'logFC'])
#adds logFC of each comparison to a matrix
groupname <- groups[i]
#creates group name for the matrix columns
colnames(exp)[i] <- groupname
}
return(exp)
}
# calculate fold change
expmatrix <- collapsed %>% DGEList(genes = genes, group = samples) %>%
calcNormFactors() %>%
estimateDisp() %>%
calcexp()
# creates fold enrichment matrix
FE.noneg<- expmatrix %>%
tibble() %>%
select(-Protein) %>%
mutate_all(function(x) if_else(x < 0,0,x)) %>%
bind_cols(genes) %>%
relocate(Protein)
##### Calculating Clonal Enrichment ####
# Minimum log fold change to be counted as an enriched barcode
enriched_barcode_fc <- 2
# Separate uncollapsed gene names and counts
bc_counts2_genes <- bc_counts %>% select(Protein)
bc_counts2 <- bc_counts %>% select(-Protein)
# calculate fold change for each barcode independently
expmatrix2 <- bc_counts2 %>% DGEList(genes = bc_counts2_genes, group = samples) %>%
calcNormFactors() %>%
estimateDisp() %>%
calcexp()
# create fold enrichment matrix
FE.noneg.b <- expmatrix2 %>%
tibble() %>%
select(-Protein) %>%
mutate_all(function(x) if_else(x < 0,0,x))
# converts logical matrix of barcode enrichment where enrichment = 1
enrich.logical <- FE.noneg.b %>%
mutate_all(function(x) if_else(x > enriched_barcode_fc,1,0)) %>%
bind_cols(bc_counts2_genes) %>%
relocate(Protein)
# create clonal enrichment matrix
frequency.b <- tibble()
for (i in 1:nrow(genes)){
# Looping over all the unique genes
t <- enrich.logical %>% filter(enrich.logical$Protein == genes$Protein[i]) %>%
select(-Protein)
# Take all the rows for the same protein. Remove the column with the protein name
sums <- colSums(t)
freq <- sums/barcode_num$Number[i]
frequency.b <- frequency.b %>% bind_rows(freq)
}
frequency.b <- bind_cols(genes, frequency.b)
##### Calculating REAP Score ####
# create tibble containing barcode number correction factor
bc_num <- barcode_num %>%
select(-Protein) %>%
mutate_all(function(x) if_else(x <= 5, log(x + 0.5)/1.705, 1))
# create tibble containing protein frequency correction factor
num_frequency_l <- tibble(collapsed[,1]/sum(collapsed[,1])) %>%
log10() %>%
mutate_all(function(x) if_else(x < -6,-6,x)) %>%
mutate_all(function(x) if_else(x == 0,-6,x)) %>%
mutate_all(function(x) if_else(x <= -4, log(x + 7.1)/1.16, 1))
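# The score below combines, per protein and sample, (clonal enrichment)^2 and
# the aggregate fold enrichment, scaled by the per-protein frequency and
# barcode-number correction factors computed above.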
# create tibble of scores
score <- select(frequency.b,-Protein)^2*
select(FE.noneg,-Protein)*
unlist(num_frequency_l)*
unlist(bc_num)
score <- bind_cols(genes, score)
write.csv(score, file = "score.csv", row.names = FALSE)
|
/reap_score_pipeline_v3.R
|
permissive
|
ring-lab/REAP_cell_reports_methods_2021
|
R
| false | false | 5,079 |
r
|
#load odds and set field types
odds <- read.csv("golf/odds.csv")
odds$name <- as.character(odds$name)
odds$tournament <- as.character(odds$tournament)
odds$odds <- as.character(odds$odds)
odds$won <- as.character(odds$won)
odds$payoff <- as.numeric(odds$payoff)
odds$id <- with(odds, paste(year, "-", tournament))
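# columns assumed in golf/odds.csv (inferred from the fields used in this
# script): year, tournament, name, odds, won ("true"/"false"), payoff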
#biggest payoff
wins <- subset(odds, won=="true")
max(wins$payoff)
wins.best <- wins[rev(order(wins$payoff)),]
head(wins.best)
#field stats
field <- subset(odds, grepl("Field", name) == 1)
field.wins <- subset(field, grepl("true", won) == 1)
field.wins.best <- field.wins[rev(order(field.wins$payoff)),]
head(field.wins.best)
#number of events offering a field bet
nrow(field)
#number of times the field hit
nrow(field.wins)
#historical winning probability of the field
nrow(field.wins)/nrow(field)
#find outcome if I bet $25 on field each time
field$bet <- with(field, ifelse(grepl("true", won), 25 * payoff, -25))
sum(field$bet)
summary(field$payoff)
#average odds for the field
mean(field$payoff)
#WGC Match Play
wgc <- subset(odds, grepl("WGC", tournament) == 1)
|
/100817RCode/TutorialsPoint/odds.r
|
no_license
|
mhcrnl/050817Octave
|
R
| false | false | 1,090 |
r
|
#' Filter track data for speed
#'
#' Create a filter of a track for "bad" points implying a speed of motion that
#' is unrealistic.
#'
#' Using an algorithm (McConnnell et al., 1992), points are tested for speed
#' between previous / next and 2nd previous / next points. Contiguous sections
#' with a root mean square speed above a given maximum have their highest rms
#' point removed, then rms is recalculated, until all points are below the
#' maximum. By default an (internal) root mean square function is used, this
#' can be specified by the user.
#'
#' If the coordinates of the \code{trip} data are not projected, or NA the
#' distance calculation assumes longlat and kilometres (great circle). For
#' projected coordinates the speed must match the units of the coordinate
#' system. (The PROJ.4 argument "units=km" is suggested).
#'
#' @param x trip object
#' @param max.speed speed in kilometres (or other unit) per hour, the unit is kilometres
#' if the trip is in longitude latitude coordinates, or in the unit of the
#' projection (usually metres per hour)
#' @param test cut the algorithm short and just return first pass
#' @return
#'
#' Logical vector matching positions in the coordinate records that pass the
#' filter.
#' @note
#'
#' This algorithm was originally taken from IDL code by David Watts at the
#' Australian Antarctic Division, and used in various other environments before
#' the development of this version.
#' @section Warning:
#'
#' This algorithm is destructive, and provides little information about
#' location uncertainty. It is provided because it's commonly used
#' and provides an illustrative benchmark for further work.
#'
#' It is possible for the filter to become stuck in an infinite loop, depending
#' on the function passed to the filter. Several minutes is probably too long
#' for hundreds of points, test on smaller sections if unsure.
#' @author David Watts and Michael D. Sumner
#' @seealso \code{\link{sda}} for a fast distance angle filter to combine with speed filtering
#' @references
#'
#' The algorithm comes from McConnell, B. J. and Chambers, C. and Fedak, M. A.
#' (1992) Foraging ecology of southern elephant seals in relation to the
#' bathymetry and productivity of the southern ocean. Antarctic Science
#' \emph{4} 393-398
#' @keywords manip
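#' @examples
#' \dontrun{
#' ## illustrative sketch only: `tr` stands in for any trip object in
#' ## longlat coordinates, and 10 km/h is an arbitrary example threshold
#' keep <- speedfilter(tr, max.speed = 10)
#' tr.filtered <- tr[keep, ]
#' }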
#' @export speedfilter
speedfilter <- function (x, max.speed=NULL, test=FALSE) {
if (!is(x, "trip"))
stop("only trip objects supported")
projected <- is.projected(x)
if (is.na(projected) ) {
projected <- FALSE
warning("coordinate system is NA, assuming longlat . . .")
}
if (is.null(max.speed)) {
print("no max.speed given, nothing to do here")
return(x)
}
longlat <- !projected
coords <- coordinates(x)
tids <- getTimeID(x)
time <- tids[, 1]
id <- factor(tids[, 2])
x <- coords[, 1]
y <- coords[, 2]
pprm <- 3
grps <- levels(id)
if (length(x) != length(y))
stop("x and y vectors must be of same\nlength")
if (length(x) != length(time))
stop("Length of times not equal to number of points")
okFULL <- rep(TRUE, nrow(coords))
if (test)
res <- list(speed=numeric(0), rms=numeric(0))
for (sub in grps) {
ind <- id == sub
xy <- matrix(c(x[ind], y[ind]), ncol=2)
tms <- time[ind]
npts <- nrow(xy)
if (pprm%%2 == 0 || pprm < 3) {
msg <- paste("Points per running mean should be odd and",
"greater than 3, pprm=3")
stop(msg)
}
RMS <- rep(max.speed + 1, npts)
offset <- pprm - 1
ok <- rep(TRUE, npts)
if (npts < (pprm + 1) && !test) {
warning("Not enough points to filter ID: \"", sub,
"\"\n continuing . . . \n")
okFULL[ind] <- ok
next
}
index <- 1:npts
iter <- 1
while (any(RMS > max.speed, na.rm=TRUE)) {
n <- length(which(ok))
x1 <- xy[ok, ]
speed1 <- trackDistance(x1[-nrow(x1), 1], x1[-nrow(x1), 2],
x1[-1, 1], x1[-1, 2],
longlat=!projected) /
(diff(unclass(tms[ok])) / 3600)
speed2 <- trackDistance(x1[-((nrow(x1) - 1):nrow(x1)), 1],
x1[-((nrow(x1) - 1):nrow(x1)), 2],
x1[-(1:2), 1], x1[-(1:2), 2],
longlat=!projected) /
((unclass(tms[ok][-c(1, 2)]) -
unclass(tms[ok][-c(n - 1, n)])) /
3600)
thisIndex <- index[ok]
npts <- length(speed1)
if (npts < pprm)
next
sub1 <- rep(1:2, npts - offset) + rep(1:(npts - offset), each=2)
sub2 <- rep(c(0, 2), npts - offset) +
rep(1:(npts - offset), each=2)
rmsRows <- cbind(matrix(speed1[sub1], ncol=offset, byrow=TRUE),
matrix(speed2[sub2], ncol=offset, byrow=TRUE))
RMS <- c(rep(0, offset),
sqrt(rowSums(rmsRows ^ 2) / ncol(rmsRows)))
if (test & iter == 1) {
res$speed <- c(res$speed, 0, speed1)
res$rms <- c(res$rms, 0, RMS)
break
}
RMS[length(RMS)] <- 0
bad <- RMS > max.speed
segs <- cumsum(c(0, abs(diff(bad))))
## try wrapping ifelse here? no index is quicker
rmsFlag <- unlist(lapply(split(RMS, segs), function(x) {
ifelse((1:length(x)) == which.max(x), TRUE, FALSE)
}), use.names=FALSE)
rmsFlag[!bad] <- FALSE
RMS[rmsFlag] <- -10
ok[thisIndex][rmsFlag > 0] <- FALSE
}
okFULL[ind] <- ok
}
if (test)
return(res)
okFULL
}
##' Filter track for speed, distance and angle.
##'
##' Create a filter index of a track for "bad" points with a
##' combination of speed, distance and angle tests.
##' @name sda
##' @param x trip object
##' @param smax maximum speed, in km/h
##' @param ang minimum turning angle/s in degrees
##' @param distlim maximum step lengths in km
##' @param pre include this filter in the removal
##' @references Freitas, C., Lydersen, C., Fedak, M. A. and Kovacs,
##' K. M. (2008), A simple new algorithm to filter marine mammal Argos
##' locations. Marine Mammal Science, 24: 315-325. doi:
##' 10.1111/j.1748-7692.2007.00180.x
##' @details This is an independent implementation from that in the
##' package argosfilter by Freitas 2008.
##' @return logical vector, with \code{FALSE} values where the tests failed
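##' @examples
##' \dontrun{
##' ## illustrative sketch only: `tr` stands in for any trip object and the
##' ## 12 km/h speed threshold is an arbitrary example value; `ang` and
##' ## `distlim` keep their defaults
##' keep <- sda(tr, smax = 12)
##' tr.filtered <- tr[keep, ]
##' }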
##' @export
sda <- function(x, smax, ang = c(15, 25), distlim = c(2.5, 5.0), pre = NULL) {
if (!is.null(pre)) x$prefilter <- pre
xlist <- split(x, x[[getTORnames(x)[2L]]])
bigok <- vector("list", length(xlist))
for (i in seq_along(xlist)) {
ok <- sda0(xlist[[i]], smax, ang, distlim, pre = xlist[[i]]$prefilter)
bigok[[i]] <- ok
}
unlist(bigok)
}
sda0 <- function(x, smax, ang, distlim, pre = NULL) {
x$speed.ok <- speedfilter(x, max.speed = smax)
dsts <- trackDistance(x, longlat = TRUE)
angs <- trackAngle(x)
## simple way to deal with missing angles
### (which don't make sense for first and last position or zero-movement)
angs[is.na(angs)] <- 180
dprev <- dsts
dnext <- c(dsts[-1L], 0)
## No Argos quality filter, anyone can do that
ok <- (x$speed.ok | dprev <= distlim[2]) ##& (x$lc > -9)
if (!is.null(pre)) ok <- ok & pre
x$filt.row <- 1:nrow(x)
x$ok <- rep(FALSE, nrow(x))
df <- x
## first subset
df <- df[ok, ]
## distlim and angles, progressively
for (i in 1:length(distlim)) {
dsts <- trackDistance(df)
angs <- trackAngle(df)
dprev <- dsts
dnext <- c(dsts[-1L], 0)
angs[is.na(angs)] <- 180
ok <- (dprev <= distlim[i] | dnext <= distlim[i]) | angs > ang[i]
ok[c(1:2, (length(ok)-1):length(ok))] <- TRUE
df <- df[ok, ]
ok <- rep(TRUE, nrow(df))
}
x$ok[ match(df$filt.row, x$filt.row)] <- ok
x$ok
}
# $Id: filter.penSS.R 68 2013-03-20 03:11:06Z sluque $
#' Non-destructive smoothing filter
#'
#'
#' Non-destructive filter for track data using penalty smoothing on velocity.
#'
#'
#' Destructive filters such as \code{\link{speedfilter}} can be recast using a
#' penalty smoothing approach in the style of Green and Silverman (1994).
#'
#' This filter works by penalizing the fit of the smoothed track to the
#' observed locations by the sum of squared velocities. That is, we trade off
#' goodness of fit against increasing the total sum of squared velocities.
#'
#' When lambda=0 the smoothed track reproduces the raw track exactly.
#' Increasing lambda favours tracks requiring less extreme velocities, at the
#' expense of reproducing the original locations.
#' @name filter_penSS
#' @aliases filter.penSS
#' @param tr A \code{trip} object.
#' @param lambda Smoothing parameter, see Details.
#' @param first Fix the first location and prevent it from being updated by the
#' filter.
#' @param last Fix the last location and prevent it from being updated by the
#' filter.
#' @param \dots Arguments passed on to \code{\link{nlm}}
#' @return
#'
#' A trip object with updated coordinate values based on the filter - all the
#' data, including original coordinates which are maintained in the trip data
#' frame.
#' @author Simon Wotherspoon and Michael Sumner
#' @seealso \code{\link{speedfilter}}
#' @references
#'
#' Green, P. J. and Silverman, B. W. (1994). Nonparametric regression and
#' generalized linear models: a roughness penalty approach. CRC Press.
#' @keywords manip misc
#' @examples
#'
#'
#' \dontrun{## Example takes a few minutes
#'
#' ## Fake some data
#'
#' ## Brownian motion tethered at each end
#' brownian.bridge <- function(n, r) {
#' x <- cumsum(rnorm(n, 0, 1))
#' x <- x - (x[1] + seq(0, 1, length=n) * (x[n] - x[1]))
#' r * x
#' }
#'
#' ## Number of days and number of obs
#' days <- 50
#' n <- 200
#'
#' ## Make separation between obs gamma distributed
#' x <- rgamma(n, 3)
#' x <- cumsum(x)
#' x <- x/x[n]
#'
#' ## Track is lissajous + brownian bridge
#' b.scale <- 0.6
#' r.scale <- sample(c(0.1, 2, 10.2), n, replace=TRUE,
#' prob=c(0.8, 0.18, 0.02))
#' set.seed(44)
#'
#' tms <- ISOdate(2001, 1, 1) + trunc(days * 24 * 60 * 60 *x)
#' lon <- 120 + 20 * sin(2 * pi * x) +
#' brownian.bridge(n, b.scale) + rnorm(n, 0, r.scale)
#' lat <- -40 + 10 *(sin(3 * 2 * pi * x) + cos(2 * pi * x) - 1) +
#' brownian.bridge(n, b.scale) + rnorm(n, 0, r.scale)
#'
#' tr <- new("trip",
#' SpatialPointsDataFrame(cbind(lon, lat),
#' data.frame(gmt=tms, id="lbb")),
#' TimeOrderedRecords(c("gmt", "id")))
#' plot(tr)
#'
#' ## the filtered version
#' trf <- filter.penSS(tr, lambda=1, iterlim=400, print.level=1)
#'
#' lines(trf)
#'
#' }
#'
#'
#' @export filter_penSS
filter_penSS <- function(tr, lambda, first=TRUE, last=TRUE,...) {
penalized <- function(x) {
## Form smoothed track
p <- p.obs
p[sub, ] <- x
## Velocities between smoothed points
##v <- gc.dist(p[-n,],p[-1,])/dt
v <- trackDistance(p[, 2:1]) / dt
## Distances from smoothed points to observations
##d <- gc.dist(p,p.obs)
d <- trackDistance(p[, 2], p[, 1], p.obs[, 2], p.obs[, 1])
## This is the penalized sum of squares
(sum(d ^ 2) + lambda * sum(v ^ 2)) / n ^ 2
}
if (length(summary(tr)$tripID) > 1) {
msg <- paste("trip object contains multiple events,",
"only the first trip used")
warning(msg)
tr <- tr[tr[[getTORnames(tr)[2]]] == summary(tr)$tripID[1], ]
}
## Number of points and subset
n <- nrow(tr)
sub <- (1 + first):(n - last)
## Observed points
## p.obs <- as.matrix(tr[,c("Lat","Lon")])
p.obs <- coordinates(tr)[, 2:1]
## Time intervals (in days) between obs
##dt <- diff(unclass(tr$Time)/(24*60*60))
dt <- diff(unclass(tr[[getTORnames(tr)[1]]]) / (24 * 60 * 60))
mn <- nlm(penalized, as.matrix(p.obs[sub, ]), ...)
m <- n - (first + last)
res <- coordinates(tr)
## tr$Lat[sub] <- mn$estimate[1:m]
## tr$Lon[sub] <- mn$estimate[m+1:m]
res[sub, 2] <- mn$estimate[1:m]
res[sub, 1] <- mn$estimate[m + 1:m]
res <- SpatialPointsDataFrame(res, as.data.frame(tr),
proj4string=CRS(tr@proj4string@projargs, doCheckCRSArgs = FALSE))
trip(res, getTORnames(tr))
}
###_ + Emacs local variables
## Local variables:
## allout-layout: (+ : 0)
## End:
|
/trip/R/filter.R
|
no_license
|
albrizre/spatstat.revdep
|
R
| false | false | 13,075 |
r
|
#' Filter track data for speed
#'
#' Create a filter of a track for "bad" points implying a speed of motion that
#' is unrealistic.
#'
#' Using an algorithm (McConnnell et al., 1992), points are tested for speed
#' between previous / next and 2nd previous / next points. Contiguous sections
#' with a root mean square speed above a given maximum have their highest rms
#' point removed, then rms is recalculated, until all points are below the
#' maximum. By default an (internal) root mean square function is used, this
#' can be specified by the user.
#'
#' If the coordinates of the \code{trip} data are not projected, or NA the
#' distance calculation assumes longlat and kilometres (great circle). For
#' projected coordinates the speed must match the units of the coordinate
#' system. (The PROJ.4 argument "units=km" is suggested).
#'
#' @param x trip object
#' @param max.speed speed in kilometres (or other unit) per hour, the unit is kilometres
#' if the trip is in longitude latitude coordinates, or in the unit of the
#' projection (usually metres per hour)
#' @param test cut the algorithm short and just return first pass
#' @return
#'
#' Logical vector matching positions in the coordinate records that pass the
#' filter.
#' @note
#'
#' This algorithm was originally taken from IDL code by David Watts at the
#' Australian Antarctic Division, and used in various other environments before
#' the development of this version.
#' @section Warning:
#'
#' This algorithm is destructive, and provides little information about
#' location uncertainty. It is provided because it's commonly used
#' and provides an illustrative benchmark for further work.
#'
#' It is possible for the filter to become stuck in an infinite loop, depending
#' on the function passed to the filter. Several minutes is probably too long
#' for hundreds of points, test on smaller sections if unsure.
#' @author David Watts and Michael D. Sumner
#' @seealso \code{\link{sda}} for a fast distance angle filter to combine with speed filtering
#' @references
#'
#' The algorithm comes from McConnell, B. J. and Chambers, C. and Fedak, M. A.
#' (1992) Foraging ecology of southern elephant seals in relation to the
#' bathymetry and productivity of the southern ocean. Antarctic Science
#' \emph{4} 393-398
#' @keywords manip
#' @export speedfilter
speedfilter <- function (x, max.speed=NULL, test=FALSE) {
if (!is(x, "trip"))
stop("only trip objects supported")
projected <- is.projected(x)
if (is.na(projected) ) {
projected <- FALSE
warning("coordinate system is NA, assuming longlat . . .")
}
if (is.null(max.speed)) {
print("no max.speed given, nothing to do here")
return(x)
}
longlat <- !projected
coords <- coordinates(x)
tids <- getTimeID(x)
time <- tids[, 1]
id <- factor(tids[, 2])
x <- coords[, 1]
y <- coords[, 2]
pprm <- 3
grps <- levels(id)
if (length(x) != length(y))
stop("x and y vectors must be of same\nlength")
if (length(x) != length(time))
stop("Length of times not equal to number of points")
okFULL <- rep(TRUE, nrow(coords))
if (test)
res <- list(speed=numeric(0), rms=numeric(0))
for (sub in grps) {
ind <- id == sub
xy <- matrix(c(x[ind], y[ind]), ncol=2)
tms <- time[ind]
npts <- nrow(xy)
if (pprm%%2 == 0 || pprm < 3) {
msg <- paste("Points per running mean should be odd and",
"greater than 3, pprm=3")
stop(msg)
}
RMS <- rep(max.speed + 1, npts)
offset <- pprm - 1
ok <- rep(TRUE, npts)
if (npts < (pprm + 1) && !test) {
warning("Not enough points to filter ID: \"", sub,
"\"\n continuing . . . \n")
okFULL[ind] <- ok
next
}
index <- 1:npts
iter <- 1
while (any(RMS > max.speed, na.rm=TRUE)) {
n <- length(which(ok))
x1 <- xy[ok, ]
speed1 <- trackDistance(x1[-nrow(x1), 1], x1[-nrow(x1), 2],
x1[-1, 1], x1[-1, 2],
longlat=!projected) /
(diff(unclass(tms[ok])) / 3600)
speed2 <- trackDistance(x1[-((nrow(x1) - 1):nrow(x1)), 1],
x1[-((nrow(x1) - 1):nrow(x1)), 2],
x1[-(1:2), 1], x1[-(1:2), 2],
longlat=!projected) /
((unclass(tms[ok][-c(1, 2)]) -
unclass(tms[ok][-c(n - 1, n)])) /
3600)
thisIndex <- index[ok]
npts <- length(speed1)
if (npts < pprm)
next
sub1 <- rep(1:2, npts - offset) + rep(1:(npts - offset), each=2)
sub2 <- rep(c(0, 2), npts - offset) +
rep(1:(npts - offset), each=2)
rmsRows <- cbind(matrix(speed1[sub1], ncol=offset, byrow=TRUE),
matrix(speed2[sub2], ncol=offset, byrow=TRUE))
RMS <- c(rep(0, offset),
sqrt(rowSums(rmsRows ^ 2) / ncol(rmsRows)))
if (test & iter == 1) {
res$speed <- c(res$speed, 0, speed1)
res$rms <- c(res$rms, 0, RMS)
break
}
RMS[length(RMS)] <- 0
bad <- RMS > max.speed
segs <- cumsum(c(0, abs(diff(bad))))
## try wrapping ifelse here? no index is quicker
rmsFlag <- unlist(lapply(split(RMS, segs), function(x) {
ifelse((1:length(x)) == which.max(x), TRUE, FALSE)
}), use.names=FALSE)
rmsFlag[!bad] <- FALSE
RMS[rmsFlag] <- -10
ok[thisIndex][rmsFlag > 0] <- FALSE
}
okFULL[ind] <- ok
}
if (test)
return(res)
okFULL
}
##' Filter track for speed, distance and angle.
##'
##' Create a filter index of a track for "bad" points with a
##' combination of speed, distance and angle tests.
##' @name sda
##' @param x trip object
##' @param smax maximum speed, in km/h
##' @param ang minimum turning angle/s in degrees
##' @param distlim maximum step lengths in km
##' @param pre include this filter in the removal
##' @references Freitas, C., Lydersen, C., Fedak, M. A. and Kovacs,
##' K. M. (2008), A simple new algorithm to filter marine mammal Argos
##' locations. Marine Mammal Science, 24: 315-325. doi:
##' 10.1111/j.1748-7692.2007.00180.x
##' @details This is an independent implementation from that in the
##' package argosfilter by Freitas 2008.
##' @return logical vector, with \code{FALSE} values where the tests failed
##' @export
sda <- function(x, smax, ang = c(15, 25), distlim = c(2.5, 5.0), pre = NULL) {
if (!is.null(pre)) x$prefilter <- pre
xlist <- split(x, x[[getTORnames(x)[2L]]])
bigok <- vector("list", length(xlist))
for (i in seq_along(xlist)) {
ok <- sda0(xlist[[i]], smax, ang, distlim, pre = xlist[[i]]$prefilter)
bigok[[i]] <- ok
}
unlist(bigok)
}
sda0 <- function(x, smax, ang, distlim, pre = NULL) {
x$speed.ok <- speedfilter(x, max.speed = smax)
dsts <- trackDistance(x, longlat = TRUE)
angs <- trackAngle(x)
## simple way to deal with missing angles
### (which don't make sense for first and last position or zero-movement)
angs[is.na(angs)] <- 180
dprev <- dsts
dnext <- c(dsts[-1L], 0)
## No Argos quality filter, anyone can do that
ok <- (x$speed.ok | dprev <= distlim[2]) ##& (x$lc > -9)
if (!is.null(pre)) ok <- ok & pre
x$filt.row <- 1:nrow(x)
x$ok <- rep(FALSE, nrow(x))
df <- x
## first subset
df <- df[ok, ]
## distlim and angles, progressively
for (i in 1:length(distlim)) {
dsts <- trackDistance(df)
angs <- trackAngle(df)
dprev <- dsts
dnext <- c(dsts[-1L], 0)
angs[is.na(angs)] <- 180
ok <- (dprev <= distlim[i] | dnext <= distlim[i]) | angs > ang[i]
ok[c(1:2, (length(ok)-1):length(ok))] <- TRUE
df <- df[ok, ]
ok <- rep(TRUE, nrow(df))
}
x$ok[ match(df$filt.row, x$filt.row)] <- ok
x$ok
}
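## Illustrative usage sketch (assumed object `tr` and thresholds): sda()
## combines the speed filter above with the distance and angle tests and
## returns one logical per location (FALSE where a test failed).
# ok <- sda(tr, smax = 12) # default ang = c(15, 25), distlim = c(2.5, 5)
# ok.strict <- sda(tr, smax = 12, distlim = c(1.5, 3))
# tr.clean <- tr[ok, ]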
# $Id: filter.penSS.R 68 2013-03-20 03:11:06Z sluque $
#' Non-destructive smoothing filter
#'
#'
#' Non-destructive filter for track data using penalty smoothing on velocity.
#'
#'
#' Destructive filters such as \code{\link{speedfilter}} can be recast using a
#' penalty smoothing approach in the style of Green and Silverman (1994).
#'
#' This filter works by penalizing the fit of the smoothed track to the
#' observed locations by the sum of squared velocities. That is, we trade off
#' goodness of fit against increasing the total sum of squared velocities.
#'
#' When lambda=0 the smoothed track reproduces the raw track exactly.
#' Increasing lambda favours tracks requiring less extreme velocities, at the
#' expense of reproducing the original locations.
#' @name filter_penSS
#' @aliases filter.penSS
#' @param tr A \code{trip} object.
#' @param lambda Smoothing parameter, see Details.
#' @param first Fix the first location and prevent it from being updated by the
#' filter.
#' @param last Fix the last location and prevent it from being updated by the
#' filter.
#' @param \dots Arguments passed on to \code{\link{nlm}}
#' @return
#'
#' A trip object with updated coordinate values based on the filter - all the
#' data, including original coordinates which are maintained in the trip data
#' frame.
#' @author Simon Wotherspoon and Michael Sumner
#' @seealso \code{\link{speedfilter}}
#' @references
#'
#' Green, P. J. and Silverman, B. W. (1994). Nonparametric regression and
#' generalized linear models: a roughness penalty approach. CRC Press.
#' @keywords manip misc
#' @examples
#'
#'
#' \dontrun{## Example takes a few minutes
#'
#' ## Fake some data
#'
#' ## Brownian motion tethered at each end
#' brownian.bridge <- function(n, r) {
#' x <- cumsum(rnorm(n, 0, 1))
#' x <- x - (x[1] + seq(0, 1, length=n) * (x[n] - x[1]))
#' r * x
#' }
#'
#' ## Number of days and number of obs
#' days <- 50
#' n <- 200
#'
#' ## Make separation between obs gamma distributed
#' x <- rgamma(n, 3)
#' x <- cumsum(x)
#' x <- x/x[n]
#'
#' ## Track is lissajous + brownian bridge
#' b.scale <- 0.6
#' r.scale <- sample(c(0.1, 2, 10.2), n, replace=TRUE,
#' prob=c(0.8, 0.18, 0.02))
#' set.seed(44)
#'
#' tms <- ISOdate(2001, 1, 1) + trunc(days * 24 * 60 * 60 *x)
#' lon <- 120 + 20 * sin(2 * pi * x) +
#' brownian.bridge(n, b.scale) + rnorm(n, 0, r.scale)
#' lat <- -40 + 10 *(sin(3 * 2 * pi * x) + cos(2 * pi * x) - 1) +
#' brownian.bridge(n, b.scale) + rnorm(n, 0, r.scale)
#'
#' tr <- new("trip",
#' SpatialPointsDataFrame(cbind(lon, lat),
#' data.frame(gmt=tms, id="lbb")),
#' TimeOrderedRecords(c("gmt", "id")))
#' plot(tr)
#'
#' ## the filtered version
#' trf <- filter_penSS(tr, lambda=1, iterlim=400, print.level=1)
#'
#' lines(trf)
#'
#' }
#'
#'
#' @export filter_penSS
filter_penSS <- function(tr, lambda, first=TRUE, last=TRUE,...) {
penalized <- function(x) {
## Form smoothed track
p <- p.obs
p[sub, ] <- x
## Velocities between smoothed points
##v <- gc.dist(p[-n,],p[-1,])/dt
v <- trackDistance(p[, 2:1]) / dt
## Distances from smoothed points to observations
##d <- gc.dist(p,p.obs)
d <- trackDistance(p[, 2], p[, 1], p.obs[, 2], p.obs[, 1])
## This is the penalized sum of squares
(sum(d ^ 2) + lambda * sum(v ^ 2)) / n ^ 2
}
if (length(summary(tr)$tripID) > 1) {
msg <- paste("trip object contains multiple events,",
"only the first trip used")
warning(msg)
tr <- tr[tr[[getTORnames(tr)[2]]] == summary(tr)$tripID[1], ]
}
## Number of points and subset
n <- nrow(tr)
sub <- (1 + first):(n - last)
## Observed points
## p.obs <- as.matrix(tr[,c("Lat","Lon")])
p.obs <- coordinates(tr)[, 2:1]
## Time intervals (in days) between obs
##dt <- diff(unclass(tr$Time)/(24*60*60))
dt <- diff(unclass(tr[[getTORnames(tr)[1]]]) / (24 * 60 * 60))
mn <- nlm(penalized, as.matrix(p.obs[sub, ]), ...)
m <- n - (first + last)
res <- coordinates(tr)
## tr$Lat[sub] <- mn$estimate[1:m]
## tr$Lon[sub] <- mn$estimate[m+1:m]
res[sub, 2] <- mn$estimate[1:m]
res[sub, 1] <- mn$estimate[m + 1:m]
res <- SpatialPointsDataFrame(res, as.data.frame(tr),
proj4string=CRS(tr@proj4string@projargs, doCheckCRSArgs = FALSE))
trip(res, getTORnames(tr))
}
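## Illustrative sketch (assumed trip object `tr`): the quantity minimised by
## nlm() above is (sum(d^2) + lambda * sum(v^2)) / n^2, i.e. squared distances
## to the observations plus lambda times the squared velocities, so a larger
## lambda gives a smoother but less faithful track.
# trf.rough <- filter_penSS(tr, lambda = 0.1, iterlim = 200)
# trf.smooth <- filter_penSS(tr, lambda = 10, iterlim = 200)
# plot(tr); lines(trf.rough); lines(trf.smooth, col = "red")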
###_ + Emacs local variables
## Local variables:
## allout-layout: (+ : 0)
## End:
|
#' Matrix Uncertainty Selector
#' @description Matrix Uncertainty Selector
#' @param W Design matrix, measured with error. Must be a numeric matrix.
#' @param y Vector of responses.
#' @param lambda Regularization parameter.
#' @param delta Additional regularization parameter, bounding the measurement error.
#' @return Intercept and coefficients at the values of lambda and delta specified.
#' @references \insertRef{rosenbaum2010}{hdme}
#'
#' \insertRef{sorensen2018}{hdme}
#' @examples
#' # Example with linear (Gaussian) regression
#' set.seed(1)
#' # Number of samples
#' n <- 100
#' # Number of covariates
#' p <- 50
#' # True (latent) variables
#' X <- matrix(rnorm(n * p), nrow = n)
#' # Measurement matrix (this is the one we observe)
#' W <- X + matrix(rnorm(n*p, sd = 1), nrow = n, ncol = p)
#' # Coefficient vector
#' beta <- c(seq(from = 0.1, to = 1, length.out = 5), rep(0, p-5))
#' # Response
#' y <- X %*% beta + rnorm(n, sd = 1)
#' # Run the MU Selector
#' mus1 <- fit_mus(W, y)
#' # Draw an elbow plot to select delta
#' plot(mus1)
#'
#' # Now, according to the "elbow rule", choose the final delta where the curve has an "elbow".
#' # In this case, the elbow is at about delta = 0.08, so we use this to compute the final estimate:
#' mus2 <- fit_mus(W, y, delta = 0.08)
#' plot(mus2) # Plot the coefficients
#'
#' @export
fit_mus <- function(W, y, lambda = NULL, delta = NULL) {
fit <- fit_gmus(W, y, lambda = lambda, delta = delta, family = "gaussian")
return(fit)
}
|
/R/fit_mus.R
|
no_license
|
ohitslalila/hdme
|
R
| false | false | 1,486 |
r
|
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 10895
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10894
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10894
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt39_35_335.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3272
c no.of clauses 10895
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 10894
c
c QBFLIB/Basler/terminator/stmt39_35_335.qdimacs 3272 10895 E1 [1] 0 246 3025 10894 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt39_35_335/stmt39_35_335.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 718 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_emlProjection.R
\name{get_emlProjection}
\alias{get_emlProjection}
\title{get_emlProjection}
\usage{
get_emlProjection(spatialDataEntity)
}
\arguments{
\item{spatialDataEntity}{a spatial data entity, often loaded into R with the
raster (for rasters) or rgdal (for vectors) packages}
}
\value{
if a suitable match was found, function returns an EML-compliant
listing of the projection of the spatial data object
}
\description{
get_emlProjection attempts to identify the projection of a
spatial data file, and match this to the corresponding projection ID
permissible by EML.
}
\details{
get_emlProjection is a helper function designed primarily to assist
the creation of EML spatial data objects. The function currently is
restricted to matching coordinate systems in the northern hemisphere, and
will not match projections of type transverse mercator. Though intended
primarily as a helper function, get_emlProjection can be run independently.
}
\note{
get_emlProjection currently is restricted to matching coordinate
systems in the northern hemisphere, and will not match projections of type
transverse mercator.
}
\examples{
\dontrun{
vectorData <- readOGR(dsn="/GISfiles/WatershedShapefile/", layer="AZwatersheds_prj")
rasterdata <- raster("CAP_1985.img")
emlCompliantProjection <- get_emlProjection(rasterdata)
}
}
|
/man/get_emlProjection.Rd
|
no_license
|
kzollove/capemlGIS
|
R
| false | true | 1,421 |
rd
|
|
gbsgweight = function(df, method ='Truncated', alpha = 0.05)
{
#df = situation1
#method = 'Overlap'
#alpha = 0.05
weight0 = array()
weight1 = array()
id0 = which(df$Treat == 0)
id1 = which(df$Treat == 1)
if (method == 'Truncated')
{
weight0 = 1 / (1 - df$prob)
weight1 = 1 / df$prob
weight0[which(df$prob < alpha)] = 0
weight0[which(df$prob >= (1 - alpha))] = 0
weight1[which(df$prob < alpha)] = 0
weight1[which(df$prob >= (1 - alpha))] = 0
}
if (method == 'Overlap')
{
weight0 = df$prob
weight1 = 1 - df$prob
    #df$prob is the probability of treatment = 1
    #1 - df$prob is the probability of treatment = 0
}
if (method == 'Combined')
{
weight0 = 1 / (1 - df$prob)
weight1 = 1 / df$prob
}
if (method == 'Matching')
{
#weight0[which(df$prob <= (1 - df$prob))] = df$prob / (1 - df$prob)
weight0 = df$prob / (1 - df$prob)
weight0[which(df$prob > (1 - df$prob))] = 1
weight1 = (1 - df$prob) / df$prob
weight1[which(df$prob <= (1 -df$prob))] = 1
}
if (method == 'Treated')
{
weight0 = df$prob / (1 - df$prob)
    weight1 = rep(1, nrow(df))
}
if (method == 'Control')
{
    weight0 = rep(1, nrow(df))
weight1 = (1 - df$prob) / df$prob
}
weigh = array()
  for (i in 1:nrow(df))
{
if (i %in% id0){
weigh[i] = weight0[i]
}
else{
weigh[i] = weight1[i]
}}
weight = data.frame(weigh)
# weightt = data.frame(weight0[id0], weight1[id1])
return(data.frame(df, weight))
}
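# Illustrative usage sketch (assumed column names, not from the original
# script): gbsgweight() expects a data frame with a 0/1 `Treat` indicator and
# a propensity score `prob` = P(Treat = 1 | X); it returns the input with the
# selected balancing weight appended as column `weigh`.
# set.seed(1)
# toy <- data.frame(Treat = rbinom(200, 1, 0.4))
# toy$prob <- plogis(rnorm(200, mean = ifelse(toy$Treat == 1, 0.5, -0.5)))
# w.overlap <- gbsgweight(toy, method = 'Overlap')
# w.trunc <- gbsgweight(toy, method = 'Truncated', alpha = 0.05)
# summary(w.overlap$weigh)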
|
/BW_sim_rub/gbsgweight.R
|
no_license
|
LuyuLee/balanceWeight_r
|
R
| false | false | 1,728 |
r
|
|
# Array with only the VNT from Martinique
xobs = cbind(data_csv$CHIKV_obs,data_csv$ONNV_obs)
N = nrow(xobs)
classical.positivity.chik <- function(X){
return(X[,1]>=1 & X[,2]<=X[,1]-2)
}
classical.positivity.onnv <- function(X){
return(X[,2]>=1 & X[,1]<=X[,2]-1)
}
#Samples were considered CHIKV PRNT positive
#if the titer was >20 and the ONNV titer was < four-fold
#lower than the CHIKV titer. Because there is a unique one-way antigenic
#cross-reactivity between CHIKV and ONNV, a sample was designated ONNV positive
#if its titer was >20 and two-fold or greater than the CHIKV titer.
X=xobs[which(data_csv$location<3),]
CHIKV.positive = which(classical.positivity.chik(X))
ONNV.positive =which(classical.positivity.onnv(X))
print('CHIKV positive Mali')
print(paste0('N = ', length(CHIKV.positive)))
print(paste0('proportion = ', length(CHIKV.positive)/dim(X)[1]))
print('ONNV positive Mali')
print(paste0('N = ', length(ONNV.positive)))
print(paste0('proportion = ', length(ONNV.positive)/dim(X)[1]))
# In Martinique
X=xobs[which(data_csv$location==3),]
CHIKV.positive = which(classical.positivity.chik(X))
ONNV.positive = which(classical.positivity.onnv(X))
print('CHIKV positive Martinique')
print(paste0('N = ', length(CHIKV.positive)))
print('ONNV positive Martinique')
print(paste0('N = ', length(ONNV.positive)))
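# Illustrative check of the classification rules with made-up titre indices
# (assumption: values are log2 dilution steps above the 1:20 cutoff, which is
# how the comparisons above read):
# X <- rbind(c(3, 0), # CHIKV >= 4-fold above ONNV -> CHIKV positive
#            c(0, 2), # ONNV >= 2-fold above CHIKV -> ONNV positive
#            c(2, 2)) # cross-reactive profile -> neither
# classical.positivity.chik(X) # TRUE FALSE FALSE
# classical.positivity.onnv(X) # FALSE TRUE FALSE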
|
/ClassicalSeroprevalence.R
|
no_license
|
nathoze/ONNV_CHIKV
|
R
| false | false | 1,374 |
r
|
|
rankhospital <- function(state, outcome, num = "best") {
## Read outcome data
outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")[,c(2,7,11,17,23)]
## Check that state and outcome are valid
if(!(state %in% outcome_data$State)){
stop ("invalid state")
}
## check outcome
outcome_var<-c("heart attack","heart failure","pneumonia")
##return(outcome %in% outcome_var)
if(!(outcome %in% outcome_var)){
stop ("invalid outcome")
}
if(class(num) == "character"){
if (!(num %in% c("best","worst"))){
stop("invalid number")
}
}
## Return hospital name in that state with the given rank
## 30-day death rate
outcome_data=outcome_data[outcome_data$State==state,c(1,3,4,5)]
outcome_data<-switch(
which(outcome_var==outcome),
outcome_data[,c(1,2)],
outcome_data[,c(1,3)],
outcome_data[,c(1,4)]
)
names(outcome_data)[2] = "Cases"
outcome_data[, 2] = suppressWarnings( as.numeric(outcome_data[, 2]))
outcome_data = outcome_data[!is.na(outcome_data$Cases),]
if(class(num) == "numeric" && num > nrow(outcome_data)){
return (NA)
}
outcome_data = outcome_data[order(outcome_data$Cases, outcome_data$Hospital.Name),]
# Return
if(class(num) == "character") {
if(num == "best") {
return (outcome_data$Hospital.Name[1])
}
else if(num == "worst") {
return (outcome_data$Hospital.Name[nrow(outcome_data)])
}
}
else {
return (outcome_data$Hospital.Name[num])
}
}
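## Illustrative calls (assume "outcome-of-care-measures.csv" from the hospital
## quality data set sits in the working directory):
# rankhospital("TX", "heart failure", 4) # 4th-ranked hospital in Texas
# rankhospital("MD", "heart attack", "worst") # worst-ranked hospital in Maryland
# rankhospital("MN", "heart attack", 5000) # NA: rank exceeds number of hospitals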
|
/rankhospital.R
|
no_license
|
mbmalgit/R_programming_assignment_3
|
R
| false | false | 1,536 |
r
|
|
dGetUij <- function(i, j, Kd, A, U, lamdaij) {
return(sum(Kd[, j] * A[, i]))
}
dGetlamdaij <- function(i, j, U) {
U.tilda <- U %*% t(U)
return ( (i == j) - U.tilda[i, j])
}
getU <- function(new.U, mu.U, new.lambda) {
newdU <- 0*new.U
nc <- dim(U)[1]
nr <- dim(U)[2]
for (j in 1:nc) {
for (i in 1:nr){
newdU[i, j] <- dGetUij(i, j, Kd, A, new.U, new.lambda)
}
}
newdU <- newdU - new.lambda * ((2*diag(1,2)*new.U)+diag(1,2)[2:1, ])
new.U <- new.U + mu.U * newdU
new.U
}
getLambda <- function(new.U, new.lambda, mu.lambda) {
dlambda <- 0*new.lambda
nc <- dim(new.lambda)[1]
nr <- dim(new.lambda)[2]
for (j in 1:nc) {
for (i in 1:nr){
dlambda[i, j] <- dGetlamdaij(i, j, new.U)
}
}
new.lambda <- new.lambda + mu.lambda * dlambda
new.lambda
}
############################################
arr.new <- c(-0.9511, -1.6435, 2.3655, -2.9154, -3.7010, 0.9511, -1.6435, 2.3655, -2.9154, 3.7010)
A <- matrix(arr.new, ncol = 2, nrow = 5)
U <- matrix(1:4,2,2)
U <- U * 0.1
new.U <- eigen(U)$vectors #U
K <- A %*% new.U
Kd <- 4 * K * K * K
new.lambda <- matrix(10:13, 2, 2)
mu.lambda <- 0.01
mu.U <- 0.01
for (iter in 1:2000) {
new.U <- getU(new.U, mu.U, new.lambda)
new.U <- eigen(U)$vectors
new.lambda <- getLambda(new.U, new.lambda, mu.lambda)
temp <- A %*% new.U
print(temp)
}
A.rot <- A %*% new.U
plot.new()
plot.window(xlim = range(0, A.rot[, 1]), ylim = range(0, A.rot[, 2])) # set up user coordinates
lines(x = c(0, A.rot[5, 1]), y = c(0, A.rot[5, 2]), col = "red")
A.rot
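## Diagnostic sketch (assumes the loop above has run): check how far new.U is
## from an orthogonal matrix and draw all rotated loadings rather than the
## single segment above.
# crossprod(new.U) # should be close to the 2 x 2 identity
# plot(A.rot, type = "n", xlab = "rotated axis 1", ylab = "rotated axis 2")
# arrows(0, 0, A.rot[, 1], A.rot[, 2], col = "red", length = 0.1)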
|
/rotF.R
|
no_license
|
gradjitta/checkpoints
|
R
| false | false | 1,445 |
r
|
|
.readBarcodes <- function(path,
header = FALSE,
colname = "cell_barcode",
colClasses = "character") {
res <- data.table::fread(path, header = header, colClasses = colClasses)
if (ncol(res) == 1) {
colnames(res) <- colname
} else {
warning("'barcodes' file contains >1 columns!",
" The column names are kept as is.")
}
return(res)
}
.readFeatures <- function(path,
header = FALSE,
colnames = c("feature_ID", "feature_name", "feature_type"),
colClasses = "character") {
res <- data.table::fread(path, header = header)
if (ncol(res) == 1) {
colnames(res) <- colnames[1]
} else if (ncol(res) == 2) {
colnames(res) <- colnames[seq(2)]
} else if (ncol(res) == 3) {
colnames(res) <- colnames
} else {
warning("'features' file contains >3 columns!",
" The column names are kept as is.")
}
return(res)
}
#' @importFrom tools file_ext
.readMatrixMM <- function(path, gzipped, class, delayedArray) {
if (gzipped == "auto") {
ext <- tools::file_ext(path)
if (ext == "gz") {
path <- gzfile(path)
}
} else if (isTRUE(gzipped)) {
path <- gzfile(path)
}
mat <- Matrix::readMM(path)
mat <- methods::as(mat, "dgCMatrix")
if (class == "matrix") {
mat <- as.matrix(mat)
}
if (isTRUE(delayedArray)) {
mat <- DelayedArray::DelayedArray(mat)
}
return(mat)
}
# dir <- "outs/filtered_feature_bc_matrix/"
.constructSCEFromCellRangerOutputs <- function(dir,
sampleName,
matrixFileName,
featuresFileName,
barcodesFileName,
gzipped,
class,
delayedArray) {
cb <- .readBarcodes(file.path(dir, barcodesFileName))
fe <- .readFeatures(file.path(dir, featuresFileName))
ma <- .readMatrixMM(file.path(dir, matrixFileName),
gzipped = gzipped,
class = class,
delayedArray = delayedArray)
coln <- paste(sampleName, cb[[1]], sep = "_")
rownames(ma) <- fe[[1]]
sce <- SingleCellExperiment::SingleCellExperiment(
assays = list(counts = ma))
SummarizedExperiment::rowData(sce) <- fe
SummarizedExperiment::colData(sce) <- S4Vectors::DataFrame(cb,
column_name = coln,
sample = sampleName,
row.names = coln)
return(sce)
}
.getOutputFolderPath <- function(samplePath, cellRangerOuts, cellRangerOuts2) {
path <- file.path(samplePath, cellRangerOuts)
if (dir.exists(path)) {
return(path)
}
if (!is.na(cellRangerOuts2)) {
path2 <- file.path(samplePath, cellRangerOuts2)
if (dir.exists(path2)) {
return(path2)
} else {
stop("Invalid path ", path2)
}
}
stop("Invalid path ", path)
}
.checkArgsImportCellRanger <- function(cellRangerDirs,
sampleDirs,
sampleNames,
cellRangerOuts,
matrixFileNames,
featuresFileNames,
barcodesFileNames,
gzipped) {
if (any(!(gzipped %in% c("auto", TRUE, FALSE)))) {
stop("Invalid 'gzipped' argument! Should be one of 'auto',",
" TRUE, or FALSE")
}
if (is.null(cellRangerDirs)) {
if (is.null(sampleDirs)) {
stop("'sampleDirs' can not be NULL if 'cellRangerDirs' is NULL!")
}
for (i in seq_along(sampleDirs)) {
if (!dir.exists(sampleDirs[i])) {
stop("Sample folder ", sampleDirs[i], " does not exist!")
}
}
sampleLength <- length(sampleDirs)
if (!is.null(sampleNames)) {
if (length(sampleNames) != sampleLength) {
stop("'sampleDirs' and 'sampleNames' have unequal lengths!")
}
}
if (!(length(cellRangerOuts) %in% c(0, 1))) {
if (length(cellRangerOuts) != sampleLength) {
stop("'sampleDirs' and 'cellRangerOuts' have unequal lengths!")
}
}
if (length(matrixFileNames) != 1) {
if (length(matrixFileNames) != sampleLength) {
stop("'sampleDirs' and 'matrixFileNames' have unequal lengths!")
}
}
if (length(featuresFileNames) != 1) {
if (length(featuresFileNames) != sampleLength) {
stop("'sampleDirs' and 'featuresFileNames'",
" have unequal lengths!")
}
}
if (length(barcodesFileNames) != 1) {
if (length(barcodesFileNames) != sampleLength) {
stop("'sampleDirs' and 'barcodesFileNames'",
" have unequal lengths!")
}
}
if (gzipped != "auto") {
if (length(gzipped) != sampleLength & length(gzipped) != 1) {
stop("'sampleDirs' and 'gzipped' have unequal lengths!")
}
}
} else {
if (!all(dir.exists(cellRangerDirs))) {
stop("Invalid cellRangerDirs: ",
paste(cellRangerDirs[which(!dir.exists(cellRangerDirs))],
collapse = ", "))
}
if (is.null(sampleDirs)) {
for (i in seq_along(cellRangerDirs)) {
if (length(list.dirs(cellRangerDirs[i],
recursive = FALSE)) == 0) {
warning("Empty cellRangerDir. Skipping ",
cellRangerDirs[i])
}
}
sampleLength <- length(unlist(lapply(cellRangerDirs,
list.dirs, recursive = FALSE)))
if (!is.null(sampleNames)) {
if (sampleLength != length(sampleNames)) {
stop("The length of 'sampleNames' does not match length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (!(length(cellRangerOuts) %in% c(0, 1))) {
if (sampleLength != length(cellRangerOuts)) {
stop("The length of 'cellRangerOuts' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (length(matrixFileNames) != 1) {
if (sampleLength != length(matrixFileNames)) {
stop("The length of 'matrixFileNames' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (length(featuresFileNames) != 1) {
if (sampleLength != length(featuresFileNames)) {
stop("The length of 'featuresFileNames' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (length(barcodesFileNames) != 1) {
if (sampleLength != length(barcodesFileNames)) {
stop("The length of 'barcodesFileNames' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (gzipped != "auto") {
if (sampleLength != length(gzipped) & length(gzipped) != 1) {
stop("The length of 'gzipped' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
} else {
if (length(sampleDirs) != length(cellRangerDirs)) {
stop("'sampleDirs' and 'cellRangerDirs' have unequal lengths!")
} else {
for (i in seq_along(cellRangerDirs)) {
paths <- file.path(cellRangerDirs[i], sampleDirs[[i]])
for (j in seq_along(paths)) {
if (!dir.exists(paths[j])) {
stop("Sample folder does not exist!\n",
paths[j])
}
}
}
}
# analogous to length(unlist(sampleDirs))
sampleLength <- sum(vapply(sampleDirs, length, integer(1)))
if (!is.null(sampleNames)) {
if (length(sampleNames) != sampleLength) {
stop("'sampleNames' and 'unlist(sampleDirs)' have unequal",
" lengths!")
}
}
if (!(length(cellRangerOuts) %in% c(0, 1))) {
if (length(cellRangerOuts) != sampleLength) {
stop("'cellRangerOuts' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (length(matrixFileNames) != 1) {
if (length(matrixFileNames) != sampleLength) {
stop("'matrixFileNames' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (length(featuresFileNames) != 1) {
if (length(featuresFileNames) != sampleLength) {
stop("'featuresFileNames' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (length(barcodesFileNames) != 1) {
if (length(barcodesFileNames) != sampleLength) {
stop("'barcodesFileNames' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (gzipped != "auto") {
if (length(gzipped) != sampleLength & length(gzipped) != 1) {
stop("'gzipped' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
}
}
}
.getSamplesPaths <- function(cellRangerDirs, samplesDirs) {
if (is.null(cellRangerDirs)) {
res <- samplesDirs
} else {
if (is.null(samplesDirs)) {
res <- list.dirs(cellRangerDirs, recursive = FALSE)
} else {
res <- vector("list", length = length(cellRangerDirs))
for (i in seq_along(cellRangerDirs)) {
res[[i]] <- file.path(cellRangerDirs[i], samplesDirs[[i]])
}
res <- unlist(res)
}
}
return(res)
}
.getSampleNames <- function(samplePaths) {
res <- basename(samplePaths)
return(res)
}
.getVectorized <- function(arg, len) {
if (length(arg) == 1) {
arg <- rep(arg, len)
}
return(arg)
}
# main function
.importCellRanger <- function(
cellRangerDirs,
sampleDirs,
sampleNames,
cellRangerOuts,
dataType,
matrixFileNames,
featuresFileNames,
barcodesFileNames,
gzipped,
class,
delayedArray,
rowNamesDedup) {
.checkArgsImportCellRanger(cellRangerDirs,
sampleDirs,
sampleNames,
cellRangerOuts,
matrixFileNames,
featuresFileNames,
barcodesFileNames,
gzipped)
samplePaths <- .getSamplesPaths(cellRangerDirs, sampleDirs)
res <- vector("list", length = length(samplePaths))
cellRangerOuts2 <- NA
if (is.null(cellRangerOuts)) {
if (dataType == "filtered") {
cellRangerOuts <- "outs/filtered_gene_bc_matrix"
cellRangerOuts2 <- "outs/filtered_feature_bc_matrix"
} else {
cellRangerOuts <- "outs/raw_gene_bc_matrix"
cellRangerOuts2 <- "outs/raw_feature_bc_matrix"
}
}
cellRangerOuts <- .getVectorized(cellRangerOuts, length(samplePaths))
cellRangerOuts2 <- .getVectorized(cellRangerOuts2, length(samplePaths))
matrixFileNames <- .getVectorized(matrixFileNames, length(samplePaths))
featuresFileNames <- .getVectorized(featuresFileNames, length(samplePaths))
barcodesFileNames <- .getVectorized(barcodesFileNames, length(samplePaths))
gzipped <- .getVectorized(gzipped, length(samplePaths))
if (is.null(sampleNames)) {
sampleNames <- .getSampleNames(samplePaths)
}
for (i in seq_along(samplePaths)) {
dir <- .getOutputFolderPath(samplePaths[i], cellRangerOuts[i],
cellRangerOuts2[i])
scei <- .constructSCEFromCellRangerOutputs(dir,
sampleName = sampleNames[i],
matrixFileName = matrixFileNames[i],
featuresFileName = featuresFileNames[i],
barcodesFileName = barcodesFileNames[i],
gzipped = gzipped[i],
class = class,
delayedArray = delayedArray)
res[[i]] <- scei
}
sce <- do.call(SingleCellExperiment::cbind, res)
# Load & Store Cell Ranger Summary into SCE
metrics_summary <- .importMetricsCellRanger(samplePaths, sampleNames, "outs", "metrics_summary.csv")
if (ncol(metrics_summary) > 0) {
sce@metadata$sctk$sample_summary[["cellranger"]] <- metrics_summary
}
# sce <- setSampleSummaryStatsTable(sce, "cellranger", metrics_summary)
if (isTRUE(rowNamesDedup)) {
if (any(duplicated(rownames(sce)))) {
message("Duplicated gene names found, adding '-1', '-2', ",
"... suffix to them.")
}
sce <- dedupRowNames(sce)
}
return(sce)
}
.getCellRangerOutV2 <- function(dataTypeV2, reference) {
res <- vector("list", length = length(reference))
for (i in seq_along(reference)) {
if (dataTypeV2 == 'filtered') {
res[[i]] <- file.path('outs/filtered_gene_bc_matrices', reference[i])
} else {
res[[i]] <- file.path('outs/raw_gene_bc_matrices', reference[i])
}
}
cellRangerOutsV2 <- unlist(res)
return(cellRangerOutsV2)
}
#' @name importCellRanger
#' @rdname importCellRanger
#' @title Construct SCE object from Cell Ranger output
#' @description Read the filtered barcodes, features, and matrices for all
#' samples from (preferably a single run of) Cell Ranger output. Import and
#' combine them
#' as one big \link[SingleCellExperiment]{SingleCellExperiment} object.
#' @param cellRangerDirs The root directories where Cell Ranger was run. These
#' folders should contain sample specific folders. Default \code{NULL},
#' meaning the paths for each sample will be specified in \emph{samples}
#' argument.
#' @param sampleDirs Default \code{NULL}. Can be one of
#' \itemize{
#' \item \code{NULL}. All samples within \code{cellRangerDirs} will be
#' imported. The order of samples will be first determined by the order of
#' \code{cellRangerDirs} and then by \link{list.dirs}. This is only
#' for the case where \code{cellRangerDirs} is specified.
#' \item A list of vectors containing the folder names for samples to import.
#' Each vector in
#' the list corresponds to samples from one of \code{cellRangerDirs}.
#' These names are the same as the folder names under \code{cellRangerDirs}.
#' This is only for the case where \code{cellRangerDirs} is specified.
#' \item A vector of folder paths for the samples to import. This is only for
#' the case where \code{cellRangerDirs} is \code{NULL}.
#' }
#' The cells in the final SCE object will be ordered in the same order of
#' \code{sampleDirs}.
#' @param sampleNames A vector of user-defined sample names for the samples
#' to be
#' imported. Must have the same length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}. Default
#' \code{NULL}, in which case the folder names will be used as sample names.
#' @param cellRangerOuts Character vector. The intermediate
#' paths to filtered or raw cell barcode, feature, and matrix files
#' for each sample. \strong{Supersedes \code{dataType}}. If \code{NULL},
#' \code{dataType}
#' will be used to determine the Cell Ranger output directory. If not \code{NULL},
#' \code{dataType} will be ignored and \code{cellRangerOuts} specifies the
#' paths. Must have length 1 or the same length as
#' \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' Reference genome names might need to be
#' appended for CellRanger version below 3.0.0 if reads were mapped to
#' multiple genomes when running Cell Ranger pipeline. Probable options
#' include "outs/filtered_feature_bc_matrix/", "outs/raw_feature_bc_matrix/",
#' "outs/filtered_gene_bc_matrix/", "outs/raw_gene_bc_matrix/".
#' @param dataType Character. The type of data to import. Can be one of
#' "filtered" (which is equivalent to
#' \code{cellRangerOuts = "outs/filtered_feature_bc_matrix/"} or
#' \code{cellRangerOuts = "outs/filtered_gene_bc_matrix/"}) or "raw" (which
#' is equivalent to
#' \code{cellRangerOuts = "outs/raw_feature_bc_matrix/"} or
#' \code{cellRangerOuts = "outs/raw_gene_bc_matrix/"}). Default
#' "filtered" which imports the counts for filtered cell barcodes only.
#' @param matrixFileNames Character vector. Filenames for the Market Exchange
#' Format (MEX) sparse matrix files (matrix.mtx or matrix.mtx.gz files).
#' Must have length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param featuresFileNames Character vector. Filenames for the feature
#' annotation files. They are usually named \emph{features.tsv.gz} or
#' \emph{genes.tsv}. Must have length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param barcodesFileNames Character vector. Filename for the cell barcode
#' list files. They are usually named \emph{barcodes.tsv.gz} or
#' \emph{barcodes.tsv}. Must have length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param gzipped \code{TRUE} if the Cell Ranger output files
#' (barcodes.tsv, features.tsv, and matrix.mtx) were
#' gzip compressed. \code{FALSE} otherwise. This is true after Cell Ranger
#' 3.0.0 update. Default \code{"auto"} which automatically detects if the
#' files are gzip compressed. If not \code{"auto"}, \code{gzipped} must have
#' length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param class Character. The class of the expression matrix stored in the SCE
#' object. Can be one of "Matrix" (as returned by
#' \link{readMM} function), or "matrix" (as returned by
#' \link[base]{matrix} function). Default \code{"Matrix"}.
#' @param delayedArray Boolean. Whether to read the expression matrix as
#' \link{DelayedArray} object or not. Default \code{FALSE}.
#' @param reference Character vector. The reference genome names.
#' Default \code{NULL}. If not \code{NULL}, it must have the same length and order as
#' \code{length(unlist(sampleDirs))} if \code{sampleDirs} is not \code{NULL}.
#' Otherwise, make sure the length and order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}. Only needed
#' for Cellranger version below 3.0.0.
#' @param dataTypeV2 Character. The type of output to import for
#' Cellranger version below 3.0.0. Whether to import the filtered or the
#' raw data. Can be one of 'filtered' or 'raw'. Default 'filtered'. When
#' \code{cellRangerOuts} is specified, \code{dataTypeV2} and \code{reference} will
#' be ignored.
#' @param cellRangerOutsV2 Character vector. The intermediate paths
#' to filtered or raw cell barcode, feature, and matrix files for each
#' sample for Cellranger version below 3.0.0. If \code{NULL}, \code{reference} and
#' \code{dataTypeV2} will be used to determine Cell Ranger output directory. If it has
#' length 1, it assumes that all samples use the same genome reference and
#' the function will load only filtered or raw data.
#' @param rowNamesDedup Boolean. Whether to deduplicate rownames. Default
#' \code{TRUE}.
#' @details
#' \code{importCellRangerV2} imports output from Cell Ranger V2.
#' \code{importCellRangerV2Sample} imports output from one sample from Cell
#' Ranger V2.
#' \code{importCellRangerV3} imports output from Cell Ranger V3.
#' \code{importCellRangerV3} imports output from one sample from Cell Ranger
#' V3.
#' Some implicit
#' assumptions which match the output structure of Cell Ranger V2 & V3
#' are made in these 4 functions including \code{cellRangerOuts},
#' \code{matrixFileName}, \code{featuresFileName}, \code{barcodesFileName},
#' and \code{gzipped}.
#' Alternatively, user can call \code{importCellRanger} to explicitly
#' specify these arguments.
#' @return A \code{SingleCellExperiment} object containing the combined count
#' matrix, the feature annotations, and the cell annotation.
#' @examples
#' # Example #1
#' # The following filtered feature, cell, and matrix files were downloaded from
#' # https://support.10xgenomics.com/single-cell-gene-expression/datasets/
#' # 3.0.0/hgmm_1k_v3
#' # The top 10 hg19 & mm10 genes are included in this example.
#' # Only the first 20 cells are included.
#' sce <- importCellRanger(
#' cellRangerDirs = system.file("extdata/", package = "singleCellTK"),
#' sampleDirs = "hgmm_1k_v3_20x20",
#' sampleNames = "hgmm1kv3",
#' dataType = "filtered")
#' @export
importCellRanger <- function(
cellRangerDirs = NULL,
sampleDirs = NULL,
sampleNames = NULL,
cellRangerOuts = NULL,
dataType = c("filtered", "raw"),
matrixFileNames = "matrix.mtx.gz",
featuresFileNames = "features.tsv.gz",
barcodesFileNames = "barcodes.tsv.gz",
gzipped = "auto",
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
dataType <- match.arg(dataType)
.importCellRanger(cellRangerDirs = cellRangerDirs,
sampleDirs = sampleDirs,
sampleNames = sampleNames,
cellRangerOuts = cellRangerOuts,
dataType = dataType,
matrixFileNames = matrixFileNames,
featuresFileNames = featuresFileNames,
barcodesFileNames = barcodesFileNames,
gzipped = gzipped,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @rdname importCellRanger
#' @examples
#' # The following filtered feature, cell, and matrix files were downloaded from
#' # https://support.10xgenomics.com/single-cell-gene-expression/datasets/
#' # 2.1.0/pbmc4k
#' # Top 20 genes are kept. 20 cell barcodes are extracted.
#' sce <- importCellRangerV2(
#' cellRangerDirs = system.file("extdata/", package = "singleCellTK"),
#' sampleDirs = "pbmc_4k_v2_20x20",
#' sampleNames = "pbmc4k_20",
#' reference = 'GRCh38',
#' dataTypeV2 = "filtered")
#' @export
importCellRangerV2 <- function(
cellRangerDirs = NULL,
sampleDirs = NULL,
sampleNames = NULL,
dataTypeV2 = c("filtered", "raw"),
class = c("Matrix", "matrix"),
delayedArray = FALSE,
reference = NULL,
cellRangerOutsV2 = NULL,
rowNamesDedup = TRUE) {
class <- match.arg(class)
dataTypeV2 <- match.arg(dataTypeV2)
if (is.null(cellRangerOutsV2)) {
if (is.null(reference) | is.null(dataTypeV2)) {
stop("'reference' and 'dataTypeV2' are required ",
"when 'cellRangerOutsV2' is not specified!")
}
}
# Generate cellRangerOuts if it's null
if (is.null(cellRangerOutsV2)) {
cellRangerOutsV2 <- .getCellRangerOutV2(dataTypeV2, reference)
}
.importCellRanger(cellRangerDirs = cellRangerDirs,
sampleDirs = sampleDirs,
sampleNames = sampleNames,
cellRangerOuts = cellRangerOutsV2,
dataType = NULL,
matrixFileNames = "matrix.mtx",
featuresFileNames = "genes.tsv",
barcodesFileNames = "barcodes.tsv",
gzipped = FALSE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @name importCellRangerV2Sample
#' @title Construct SCE object from Cell Ranger V2 output for a single sample
#' @description Read the filtered barcodes, features, and matrices for all
#' samples from Cell Ranger V2 output. Files are assumed to be named
#' "matrix.mtx", "genes.tsv", and "barcodes.tsv".
#' @param dataDir A path to the directory containing the data files. Default "./".
#' @param sampleName A User-defined sample name. This will be prepended to all cell barcode IDs.
#' Default "sample".
#' @param class Character. The class of the expression matrix stored in the SCE
#' object. Can be one of "Matrix" (as returned by
#' \link{readMM} function), or "matrix" (as returned by
#' \link[base]{matrix} function). Default "Matrix".
#' @param delayedArray Boolean. Whether to read the expression matrix as
#' \link{DelayedArray} object or not. Default \code{FALSE}.
#' @param rowNamesDedup Boolean. Whether to deduplicate rownames. Default
#' \code{TRUE}.
#' @return A \code{SingleCellExperiment} object containing the count
#' matrix, the feature annotations, and the cell annotation for the sample.
#' @examples
#' sce <- importCellRangerV2Sample(
#' dataDir = system.file("extdata/pbmc_4k_v2_20x20/outs/",
#' "filtered_gene_bc_matrices/GRCh38", package = "singleCellTK"),
#' sampleName = "pbmc4k_20")
#' @export
importCellRangerV2Sample <- function(
dataDir = NULL,
sampleName = NULL,
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
.importCellRanger(cellRangerDirs = NULL,
sampleDirs = dataDir,
sampleNames = sampleName,
cellRangerOuts = '',
dataType = NULL, # ignored
matrixFileNames = "matrix.mtx",
featuresFileNames = "genes.tsv",
barcodesFileNames = "barcodes.tsv",
gzipped = FALSE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @rdname importCellRanger
#' @examples
#' sce <- importCellRangerV3(
#' cellRangerDirs = system.file("extdata/", package = "singleCellTK"),
#' sampleDirs = "hgmm_1k_v3_20x20",
#' sampleNames = "hgmm1kv3",
#' dataType = "filtered")
#' @export
importCellRangerV3 <- function(
cellRangerDirs = NULL,
sampleDirs = NULL,
sampleNames = NULL,
dataType = c("filtered", "raw"),
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
dataType <- match.arg(dataType)
if (dataType == "filtered") {
cellRangerOuts <- "outs/filtered_feature_bc_matrix/"
} else if (dataType == "raw") {
cellRangerOuts <- "outs/raw_feature_bc_matrix/"
}
.importCellRanger(cellRangerDirs = cellRangerDirs,
sampleDirs = sampleDirs,
sampleNames = sampleNames,
cellRangerOuts = cellRangerOuts,
dataType = dataType,
matrixFileNames = "matrix.mtx.gz",
featuresFileNames = "features.tsv.gz",
barcodesFileNames = "barcodes.tsv.gz",
gzipped = TRUE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @name importCellRangerV3Sample
#' @title Construct SCE object from Cell Ranger V3 output for a single sample
#' @description Read the filtered barcodes, features, and matrices for all
#' samples from Cell Ranger V3 output. Files are assumed to be named
#' "matrix.mtx.gz", "features.tsv.gz", and "barcodes.tsv.gz".
#' @param dataDir A path to the directory containing the data files. Default "./".
#' @param sampleName A User-defined sample name. This will be prepended to all cell barcode IDs.
#' Default "sample".
#' @param class Character. The class of the expression matrix stored in the SCE
#' object. Can be one of "Matrix" (as returned by
#' \link{readMM} function), or "matrix" (as returned by
#' \link[base]{matrix} function). Default "Matrix".
#' @param delayedArray Boolean. Whether to read the expression matrix as
#' \link{DelayedArray} object or not. Default \code{FALSE}.
#' @param rowNamesDedup Boolean. Whether to deduplicate rownames. Default
#' \code{TRUE}.
#' @return A \code{SingleCellExperiment} object containing the count
#' matrix, the feature annotations, and the cell annotation for the sample.
#' @examples
#' sce <- importCellRangerV3Sample(
#' dataDir = system.file("extdata/hgmm_1k_v3_20x20/outs/",
#' "filtered_feature_bc_matrix", package = "singleCellTK"),
#' sampleName = "hgmm1kv3")
#' @export
importCellRangerV3Sample <- function(
dataDir = "./",
sampleName = "sample",
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
.importCellRanger(cellRangerDirs = NULL,
sampleDirs = dataDir,
sampleNames = sampleName,
cellRangerOuts = "",
dataType = "filtered", # ignored
matrixFileNames = "matrix.mtx.gz",
featuresFileNames = "features.tsv.gz",
barcodesFileNames = "barcodes.tsv.gz",
gzipped = TRUE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
# Find metrics_summary.csv file in each sample and merge them into a single dataframe
# Additionally, if file not available for a sample, fill that sample with NA
.importMetricsCellRanger <- function(samplePaths, sampleNames, metricsPath, metricsFile){
# Check if samplePaths and sampleNames are equal in length
if(!identical(length(samplePaths), length(sampleNames))){
stop("Vectors samplePaths and sampleNames must be equal in length.")
}
# Processing
metrics_summary <- list()
for(i in seq(samplePaths)){
metrics_summary[[i]] <- list.files(pattern= paste0("*", metricsFile, "$"), path = paste0(samplePaths[i], "/", metricsPath), full.names = TRUE)
if(length(metrics_summary[[i]]) > 0){
metrics_summary[[i]] <- lapply(metrics_summary[[i]], utils::read.csv, header = TRUE, check.names = FALSE)[[1]]
}
else{
message("Metrics summary file (", metricsFile, ") not found for sample: ", sampleNames[i])
ms_colnames_union <- Reduce(union, lapply(metrics_summary, colnames))
metrics_summary[[i]] <- data.frame(matrix(data = NA, nrow = 1, ncol = length(ms_colnames_union)))
colnames(metrics_summary[[i]]) <- ms_colnames_union
}
}
# Merge cell ranger metrics_summary csv files from all/multiple samples into a single data.frame
metrics_summary <- plyr::rbind.fill(metrics_summary)
metrics_summary <- t(metrics_summary)
colnames(metrics_summary) <- sampleNames
return(metrics_summary)
}
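# Illustrative multi-sample sketch (paths and sample names below are made up,
# not shipped with the package): two samples under one Cell Ranger run
# directory are imported and combined into a single SCE, with per-sample
# metrics_summary.csv files collected by .importMetricsCellRanger() above.
# sce <- importCellRanger(
#     cellRangerDirs = "/path/to/cellranger_run",
#     sampleDirs = list(c("sampleA", "sampleB")),
#     sampleNames = c("sampleA", "sampleB"),
#     dataType = "filtered")
# table(SummarizedExperiment::colData(sce)$sample)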
|
/R/importCellRanger.R
|
permissive
|
rz2333/singleCellTK
|
R
| false | false | 31,297 |
r
|
.readBarcodes <- function(path,
header = FALSE,
colname = "cell_barcode",
colClasses = "character") {
res <- data.table::fread(path, header = header, colClasses = colClasses)
if (ncol(res) == 1) {
colnames(res) <- colname
} else {
warning("'barcodes' file contains >1 columns!",
" The column names are kept as is.")
}
return(res)
}
.readFeatures <- function(path,
header = FALSE,
colnames = c("feature_ID", "feature_name", "feature_type"),
colClasses = "character") {
res <- data.table::fread(path, header = header)
if (ncol(res) == 1) {
colnames(res) <- colnames[1]
} else if (ncol(res) == 2) {
colnames(res) <- colnames[seq(2)]
} else if (ncol(res) == 3) {
colnames(res) <- colnames
} else {
warning("'features' file contains >3 columns!",
" The column names are kept as is.")
}
return(res)
}
#' @importFrom tools file_ext
.readMatrixMM <- function(path, gzipped, class, delayedArray) {
if (gzipped == "auto") {
ext <- tools::file_ext(path)
if (ext == "gz") {
path <- gzfile(path)
}
} else if (isTRUE(gzipped)) {
path <- gzfile(path)
}
mat <- Matrix::readMM(path)
mat <- methods::as(mat, "dgCMatrix")
if (class == "matrix") {
mat <- as.matrix(mat)
}
if (isTRUE(delayedArray)) {
mat <- DelayedArray::DelayedArray(mat)
}
return(mat)
}
# dir <- "outs/filtered_feature_bc_matrix/"
.constructSCEFromCellRangerOutputs <- function(dir,
sampleName,
matrixFileName,
featuresFileName,
barcodesFileName,
gzipped,
class,
delayedArray) {
cb <- .readBarcodes(file.path(dir, barcodesFileName))
fe <- .readFeatures(file.path(dir, featuresFileName))
ma <- .readMatrixMM(file.path(dir, matrixFileName),
gzipped = gzipped,
class = class,
delayedArray = delayedArray)
coln <- paste(sampleName, cb[[1]], sep = "_")
rownames(ma) <- fe[[1]]
sce <- SingleCellExperiment::SingleCellExperiment(
assays = list(counts = ma))
SummarizedExperiment::rowData(sce) <- fe
SummarizedExperiment::colData(sce) <- S4Vectors::DataFrame(cb,
column_name = coln,
sample = sampleName,
row.names = coln)
return(sce)
}
.getOutputFolderPath <- function(samplePath, cellRangerOuts, cellRangerOuts2) {
path <- file.path(samplePath, cellRangerOuts)
if (dir.exists(path)) {
return(path)
}
if (!is.na(cellRangerOuts2)) {
path2 <- file.path(samplePath, cellRangerOuts2)
if (dir.exists(path2)) {
return(path2)
} else {
stop("Invalid path ", path2)
}
}
stop("Invalid path ", path)
}
.checkArgsImportCellRanger <- function(cellRangerDirs,
sampleDirs,
sampleNames,
cellRangerOuts,
matrixFileNames,
featuresFileNames,
barcodesFileNames,
gzipped) {
if (any(!(gzipped %in% c("auto", TRUE, FALSE)))) {
stop("Invalid 'gzipped' argument! Should be one of 'auto',",
" TRUE, or FALSE")
}
if (is.null(cellRangerDirs)) {
if (is.null(sampleDirs)) {
stop("'sampleDirs' can not be NULL if 'cellRangerDirs' is NULL!")
}
for (i in seq_along(sampleDirs)) {
if (!dir.exists(sampleDirs[i])) {
stop("Sample folder ", sampleDirs[i], " does not exist!")
}
}
sampleLength <- length(sampleDirs)
if (!is.null(sampleNames)) {
if (length(sampleNames) != sampleLength) {
stop("'sampleDirs' and 'sampleNames' have unequal lengths!")
}
}
if (!(length(cellRangerOuts) %in% c(0, 1))) {
if (length(cellRangerOuts) != sampleLength) {
stop("'sampleDirs' and 'cellRangerOuts' have unequal lengths!")
}
}
if (length(matrixFileNames) != 1) {
if (length(matrixFileNames) != sampleLength) {
stop("'sampleDirs' and 'matrixFileNames' have unequal lengths!")
}
}
if (length(featuresFileNames) != 1) {
if (length(featuresFileNames) != sampleLength) {
stop("'sampleDirs' and 'featuresFileNames'",
" have unequal lengths!")
}
}
if (length(barcodesFileNames) != 1) {
if (length(barcodesFileNames) != sampleLength) {
stop("'sampleDirs' and 'barcodesFileNames'",
" have unequal lengths!")
}
}
if (gzipped != "auto") {
if (length(gzipped) != sampleLength & length(gzipped) != 1) {
stop("'sampleDirs' and 'gzipped' have unequal lengths!")
}
}
} else {
if (!all(dir.exists(cellRangerDirs))) {
stop("Invalid cellRangerDirs: ",
paste(cellRangerDirs[which(!dir.exists(cellRangerDirs))],
collapse = ", "))
}
if (is.null(sampleDirs)) {
for (i in seq_along(cellRangerDirs)) {
if (length(list.dirs(cellRangerDirs[i],
recursive = FALSE)) == 0) {
warning("Empty cellRangerDir. Skipping ",
cellRangerDirs[i])
}
}
sampleLength <- length(unlist(lapply(cellRangerDirs,
list.dirs, recursive = FALSE)))
if (!is.null(sampleNames)) {
if (sampleLength != length(sampleNames)) {
stop("The length of 'sampleNames' does not match length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (!(length(cellRangerOuts) %in% c(0, 1))) {
if (sampleLength != length(cellRangerOuts)) {
stop("The length of 'cellRangerOuts' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (length(matrixFileNames) != 1) {
if (sampleLength != length(matrixFileNames)) {
stop("The length of 'matrixFileNames' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (length(featuresFileNames) != 1) {
if (sampleLength != length(featuresFileNames)) {
stop("The length of 'featuresFileNames' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (length(barcodesFileNames) != 1) {
if (sampleLength != length(barcodesFileNames)) {
stop("The length of 'barcodesFileNames' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
if (gzipped != "auto") {
if (sampleLength != length(gzipped) & length(gzipped) != 1) {
stop("The length of 'gzipped' does not match",
" length of",
" subdirectories in 'cellRangerDirs'!")
}
}
} else {
if (length(sampleDirs) != length(cellRangerDirs)) {
stop("'sampleDirs' and 'cellRangerDirs' have unequal lengths!")
} else {
for (i in seq_along(cellRangerDirs)) {
paths <- file.path(cellRangerDirs[i], sampleDirs[[i]])
for (j in seq_along(paths)) {
if (!dir.exists(paths[j])) {
stop("Sample folder does not exist!\n",
paths[j])
}
}
}
}
# analogous to length(unlist(sampleDirs))
sampleLength <- sum(vapply(sampleDirs, length, integer(1)))
if (!is.null(sampleNames)) {
if (length(sampleNames) != sampleLength) {
stop("'sampleNames' and 'unlist(sampleDirs)' have unequal",
" lengths!")
}
}
if (!(length(cellRangerOuts) %in% c(0, 1))) {
if (length(cellRangerOuts) != sampleLength) {
stop("'cellRangerOuts' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (length(matrixFileNames) != 1) {
if (length(matrixFileNames) != sampleLength) {
stop("'matrixFileNames' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (length(featuresFileNames) != 1) {
if (length(featuresFileNames) != sampleLength) {
stop("'featuresFileNames' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (length(barcodesFileNames) != 1) {
if (length(barcodesFileNames) != sampleLength) {
stop("'barcodesFileNames' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
if (gzipped != "auto") {
if (length(gzipped) != sampleLength & length(gzipped) != 1) {
stop("'gzipped' and 'unlist(sampleDirs)'",
" have unequal lengths!")
}
}
}
}
}
.getSamplesPaths <- function(cellRangerDirs, samplesDirs) {
if (is.null(cellRangerDirs)) {
res <- samplesDirs
} else {
if (is.null(samplesDirs)) {
res <- list.dirs(cellRangerDirs, recursive = FALSE)
} else {
res <- vector("list", length = length(cellRangerDirs))
for (i in seq_along(cellRangerDirs)) {
res[[i]] <- file.path(cellRangerDirs[i], samplesDirs[[i]])
}
res <- unlist(res)
}
}
return(res)
}
.getSampleNames <- function(samplePaths) {
res <- basename(samplePaths)
return(res)
}
.getVectorized <- function(arg, len) {
if (length(arg) == 1) {
arg <- rep(arg, len)
}
return(arg)
}
# main function
.importCellRanger <- function(
cellRangerDirs,
sampleDirs,
sampleNames,
cellRangerOuts,
dataType,
matrixFileNames,
featuresFileNames,
barcodesFileNames,
gzipped,
class,
delayedArray,
rowNamesDedup) {
.checkArgsImportCellRanger(cellRangerDirs,
sampleDirs,
sampleNames,
cellRangerOuts,
matrixFileNames,
featuresFileNames,
barcodesFileNames,
gzipped)
samplePaths <- .getSamplesPaths(cellRangerDirs, sampleDirs)
res <- vector("list", length = length(samplePaths))
cellRangerOuts2 <- NA
if (is.null(cellRangerOuts)) {
if (dataType == "filtered") {
cellRangerOuts <- "outs/filtered_gene_bc_matrix"
cellRangerOuts2 <- "outs/filtered_feature_bc_matrix"
} else {
cellRangerOuts <- "outs/raw_gene_bc_matrix"
cellRangerOuts2 <- "outs/raw_feature_bc_matrix"
}
}
cellRangerOuts <- .getVectorized(cellRangerOuts, length(samplePaths))
cellRangerOuts2 <- .getVectorized(cellRangerOuts2, length(samplePaths))
matrixFileNames <- .getVectorized(matrixFileNames, length(samplePaths))
featuresFileNames <- .getVectorized(featuresFileNames, length(samplePaths))
barcodesFileNames <- .getVectorized(barcodesFileNames, length(samplePaths))
gzipped <- .getVectorized(gzipped, length(samplePaths))
if (is.null(sampleNames)) {
sampleNames <- .getSampleNames(samplePaths)
}
for (i in seq_along(samplePaths)) {
dir <- .getOutputFolderPath(samplePaths[i], cellRangerOuts[i],
cellRangerOuts2[i])
scei <- .constructSCEFromCellRangerOutputs(dir,
sampleName = sampleNames[i],
matrixFileName = matrixFileNames[i],
featuresFileName = featuresFileNames[i],
barcodesFileName = barcodesFileNames[i],
gzipped = gzipped[i],
class = class,
delayedArray = delayedArray)
res[[i]] <- scei
}
sce <- do.call(SingleCellExperiment::cbind, res)
# Load & Store Cell Ranger Summary into SCE
metrics_summary <- .importMetricsCellRanger(samplePaths, sampleNames, "outs", "metrics_summary.csv")
if (ncol(metrics_summary) > 0) {
sce@metadata$sctk$sample_summary[["cellranger"]] <- metrics_summary
}
# sce <- setSampleSummaryStatsTable(sce, "cellranger", metrics_summary)
if (isTRUE(rowNamesDedup)) {
if (any(duplicated(rownames(sce)))) {
message("Duplicated gene names found, adding '-1', '-2', ",
"... suffix to them.")
}
sce <- dedupRowNames(sce)
}
return(sce)
}
.getCellRangerOutV2 <- function(dataTypeV2, reference) {
res <- vector("list", length = length(reference))
for (i in seq_along(reference)) {
if (dataTypeV2 == 'filtered') {
res[[i]] <- file.path('outs/filtered_gene_bc_matrices', reference[i])
} else {
res[[i]] <- file.path('outs/raw_gene_bc_matrices', reference[i])
}
}
cellRangerOutsV2 <- unlist(res)
return(cellRangerOutsV2)
}
#' @name importCellRanger
#' @rdname importCellRanger
#' @title Construct SCE object from Cell Ranger output
#' @description Read the filtered barcodes, features, and matrices for all
#' samples from (preferably a single run of) Cell Ranger output. Import and
#' combine them
#' as one big \link[SingleCellExperiment]{SingleCellExperiment} object.
#' @param cellRangerDirs The root directories where Cell Ranger was run. These
#' folders should contain sample specific folders. Default \code{NULL},
#' meaning the paths for each sample will be specified in \emph{samples}
#' argument.
#' @param sampleDirs Default \code{NULL}. Can be one of
#' \itemize{
#' \item \code{NULL}. All samples within \code{cellRangerDirs} will be
#' imported. The order of samples will be first determined by the order of
#' \code{cellRangerDirs} and then by \link{list.dirs}. This is only
#' for the case where \code{cellRangerDirs} is specified.
#' \item A list of vectors containing the folder names for samples to import.
#' Each vector in
#' the list corresponds to samples from one of \code{cellRangerDirs}.
#' These names are the same as the folder names under \code{cellRangerDirs}.
#' This is only for the case where \code{cellRangerDirs} is specified.
#' \item A vector of folder paths for the samples to import. This is only for
#' the case where \code{cellRangerDirs} is \code{NULL}.
#' }
#' The cells in the final SCE object will be ordered in the same order of
#' \code{sampleDirs}.
#' @param sampleNames A vector of user-defined sample names for the samples
#' to be
#' imported. Must have the same length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}. Default
#' \code{NULL}, in which case the folder names will be used as sample names.
#' @param cellRangerOuts Character vector. The intermediate
#' paths to filtered or raw cell barcode, feature, and matrix files
#' for each sample. \strong{Supersedes \code{dataType}}. If \code{NULL},
#' \code{dataType}
#' will be used to determine Cell Ranger output directory. If not \code{NULL},
#' \code{dataType} will be ignored and \code{cellRangerOuts} specifies the
#' paths. Must have length 1 or the same length as
#' \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' Reference genome names might need to be
#' appended for CellRanger version below 3.0.0 if reads were mapped to
#' multiple genomes when running Cell Ranger pipeline. Probable options
#' include "outs/filtered_feature_bc_matrix/", "outs/raw_feature_bc_matrix/",
#' "outs/filtered_gene_bc_matrix/", "outs/raw_gene_bc_matrix/".
#' @param dataType Character. The type of data to import. Can be one of
#' "filtered" (which is equivalent to
#' \code{cellRangerOuts = "outs/filtered_feature_bc_matrix/"} or
#' \code{cellRangerOuts = "outs/filtered_gene_bc_matrix/"}) or "raw" (which
#' is equivalent to
#' \code{cellRangerOuts = "outs/raw_feature_bc_matrix/"} or
#' \code{cellRangerOuts = "outs/raw_gene_bc_matrix/"}). Default
#' "filtered" which imports the counts for filtered cell barcodes only.
#' @param matrixFileNames Character vector. Filenames for the Market Exchange
#' Format (MEX) sparse matrix files (matrix.mtx or matrix.mtx.gz files).
#' Must have length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param featuresFileNames Character vector. Filenames for the feature
#' annotation files. They are usually named \emph{features.tsv.gz} or
#' \emph{genes.tsv}. Must have length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param barcodesFileNames Character vector. Filename for the cell barcode
#' list files. They are usually named \emph{barcodes.tsv.gz} or
#' \emph{barcodes.tsv}. Must have length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param gzipped \code{TRUE} if the Cell Ranger output files
#' (barcodes.tsv, features.tsv, and matrix.mtx) were
#' gzip compressed. \code{FALSE} otherwise. This is true after Cell Ranger
#' 3.0.0 update. Default \code{"auto"} which automatically detects if the
#' files are gzip compressed. If not \code{"auto"}, \code{gzipped} must have
#' length 1 or the same
#' length as \code{length(unlist(sampleDirs))} if
#' \code{sampleDirs} is not \code{NULL}. Otherwise, make sure the length and
#' order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}.
#' @param class Character. The class of the expression matrix stored in the SCE
#' object. Can be one of "Matrix" (as returned by
#' \link{readMM} function), or "matrix" (as returned by
#' \link[base]{matrix} function). Default \code{"Matrix"}.
#' @param delayedArray Boolean. Whether to read the expression matrix as
#' \link{DelayedArray} object or not. Default \code{FALSE}.
#' @param reference Character vector. The reference genome names.
#' Default \code{NULL}. If not \code{NULL}, it must have the same length and order as
#' \code{length(unlist(sampleDirs))} if \code{sampleDirs} is not \code{NULL}.
#' Otherwise, make sure the length and order match the output of
#' \code{unlist(lapply(cellRangerDirs, list.dirs, recursive = FALSE))}. Only needed
#' for Cellranger version below 3.0.0.
#' @param dataTypeV2 Character. The type of output to import for
#' Cellranger version below 3.0.0. Whether to import the filtered or the
#' raw data. Can be one of 'filtered' or 'raw'. Default 'filtered'. When
#' \code{cellRangerOuts} is specified, \code{dataTypeV2} and \code{reference} will
#' be ignored.
#' @param cellRangerOutsV2 Character vector. The intermediate paths
#' to filtered or raw cell barcode, feature, and matrix files for each
#' sample for Cellranger version below 3.0.0. If \code{NULL}, \code{reference} and
#' \code{dataTypeV2} will be used to determine Cell Ranger output directory. If it has
#' length 1, it assumes that all samples use the same genome reference and
#' the function will load only filtered or raw data.
#' @param rowNamesDedup Boolean. Whether to deduplicate rownames. Default
#' \code{TRUE}.
#' @details
#' \code{importCellRangerV2} imports output from Cell Ranger V2.
#' \code{importCellRangerV2Sample} imports output from one sample from Cell
#' Ranger V2.
#' \code{importCellRangerV3} imports output from Cell Ranger V3.
#' \code{importCellRangerV3Sample} imports output from one sample from Cell
#' Ranger V3.
#' Some implicit
#' assumptions which match the output structure of Cell Ranger V2 & V3
#' are made in these 4 functions including \code{cellRangerOuts},
#' \code{matrixFileName}, \code{featuresFileName}, \code{barcodesFileName},
#' and \code{gzipped}.
#' Alternatively, user can call \code{importCellRanger} to explicitly
#' specify these arguments.
#' @return A \code{SingleCellExperiment} object containing the combined count
#' matrix, the feature annotations, and the cell annotation.
#' @examples
#' # Example #1
#' # The following filtered feature, cell, and matrix files were downloaded from
#' # https://support.10xgenomics.com/single-cell-gene-expression/datasets/
#' # 3.0.0/hgmm_1k_v3
#' # The top 10 hg19 & mm10 genes are included in this example.
#' # Only the first 20 cells are included.
#' sce <- importCellRanger(
#' cellRangerDirs = system.file("extdata/", package = "singleCellTK"),
#' sampleDirs = "hgmm_1k_v3_20x20",
#' sampleNames = "hgmm1kv3",
#' dataType = "filtered")
#' @export
importCellRanger <- function(
cellRangerDirs = NULL,
sampleDirs = NULL,
sampleNames = NULL,
cellRangerOuts = NULL,
dataType = c("filtered", "raw"),
matrixFileNames = "matrix.mtx.gz",
featuresFileNames = "features.tsv.gz",
barcodesFileNames = "barcodes.tsv.gz",
gzipped = "auto",
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
dataType <- match.arg(dataType)
.importCellRanger(cellRangerDirs = cellRangerDirs,
sampleDirs = sampleDirs,
sampleNames = sampleNames,
cellRangerOuts = cellRangerOuts,
dataType = dataType,
matrixFileNames = matrixFileNames,
featuresFileNames = featuresFileNames,
barcodesFileNames = barcodesFileNames,
gzipped = gzipped,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @rdname importCellRanger
#' @examples
#' # The following filtered feature, cell, and matrix files were downloaded from
#' # https://support.10xgenomics.com/single-cell-gene-expression/datasets/
#' # 2.1.0/pbmc4k
#' # Top 20 genes are kept. 20 cell barcodes are extracted.
#' sce <- importCellRangerV2(
#' cellRangerDirs = system.file("extdata/", package = "singleCellTK"),
#' sampleDirs = "pbmc_4k_v2_20x20",
#' sampleNames = "pbmc4k_20",
#' reference = 'GRCh38',
#' dataTypeV2 = "filtered")
#' @export
importCellRangerV2 <- function(
cellRangerDirs = NULL,
sampleDirs = NULL,
sampleNames = NULL,
dataTypeV2 = c("filtered", "raw"),
class = c("Matrix", "matrix"),
delayedArray = FALSE,
reference = NULL,
cellRangerOutsV2 = NULL,
rowNamesDedup = TRUE) {
class <- match.arg(class)
dataTypeV2 <- match.arg(dataTypeV2)
if (is.null(cellRangerOutsV2)) {
if (is.null(reference) | is.null(dataTypeV2)) {
stop("'reference' and 'dataTypeV2' are required ",
"when 'cellRangerOutsV2' is not specified!")
}
}
# Generate cellRangerOuts if it's null
if (is.null(cellRangerOutsV2)) {
cellRangerOutsV2 <- .getCellRangerOutV2(dataTypeV2, reference)
}
.importCellRanger(cellRangerDirs = cellRangerDirs,
sampleDirs = sampleDirs,
sampleNames = sampleNames,
cellRangerOuts = cellRangerOutsV2,
dataType = NULL,
matrixFileNames = "matrix.mtx",
featuresFileNames = "genes.tsv",
barcodesFileNames = "barcodes.tsv",
gzipped = FALSE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @name importCellRangerV2Sample
#' @title Construct SCE object from Cell Ranger V2 output for a single sample
#' @description Read the filtered barcodes, features, and matrix for a
#' single sample from Cell Ranger V2 output. Files are assumed to be named
#' "matrix.mtx", "genes.tsv", and "barcodes.tsv".
#' @param dataDir A path to the directory containing the data files. Default "./".
#' @param sampleName A User-defined sample name. This will be prepended to all cell barcode IDs.
#' Default "sample".
#' @param class Character. The class of the expression matrix stored in the SCE
#' object. Can be one of "Matrix" (as returned by
#' \link{readMM} function), or "matrix" (as returned by
#' \link[base]{matrix} function). Default "Matrix".
#' @param delayedArray Boolean. Whether to read the expression matrix as
#' \link{DelayedArray} object or not. Default \code{FALSE}.
#' @param rowNamesDedup Boolean. Whether to deduplicate rownames. Default
#' \code{TRUE}.
#' @return A \code{SingleCellExperiment} object containing the count
#' matrix, the feature annotations, and the cell annotation for the sample.
#' @examples
#' sce <- importCellRangerV2Sample(
#' dataDir = system.file("extdata/pbmc_4k_v2_20x20/outs/",
#' "filtered_gene_bc_matrices/GRCh38", package = "singleCellTK"),
#' sampleName = "pbmc4k_20")
#' @export
importCellRangerV2Sample <- function(
dataDir = NULL,
sampleName = NULL,
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
.importCellRanger(cellRangerDirs = NULL,
sampleDirs = dataDir,
sampleNames = sampleName,
cellRangerOuts = '',
dataType = NULL, # ignored
matrixFileNames = "matrix.mtx",
featuresFileNames = "genes.tsv",
barcodesFileNames = "barcodes.tsv",
gzipped = FALSE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @rdname importCellRanger
#' @examples
#' sce <- importCellRangerV3(
#' cellRangerDirs = system.file("extdata/", package = "singleCellTK"),
#' sampleDirs = "hgmm_1k_v3_20x20",
#' sampleNames = "hgmm1kv3",
#' dataType = "filtered")
#' @export
importCellRangerV3 <- function(
cellRangerDirs = NULL,
sampleDirs = NULL,
sampleNames = NULL,
dataType = c("filtered", "raw"),
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
dataType <- match.arg(dataType)
if (dataType == "filtered") {
cellRangerOuts <- "outs/filtered_feature_bc_matrix/"
} else if (dataType == "raw") {
cellRangerOuts <- "outs/raw_feature_bc_matrix/"
}
.importCellRanger(cellRangerDirs = cellRangerDirs,
sampleDirs = sampleDirs,
sampleNames = sampleNames,
cellRangerOuts = cellRangerOuts,
dataType = dataType,
matrixFileNames = "matrix.mtx.gz",
featuresFileNames = "features.tsv.gz",
barcodesFileNames = "barcodes.tsv.gz",
gzipped = TRUE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
#' @name importCellRangerV3Sample
#' @title Construct SCE object from Cell Ranger V3 output for a single sample
#' @description Read the filtered barcodes, features, and matrix for a
#' single sample from Cell Ranger V3 output. Files are assumed to be named
#' "matrix.mtx.gz", "features.tsv.gz", and "barcodes.tsv.gz".
#' @param dataDir A path to the directory containing the data files. Default "./".
#' @param sampleName A User-defined sample name. This will be prepended to all cell barcode IDs.
#' Default "sample".
#' @param class Character. The class of the expression matrix stored in the SCE
#' object. Can be one of "Matrix" (as returned by
#' \link{readMM} function), or "matrix" (as returned by
#' \link[base]{matrix} function). Default "Matrix".
#' @param delayedArray Boolean. Whether to read the expression matrix as
#' \link{DelayedArray} object or not. Default \code{FALSE}.
#' @param rowNamesDedup Boolean. Whether to deduplicate rownames. Default
#' \code{TRUE}.
#' @return A \code{SingleCellExperiment} object containing the count
#' matrix, the feature annotations, and the cell annotation for the sample.
#' @examples
#' sce <- importCellRangerV3Sample(
#' dataDir = system.file("extdata/hgmm_1k_v3_20x20/outs/",
#' "filtered_feature_bc_matrix", package = "singleCellTK"),
#' sampleName = "hgmm1kv3")
#' @export
importCellRangerV3Sample <- function(
dataDir = "./",
sampleName = "sample",
class = c("Matrix", "matrix"),
delayedArray = FALSE,
rowNamesDedup = TRUE) {
class <- match.arg(class)
.importCellRanger(cellRangerDirs = NULL,
sampleDirs = dataDir,
sampleNames = sampleName,
cellRangerOuts = "",
dataType = "filtered", # ignored
matrixFileNames = "matrix.mtx.gz",
featuresFileNames = "features.tsv.gz",
barcodesFileNames = "barcodes.tsv.gz",
gzipped = TRUE,
class = class,
delayedArray = delayedArray,
rowNamesDedup = rowNamesDedup)
}
# Find metrics_summary.csv file in each sample and merge them into a single dataframe
# Additionally, if file not available for a sample, fill that sample with NA
.importMetricsCellRanger <- function(samplePaths, sampleNames, metricsPath, metricsFile){
# Check if samplePaths and sampleNames are equal in length
if(!identical(length(samplePaths), length(sampleNames))){
stop("Vectors samplePaths and sampleNames must be equal in length.")
}
# Processing
metrics_summary <- list()
for(i in seq(samplePaths)){
metrics_summary[[i]] <- list.files(pattern= paste0("*", metricsFile, "$"), path = paste0(samplePaths[i], "/", metricsPath), full.names = TRUE)
if(length(metrics_summary[[i]]) > 0){
metrics_summary[[i]] <- lapply(metrics_summary[[i]], utils::read.csv, header = TRUE, check.names = FALSE)[[1]]
}
else{
message("Metrics summary file (", metricsFile, ") not found for sample: ", sampleNames[i])
ms_colnames_union <- Reduce(union, lapply(metrics_summary, colnames))
metrics_summary[[i]] <- data.frame(matrix(data = NA, nrow = 1, ncol = length(ms_colnames_union)))
colnames(metrics_summary[[i]]) <- ms_colnames_union
}
}
# Merge cell ranger metrics_summary csv files from all/multiple samples into a single data.frame
metrics_summary <- plyr::rbind.fill(metrics_summary)
metrics_summary <- t(metrics_summary)
colnames(metrics_summary) <- sampleNames
return(metrics_summary)
}
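# A minimal usage sketch (illustration only, not part of the package): the
# sample folder names below are hypothetical. Given per-sample Cell Ranger
# output folders, the helper returns a metric-by-sample matrix, filling the
# column of any sample that lacks a metrics_summary.csv file with NA.
#
# ms <- .importMetricsCellRanger(
#     samplePaths = c("run1/sampleA", "run1/sampleB"),
#     sampleNames = c("sampleA", "sampleB"),
#     metricsPath = "outs",
#     metricsFile = "metrics_summary.csv")
# # rownames(ms) are the metric names; colnames(ms) are c("sampleA", "sampleB")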
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/private_printCaA.R
\name{prCaGetVnStats}
\alias{prCaGetVnStats}
\title{Gets the variable stats}
\usage{
prCaGetVnStats(
model,
vn,
outcome,
ds,
add_references,
add_references_pos,
desc_args
)
}
\arguments{
\item{model}{The model}
\item{vn}{The variable name}
\item{outcome}{The outcome vector}
\item{ds}{The dataset}
\item{add_references}{TRUE if the data set should be used to look for
references; otherwise supply the function with a vector of names.
Sometimes you want to indicate the reference row for each group.
There must be exactly as many entries as there are groups, in the order
identified. Use NA if you don't want a reference for that particular group.}
\item{add_references_pos}{The position where a reference should be added.
Sometimes you don't want the reference to be at the top, for instance
if you have age groups then you may have < 25, 25-39, 40-55, > 55 and
you have the reference to be 25-39 then you should set the reference
list for \code{age_groups} as \code{add_references_pos = list(age_groups = 2)}
so that you have the second group as the position for the reference.}
\item{desc_args}{The description arguments that are to be used for the
description columns. The options/arguments should be generated by the
\code{\link{caDescribeOpts}} function.}
}
\value{
\code{matrix} A matrix from \code{\link[Gmisc]{getDescriptionStatsBy}} or
\code{\link{prGetStatistics}}
}
\description{
Gets the variable stats
}
\seealso{
Other printCrudeAndAdjusted functions:
\code{\link{prCaAddRefAndStat}()},
\code{\link{prCaAddReference}()},
\code{\link{prCaAddUserReferences}()},
\code{\link{prCaGetImputationCols}()},
\code{\link{prCaGetRowname}()},
\code{\link{prCaPrepareCrudeAndAdjusted}()},
\code{\link{prCaReorderReferenceDescribe}()},
\code{\link{prCaReorder}()},
\code{\link{prCaSelectAndOrderVars}()},
\code{\link{prCaSetRownames}()}
}
\concept{printCrudeAndAdjusted functions}
\keyword{internal}
|
/man/prCaGetVnStats.Rd
|
no_license
|
gforge/Greg
|
R
| false | true | 2,018 |
rd
|
#' Simulated dataset with insufficient effort responses.
#'
#' A simulated dataset mimicking insufficient effort responding. Contains three types of responses:
#' (a) Normal responses with answers centering around a trait/attitude value
#' (80 percent probability per simulated observation),
#' (b) Straightlining responses (10 percent probability per simulated observation),
#' (c) Random responses (10 percent probability per simulated observation).
#' The simulated data comprise 10 subscales of 5 items each (= 50 variables).
#'
#'
#' @format A data frame with 200 observations (rows) and 50 variables (columns).
"careless_dataset"
|
/R/careless_dataset.R
|
permissive
|
cran/careless
|
R
| false | false | 634 |
r
|
### This script inspired by http://satijalab.org/seurat/pbmc3k_tutorial.html
library(Seurat)
library(dplyr)
library(stringr)
scdata <- Read10X(data.dir="subset_5000/")
sc <- CreateSeuratObject(raw.data=scdata, min.cells=3, min.genes=3, project="10X_CPTRES")
mito.genes <- grep(pattern="^MT-", x=rownames(x=sc@data), value=TRUE)
percent.mito <- Matrix::colSums(sc@raw.data[mito.genes, ])/Matrix::colSums(sc@raw.data)
get.pop.name <- function(bc) {
strsplit(bc, "-")[[1]][2]
}
barcode.src <- data.frame(sc@raw.data@Dimnames[[2]])
names(barcode.src) <- c("src")
# extract the numeric sample index that follows the "-" in each barcode
barcode.src$src <- as.numeric(sapply(as.character(barcode.src$src), get.pop.name))
rownames(barcode.src) <- sc@raw.data@Dimnames[[2]]
sc <- AddMetaData(object=sc, metadata=percent.mito, col.name="percent.mito")
sc <- AddMetaData(object=sc, metadata=barcode.src, col.name="barcode.src")
VlnPlot(object=sc, features.plot=c("nGene", "nUMI", "percent.mito"), nCol=3, point.size.use=0.5)
par(mfrow=c(1,2))
GenePlot(object=sc, gene1="nUMI", gene2="percent.mito")
GenePlot(object=sc, gene1="nUMI", gene2="nGene")
sc <- NormalizeData(object=sc, normalization.method="LogNormalize", scale.factor=10000)
par(mfrow=c(1,1))
sc <- FindVariableGenes(object=sc, mean.function=ExpMean, dispersion.function=LogVMR,
x.low.cutoff=0.0125, x.high.cutoff=3, y.cutoff=0.5)
## This needs a lot of memory
sc <- ScaleData(object=sc, vars.to.regress=c("nUMI", "percent.mito"))
## linear dimensional reduction
sc <- RunPCA(object=sc, pc.genes=sc@var.genes)
PCAPlot(object=sc, dim.1=1, dim.2=2)
PCHeatmap(object=sc, pc.use=1, do.balanced=TRUE, label.columns=FALSE)
## Clustering
sc <- FindClusters(object=sc, reduction.type="pca", dims.use=1:10,
resolution=0.6, print.output=0, save.SNN=TRUE)
sc <- RunTSNE(object=sc, dims.use=1:10, do.fast=TRUE)
TSNEPlot(object=sc)
TSNEPlot(object=sc, group.by='src')
|
/analysis2.R
|
permissive
|
alex-wenzel/cptres-r
|
R
| false | false | 1,892 |
r
|
#' @include internal.R
NULL
#' Conservation problem constraints
#'
#' TODO
#'
#' \describe{
#'
#' \item{\code{\link{add_connected_constraints}}}{
#' Add constraints to a conservation problem to ensure that all selected
#' planning units are spatially connected to each other.
#' }
#'
#' \item{\code{\link{add_corridor_constraints}}}{
#' It is important to maintain connectivity between reserves. However,
#' some areas are more difficult for species to traverse than other areas.
#' As a consequence, even though reserves may be connected, species may
#' not be able to move between reserves if the areas connecting them
#' are barriers to dispersal.
#'
#' This function adds constraints to ensure that corridors connect
#' reserves and that individuals from all species can utilise the
#' corridors. Friction \code{\link[raster]{Raster-class}} objects are
#' used to show how difficult each area is to traverse.
#' }
#'
#' \item{\code{\link{add_locked_in_constraints}}}{
#' Add constraints to ensure that certain planning units are prioritized
#' in the solution.
#' For example, it may be desirable to lock in planning units that are
#' inside existing protected areas so that the solution fills in the gaps in the
#' existing reserve network.
#' }
#'
#' \item{\code{\link{add_locked_out_constraints}}}{
#' Add constraints to ensure that certain planning units are not prioritized
#' in the solution. For example, it may be useful to lock out planning
#' units that have been degraded and are no longer suitable for conserving
#' species.
#' }
#'
#' \item{\code{\link{add_neighbor_constraints}}}{
#' Add constraints to a conservation problem to ensure that all selected
#' planning units have at least a certain number of neighbors.
#' }
#'
#' }
#'
#' @name constraints
NULL
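# A hedged usage sketch (illustration only, not package documentation): the
# constraint functions documented above are meant to be layered onto an
# existing conservation problem. The constructor, objective, and target
# functions named below are assumed from the package's typical workflow and
# may differ between versions; the inputs are hypothetical.
#
# p <- problem(pu, features)                   # planning unit and feature data
# p <- add_min_set_objective(p)                # an objective is required before solving
# p <- add_relative_targets(p, 0.1)            # represent 10% of each feature
# p <- add_locked_in_constraints(p, locked_in) # force existing reserves in
# p <- add_neighbor_constraints(p, 2)          # each selected unit needs >= 2 neighbors
# s <- solve(p)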
|
/R/constraints.R
|
no_license
|
prioritizr/prioritizrutils
|
R
| false | false | 1,768 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read}
\alias{fars_read}
\title{Read in FARS data}
\usage{
fars_read(filename)
}
\arguments{
\item{filename}{A character string giving the name of the file to
import the data from}
}
\value{
This function returns a data frame containing the FARS data for a
given year.
}
\description{
A helper (not exported) function used by \code{fars_read_years} and \code{fars_map_state}
functions. It takes a file name and reads in the data storing it in a data frame.
If the file does not exist the function throws an error.
}
\examples{
\dontrun{
fars_read("accident_2014.csv.bz2")
}
}
|
/man/fars_read.Rd
|
no_license
|
vbeakovic/fars
|
R
| false | true | 690 |
rd
|
library(datasets)
library(ggplot2)
library(dplyr)    # provides %>% and mutate()
library(reshape2) # provides dcast()
data("ToothGrowth")
str(ToothGrowth)
table(ToothGrowth$dose, ToothGrowth$supp)
ggplot(data = ToothGrowth, aes(x = as.factor(dose), y = len, fill = supp)) +
geom_bar(stat = "identity") +
facet_grid(. ~ supp) +
xlab("Dose in miligrames") +
ylab("Tooth length") +
guides(fill = guide_legend(title = "Suppliment type"))
r <- ToothGrowth %>% mutate(id = rep(1:10, times = 6))
r <- dcast(r, id ~ dose + supp, value.var = "len")
rbind(t.test(r$`0.5_OJ`, r$`1_OJ`)$conf.int,
t.test(r$`1_OJ`, r$`2_OJ`)$conf.int,
t.test(r$`0.5_OJ`, r$`2_OJ`)$conf.int
)
rbind(t.test(r$`0.5_VC`, r$`1_VC`)$conf.int,
t.test(r$`1_VC`, r$`2_VC`)$conf.int,
t.test(r$`0.5_VC`, r$`2_VC`)$conf.int
)
rbind(t.test(r$`0.5_OJ`, r$`0.5_VC`)$conf.int,
t.test(r$`1_OJ`, r$`1_VC`)$conf.int,
t.test(r$`2_OJ`, r$`2_VC`)$conf.int
)
|
/toothgrowth.R
|
no_license
|
isuruceanu/statistical-inference
|
R
| false | false | 890 |
r
|
data <-
data.frame(
date = as.Date(c("2017-04-14", "2017-05-11", "2017-06-08", "2017-07-21", "2017-08-10", "2017-09-14",
"2017-10-12", "2017-11-09", "2017-12-07", "2018-01-11", "2018-02-08", "2018-03-08")),
height =c(64, 65.1, 66.9, 68.1, 70.3, 71.3, 73.5, 73.8, 74.5, 75.8, 76, 77),
weight = c(6.74, 7.42, 7.96, 8.14, 8.38, 8.6, 8.96, 9.3, 9.7, 10.1, 10.6, 10.8),
chest = c(41.8, rep(NA, 5), 44.6, rep(NA, 5)),
head = c(41, rep(NA, 5), 45.3, rep(NA, 5))
)
|
/child1-height-weight.R
|
no_license
|
iypod/sampleData
|
R
| false | false | 508 |
r
|
##############################################################################
### Factor analysis code
### Written by : Ashish Goel
### Last update: 4 August 2017
### Last Update By: Ashish Goel
##############################################################################
#################### loading the libraries ###################################
require(car)
require(reshape2)
require(lubridate)
require(fmsb)
require(MASS)
library(forecast)
library(fpp)
library(svDialogs)
library(psych)
library(GPArotation)
##############################################################################
################## setting the library ######################################
setwd("C:\\Users\\C987706\\Desktop\\china")
getwd()
##############################################################################
################## INSTRUCTIONS FOR RUNNING THE CODE #######################
#### raw_data must have dependent variable, it is needed for multicollinearity check
#### raw_data must have only a date variable
#### no year, quarter or any other time variable should be there
################## INSTRUCTIONS END HERE ##################################
################## USER INPUTS REQUIRED #####################################
input_date_format<-"%m/%d/%Y" #### use "/" or "-" as separator, as needed
### "%d-%b-%y" eg: "01-Jan-12" (1 juanuary 2012)
### "%d-%B-%y" eg: "01-January-12" (1 juanuary 2012)
### "%d-%m-%y" eg: "01-01-12" (1 juanuary 2012) and so on
date_var<-"Month" #### name of the date variable in the raw data
dep_var<-"Beer.production" #### name of the dependent variable
global_start<-2009 #### year from which data needs to be considered/start
global_end<-2016 #### year up to which data is available or to be considered
################ import the data for Factor analysis ########################
raw_data<-read.csv("raw_data for conducting FA.csv")
raw_data[,date_var]<-as.Date(as.character(raw_data[,date_var]),input_date_format)
##############################################################################
######## separating numeric or integer variables for factor analysis #########
classes<-ifelse(sapply(raw_data,class) %in% c("numeric","integer") ,1,0)
data_factor<-raw_data[classes==0]
data<-cbind(raw_data[,date_var],raw_data[classes==1])
colnames(data)[1]<-date_var
############################## SELECT VARIABLES #############################
#### NOTE: select dependent variable also
var_list <- colnames(data)
vars <- dlgList(var_list,multiple=T,title="Select DEP & INDEPENDENT variables")$res
write.csv(vars,"variables considered for Factor Analysis.csv")
#############################################################################
################ creating data for factor analysis #########################
fa.data<-data[vars]
###### removing multicollinearity amongst the selected variables ############
Model_Summary_2 <- data.frame(Iteration = integer(),
Variables = character(),
Estimate = double(),
Std.Error = double(),
tValue = double(),
PValue = double(),
RSqaure = double(),
Adj.RSqaure = double())
lm_formula<-as.formula(paste0(dep_var,"~."))
M1 <- lm(lm_formula,data= fa.data)
summary_2 <- data.frame(summary(M1)$coefficients)
summary_2$variables <- row.names(summary_2)
summary_2$RSqaure <- summary(M1)$r.squared
summary_2$AdjRSqaure <- summary(M1)$adj.r.squared
row.names(summary_2) <- NULL
q<- c(summary_2$variables)
q<- q[-1]
final_data <- fa.data[q]
######################### scaling the data for factor analysis ###############
data_scale <-as.data.frame(scale(final_data))
######################### finding number of factors ########################
parallel <- fa.parallel(data_scale, fm = 'minres', fa = 'fa')
num_f<-dlgInput("Enter number of factors based on parallel analysis", Sys.info()["user"])$res
num_f<-as.numeric(num_f)
rotation<-dlgInput("Enter roattion type for FA: 'oblimin' or 'Varimax'", Sys.info()["user"])$res
cut_off<-dlgInput("Enter cutoff for factor loadings", Sys.info()["user"])$res
cut_off<-as.numeric(cut_off)
fffactor <- fa(data_scale,nfactors =num_f,rotate = rotation,fm="minres")
print(fffactor)
print(fffactor$loadings,cutoff = cut_off)
######################## PART -2 #############################################
#### ONLY TO BE RUN AFTER FIXING PART-1
################################################################################
################################################################################
#### the part below is to be used when variables are selected based on
#### different iteration of cutoff, rotation criteria or number of factors
#### THIS PART WILL SAVE FACTOR LOADINGS OF SELECTED VARIABLES AND
#### WILL CREATE FACTORS BASED ON SELECTED VARIABLES AND LOADING CUTOFF
################################################################################
loadings_fa<-as.data.frame(fffactor$loadings[,1:ncol(fffactor$loadings)])
loadings_fa$variables<-rownames(loadings_fa)
rownames(loadings_fa)<-NULL
write.csv(loadings_fa,file=paste0("Factor loadings without cutoff"
," & ",rotation,"-rotation type",".csv"))
factors_created<-loadings_fa
for (i in 1:(ncol(factors_created)-1)){
factors_created[,i]<-ifelse(abs(factors_created[,i])<=cut_off,NA,factors_created[,i])
}
write.csv(factors_created,file=paste0("Factor loadings with loading cutoff-",
as.character(cut_off)," & ",rotation,"-rotation type",".csv"))
factors_created<-loadings_fa
for (i in 1:(ncol(factors_created)-1)){
factors_created[,i]<-ifelse(abs(factors_created[,i])<=cut_off,0,factors_created[,i])
}
v_series<-as.vector(factors_created$variables)
m<-data_scale[v_series]
colnames(m)
dim(m)
num<-ncol(m)
factors_created<-factors_created[,1:(ncol(factors_created)-1)]
var<-sapply(factors_created,var)
factors_created<-factors_created[var>0]
Factors<-data.frame(Factor_1=as.vector((as.matrix(m))%*%(as.matrix(factors_created[,1]))))
if(ncol(factors_created)>1){
for(i in 2:ncol(factors_created)){
Factors<-cbind(Factors,as.vector((as.matrix(m))%*%(as.matrix(factors_created[,i]))))
}
}
colnames(Factors)<-c(paste0("Factor_",seq(1:ncol(Factors))))
write.csv(Factors,"factors created based on variables selected and loadings after applying cutoff.csv")
|
/Forecasting Technique Master/Variable Selection/Factor Analysis/Codes/GFF factor analysis Part 1.R
|
no_license
|
Sayan-Pal585/Time-Series-Analysis
|
R
| false | false | 6,534 |
r
|
\name{combine}
\alias{combine}
\title{combine}
\description{combine lists or character strings}
\usage{combine(x, y)}
\arguments{
\item{x}{x}
\item{y}{y}
}
\author{Toby Dylan Hocking <toby.hocking@r-project.org> [aut, cre], Keith Ponting [aut], Thomas Wutzler [aut], Philippe Grosjean [aut], Markus Müller [aut], R Core Team [ctb, cph]}
|
/man/combine.Rd
|
no_license
|
tdhock/inlinedocs
|
R
| false | false | 350 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_preprocess.R
\name{x2y_df2list}
\alias{x2y_df2list}
\title{convert x-y associations}
\usage{
x2y_df2list(x2ydf, xcol = 1, ycol = 2)
}
\arguments{
\item{x2ydf}{data.frame of x-y associations}
\item{xcol}{col of x in x2ydf}
\item{ycol}{col of y in x2ydf}
}
\value{
a list of x-y associations
}
\description{
concert x-y associations (e.g. disease-gene associations) from data.frame
to list
}
\examples{
options(stringsAsFactors = FALSE)
d2g_fundo_sample<-read.table(text = "DOID:5218 IL6
DOID:8649 EGFR
DOID:8649 PTGS2
DOID:8649 VHL
DOID:8649 ERBB2
DOID:8649 PDCD1
DOID:8649 KLRC1
DOID:5214 MPZ
DOID:5214 EGR2
DOID:5210 AMH")
d2g_fundo_list<-x2y_df2list(d2g_fundo_sample)
}
\author{
Peng Ni, Min Li
}
|
/man/x2y_df2list.Rd
|
no_license
|
PengNi/dSimer
|
R
| false | true | 792 |
rd
|
\name{mllMRH2}
\alias{mllMRH2}
\title{
Minus loglikelihood of a (bivariate) MRHawkes model with Rosenblatt
residuals
}
\description{
Calculates the minus loglikelihood of a (bivariate) MRHawkes model with
given immigration hazard functions \eqn{\mu}, common offspring density
functions \eqn{h} and branching ratios \eqn{\eta} for event times and
event types \code{data} on interval \eqn{[0,cens]}. The same as
\code{mllMRH} although this version also returns the Rosenblatt residuals
for goodness-of-fit assessment of the event times.
}
\usage{
mllMRH2(data, cens, par,
h1.fn = function(x, p) 1 / p * exp( - x / p),
h2.fn = function(x, p) 1 / p * exp( - x / p),
mu1.fn = function(x, p){
exp(dweibull(x, shape = p[1], scale = p[2], log = TRUE) -
pweibull(x, shape = p[1], scale = p[2], lower.tail = FALSE,
log.p = TRUE))
},
mu2.fn = function(x, p){
exp(dweibull(x, shape = p[1], scale = p[2], log = TRUE) -
pweibull(x, shape = p[1], scale = p[2], lower.tail = FALSE,
log.p = TRUE))
},
H1.fn = function(x, p) pexp(x, rate = 1 / p),
H2.fn = function(x, p) pexp(x, rate = 1 / p),
Mu1.fn = function(x, p){
- pweibull(x, shape = p[1], scale = p[2], lower.tail = FALSE,
log.p = TRUE)
},
Mu2.fn = function(x, p){
- pweibull(x, shape = p[1], scale = p[2], lower.tail = FALSE,
log.p = TRUE)
})
}
\arguments{
\item{data}{
A two column matrix. The first column contains the event times sorted in
ascending order. The second column contains the corresponding event type
with the label one or two.
}
\item{cens}{
A scalar. The censoring time.
}
\item{par}{
A numeric vector. Contains the ten parameters of the model, in order of
the immigration parameters \eqn{\mu(.)} for the two renewal distributions,
the two offspring parameters \eqn{h(.)} and lastly the four branching
ratios \eqn{\eta}.
}
\item{h1.fn}{
A (vectorized) function. The offspring density function for type one
events.
}
\item{h2.fn}{
A (vectorized) function. The offspring density function for type two
events.
}
\item{mu1.fn}{
A (vectorized) function. The immigration hazard function for events of type
one.
}
\item{mu2.fn}{
A (vectorized) function. The immigration hazard function for events of type
two.
}
\item{H1.fn}{
A (vectorized) function. Its value at \code{t} gives the integral of
the offspring density function from 0 to \code{t} for type one events.
}
\item{H2.fn}{
A (vectorized) function. Its value at \code{t} gives the integral of
the offspring density function from 0 to \code{t} for type two events.
}
\item{Mu1.fn}{
A (vectorized) function. Its value at \code{t} gives the integral of
the immigrant hazard function from 0 to \code{t} for type one events.
}
\item{Mu2.fn}{
A (vectorized) function. Its value at \code{t} gives the integral of
the immigrant hazard function from 0 to \code{t} for type two events.
}
}
\details{
Calculate the MRHawkes point process Rosenblatt residuals
}
\value{
\item{mll}{minus log-likelihood}
\item{W}{Rosenblatt residuals of observed event times}
}
\author{
Tom Stindl <t.stindl@unsw.edu.au>
Feng Chen <feng.chen@unsw.edu.au>
}
\seealso{
\code{mllMRH}}
\examples{
\donttest{
n <- 1000
data <- cbind(sort(runif(n,0,1000)),
sample(1:2, size = n, replace = TRUE))
tmp <- mllMRH2(data = data, cens = 1001,
par = c(1,1,1,1,1,1,0.5,0.2,0.2,0.3))
pp <- ppoints(n)
par(mfrow=c(1,2))
plot(quantile(tmp$W,prob=pp),pp,type="l",
main="Uniform QQ plot",
xlab="Sample quantiles",ylab="Theoretical quantiles")
abline(a = 0, b = 1, col = 2)
a <- acf(tmp$W, main = "ACF Plot")
ks.test(tmp$W,"punif")
Box.test(tmp$W,lag=tail(a$lag,1))
}
}
\keyword{ residual }
\keyword{ point process }
|
/man/mllMRH2.Rd
|
no_license
|
cran/MRHawkes
|
R
| false | false | 4,111 |
rd
|
/Analyse_graphique_CDmean.R
|
no_license
|
MARIEMNSIBI/hello-world
|
R
| false | false | 7,002 |
r
| ||
source("./roc_curve.R")
library(ggplot2)
# dplyr provides select(), group_by() and summarise() used below; loaded
# explicitly here in case roc_curve.R does not attach it.
library(dplyr)
base_dir <- "~/single/6.3_sims_exp"
de_dir <- file.path(base_dir, "de")
t2g <- read.table("~/transcriptomes/Homo_sapiens.GRCh38.rel79.transcripts", stringsAsFactors = FALSE,
sep = "\t")
colnames(t2g) <- c("transcripts", "genes")
deseq_1 <- readRDS(file.path(de_dir, "deseq2_1.rds"))
deseq_2 <- readRDS(file.path(de_dir, "deseq2_2.rds"))
deseq_3 <- readRDS(file.path(de_dir, "deseq2_3.rds"))
LR_tx <- readRDS(file.path(de_dir, "LR_deseq.rds"))
deseq_1$genes <- rownames(deseq_1)
deseq_2$genes <- rownames(deseq_2)
deseq_3$genes <- rownames(deseq_3)
deseq_1$deseq_1 <- deseq_1$p_val
deseq_2$deseq_2 <- deseq_2$p_val
deseq_3$deseq_3 <- deseq_3$p_val
LR_tx$LR_tx <- LR_tx$pval
table <- merge(deseq_1, deseq_2, by = "genes", all = TRUE)
table <- merge(deseq_3, table, by = "genes", all = TRUE)
table <- merge(LR_tx, table, by = "genes", all = TRUE)
table <- select(table, genes, deseq_1, deseq_2, deseq_3, LR_tx)
perturb <- read.table(file.path(base_dir, "perturb.tsv"), header = TRUE)
t2g$perturbed <- t2g$transcripts %in% perturb$perturbed_transcripts
perturb <- t2g %>% group_by(genes) %>% summarise(perturb = any(perturbed))
table <- merge(perturb, table, by = "genes", all = TRUE)
saveRDS(table, file.path(de_dir, "tximp_comparison.rds"))
deseq_1 <- calculate_fdr(table$perturb, table$deseq_1, "DESeq2 Counts")
deseq_2 <- calculate_fdr(table$perturb, table$deseq_2, "DESeq2 Scaled TPM")
deseq_3 <- calculate_fdr(table$perturb, table$deseq_3, "DESeq2 Length Scaled TPM")
LR_tx <- calculate_fdr(table$perturb, table$LR_tx, "log reg - transcripts")
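# calculate_fdr() is defined in the sourced roc_curve.R (not shown here); the
# plotting code below assumes each returned object exposes $sen (sensitivity)
# and $fdr (false discovery rate) vectors, as used in the lapply() call.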
fdrs <- list(deseq_1, deseq_2, deseq_3, LR_tx)
names <- c("DESeq2 - tximport Counts", "DESeq2 - tximport Scaled TPM", "DESeq2 - tximport Length Scaled TPM",
"log reg - transcripts")
fdrs <- lapply(1:length(fdrs), function(x) data.frame(sensitivity = fdrs[[x]]$sen,
FDR = fdrs[[x]]$fdr, Method = names[[x]]))
fdrs <- do.call(rbind, fdrs)
p <- ggplot(data = fdrs, aes(x = FDR, y = sensitivity, colour = Method)) + geom_path() +
scale_colour_manual(values = c("#F8766D", "#CC0066", "#993366", "blue"))
png(file.path(base_dir, "tximp_fdrs.png"), height = 3000, width = 3500, res = 600) # png device matches the .png file name
p <- p + theme_grey(10)
print(p)
dev.off()
p <- p + coord_cartesian(xlim = c(0, 0.25), ylim = c(0, 0.75), expand = TRUE)
png(file.path(base_dir, "tximp_fdrs2.png"))
print(p)
dev.off()
|
/NYMP_2018/simulations/RSEM/R/tximp_roc.R
|
permissive
|
ebecht/logistic_regresion_for_GDE
|
R
| false | false | 2,386 |
r
|
source("./roc_curve.R")
library(ggplot2)
base_dir <- "~/single/6.3_sims_exp"
de_dir <- file.path(base_dir, "de")
t2g <- read.table("~/transcriptomes/Homo_sapiens.GRCh38.rel79.transcripts", stringsAsFactors = FALSE,
sep = "\t")
colnames(t2g) <- c("transcripts", "genes")
deseq_1 <- readRDS(file.path(de_dir, "deseq2_1.rds"))
deseq_2 <- readRDS(file.path(de_dir, "deseq2_2.rds"))
deseq_3 <- readRDS(file.path(de_dir, "deseq2_3.rds"))
LR_tx <- readRDS(file.path(de_dir, "LR_deseq.rds"))
deseq_1$genes <- rownames(deseq_1)
deseq_2$genes <- rownames(deseq_2)
deseq_3$genes <- rownames(deseq_3)
deseq_1$deseq_1 <- deseq_1$p_val
deseq_2$deseq_2 <- deseq_2$p_val
deseq_3$deseq_3 <- deseq_3$p_val
LR_tx$LR_tx <- LR_tx$pval
table <- merge(deseq_1, deseq_2, by = "genes", all = TRUE)
table <- merge(deseq_3, table, by = "genes", all = TRUE)
table <- merge(LR_tx, table, by = "genes", all = TRUE)
table <- select(table, genes, deseq_1, deseq_2, deseq_3, LR_tx)
perturb <- read.table(file.path(base_dir, "perturb.tsv"), header = TRUE)
t2g$perturbed <- t2g$transcripts %in% perturb$perturbed_transcripts
perturb <- t2g %>% group_by(genes) %>% summarise(perturb = any(perturbed))
table <- merge(perturb, table, by = "genes", all = TRUE)
saveRDS(table, file.path(de_dir, "tximp_comparison.rds"))
deseq_1 <- calculate_fdr(table$perturb, table$deseq_1, "DESeq2 Counts")
deseq_2 <- calculate_fdr(table$perturb, table$deseq_2, "DESeq2 Scaled TPM")
deseq_3 <- calculate_fdr(table$perturb, table$deseq_3, "DESeq2 Length Scaled TPM")
LR_tx <- calculate_fdr(table$perturb, table$LR_tx, "log reg - transcripts")
fdrs <- list(deseq_1, deseq_2, deseq_3, LR_tx)
names <- c("DESeq2 - tximport Counts", "DESeq2 - tximport Scaled TPM", "DESeq2 - tximport Length Scaled TPM",
"log reg - transcripts")
fdrs <- lapply(1:length(fdrs), function(x) data.frame(sensitivity = fdrs[[x]]$sen,
FDR = fdrs[[x]]$fdr, Method = names[[x]]))
fdrs <- do.call(rbind, fdrs)
p <- ggplot(data = fdrs, aes(x = FDR, y = sensitivity, colour = Method)) + geom_path() +
scale_colour_manual(values = c("#F8766D", "#CC0066", "#993366", "blue"))
tiff(file.path(base_dir, "tximp_fdrs.png"), height = 3000, width = 3500, res = 600)
p <- p + theme_grey(10)
print(p)
dev.off()
p <- p + coord_cartesian(xlim = c(0, 0.25), ylim = c(0, 0.75), expand = TRUE)
png(file.path(base_dir, "tximp_fdrs2.png"))
print(p)
dev.off()
|
.onLoad <- function(libname, pkgname){
def_netrc <- ifelse(.Platform$OS.type == "windows", "~/_netrc", "~/.netrc")
if(!file.exists(def_netrc)){
packageStartupMessage("A .netrc file is required to connect to ImmuneSpace. For more information on how to create one, refer to the Configuration section of the introduction vignette.")
}
if(.Platform$OS.type == "windows")#set ca bundle file path for windows
options(RCurlOptions = list(cainfo = system.file("ssl_certs/ca-bundle.crt", package = pkgname)))
}
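# For reference, a .netrc (or _netrc on Windows) entry usually takes the form
# sketched below; the hostname and credentials are illustrative placeholders
# only, not values required by this file:
#   machine www.immunespace.org
#   login   user@example.com
#   password mySecret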
|
/R/zzz.R
|
no_license
|
TheGilt/ImmuneSpaceR
|
R
| false | false | 520 |
r
|
|
# --------------------------------------------------------------
# LAB 7 (R vector manipulations)
# --------------------------------------------------------------
# Complete the function bodies as indicated by the comments under the
# function definition. Make sure you pass the tests before you push
# your lab to github.
# To run the tests, type in the terminal "Rscript lab7.R" and press enter.
# Rscript is a version of the R interpreter that allows you
# to execute the R code contained in a given file
# (Python analog: "python lab7.py")
# The grading of this lab will be automated using the R suite of test
# RUnit, which we import using the R way to load packages:
library(RUnit)
# In this test, we will make use of the following function to
# have a nicer output of the error when a test fails:
errmsg = function(err) print(paste("ERROR: ",err))
# The test will be indicated after the function body you'll need
# to implement.
# Good luck with your first R lab!
# --------------------------------------------------------------
# EXERCISE 1
# --------------------------------------------------------------
createVector1 = function(n) {
# Implement this function body so that it returns
# the vector (1,..., n-1, n, n-1, n-2, ..., 1), where n is
# a natural number (1,2,3,...) passed as the function argument.
if (n==1) {
return (c(1))
}
return (c(seq(from=1, to=n), seq(from=n-1, to=1)))
}
tryCatch ( checkEquals(createVector1(3), c(1,2,3,2,1)),
error = function(err) errmsg(err))
tryCatch ( checkEquals(createVector1(2), c(1,2,1)),
error = function(err) errmsg(err))
tryCatch ( checkEquals(createVector1(1), c(1)),
error = function(err) errmsg(err))
# --------------------------------------------------------------
# EXERCISE 2
# --------------------------------------------------------------
createVector2 = function(a,b,c,k,l,m) {
# Implement this function body so that it returns
# the vector (a,a,...,a,b,b,...,b,c,c,...c), where a is
# repeated k times, b is repeated l times, and c is repeated
# m times.
return (c(rep(a, k), rep(b, l), rep(c, m)))
}
# Tests:
tryCatch (
checkEquals(
createVector2('x','y','z', 2, 3, 4),
c('x','x','y','y','y','z','z','z','z')
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector2('abc','1','2', 2, 1, 2),
c('abc','abc','1','2', '2')
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector2('a','121','2c', 1, 1, 1),
c('a','121','2c')
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector2('1','2','3', 0, 1, 1),
c('2','3')
),
error = function(err) errmsg(err)
)
# --------------------------------------------------------------
# EXERCISE 3
# --------------------------------------------------------------
createVector3 = function(label, n) {
# Implement this function body so that it returns
# the character vector (label 1, label 2, ..., label n), where
# label is a string and n is an integer.
return (paste(label, 1:n))
}
# Tests:
tryCatch (
checkEquals(
createVector3('student', 3),
c('student 1', 'student 2', 'student 3')
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector3('item', 2),
c('item 1', 'item 2')
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector3('item', 1),
c('item 1')
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector3('3', 1),
c('3 1')
),
error = function(err) errmsg(err)
)
# --------------------------------------------------------------
# EXERCISE 4
# --------------------------------------------------------------
createVector4 = function(a, b, s) {
# Implement this function body so that it returns
# the numeric vector
# (exp(a)cos(a), exp(a+s)cos(a+s), exp(a+2s)cos(a+2s),...,exp(a+ns)cos(a+ns))
# where a < b, a+ns <= b, and a+(n+1)s > b
returnVal = seq(from=a, to=b, by=s)
returnVal = exp(returnVal)*cos(returnVal)
return (returnVal)
}
# Tests:
tryCatch (
checkEquals(
createVector4(3,4,1),
c(exp(3)*cos(3), exp(4)*cos(4))
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector4(1,5,3),
c(exp(1)*cos(1), exp(4)*cos(4))
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector4(1,5,2),
c(exp(1)*cos(1), exp(3)*cos(3), exp(5)*cos(5))
),
error = function(err) errmsg(err)
)
tryCatch (
checkEquals(
createVector4(3,5,3),
c(exp(3)*cos(3))
),
error = function(err) errmsg(err)
)
|
/lab7.R
|
no_license
|
harishkumar92/stat133
|
R
| false | false | 4,840 |
r
|
|
test_that("ContCV", {
n = 40; p = 5; Y = rep(0,n)
x = matrix(rnorm(n*p,0,5), n, p)
X = scale(data.frame(x,X6=x[,4]+x[,5]*0.5,X7=x[,4]*0.2-x[,5])); #Adjacency(X)
Y = X[,4:7]%*%c(3,2,3,-2)
X[,1] = 0; X[c(4,8),1] = c(1, -1);
# expect_error(CV.Cont(X, Y, "network", robust = TRUE, debugging = FALSE))
out = CV.Cont(X, Y, "network", robust = TRUE)
expect_equal(out$penalty, "network")
expect_equal(ncol(out$lambda), 2)
# expect_error(CV.Cont(X, Y, "mcp", lamb.2=0, robust = TRUE, debugging = FALSE))
out = CV.Cont(X, Y, "mcp", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "mcp")
expect_null(ncol(out$lambda))
# expect_error(CV.Cont(X, Y, "lasso", lamb.2=0, robust = TRUE, debugging = FALSE))
out = CV.Cont(X, Y, "lasso", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "lasso")
X[,1] = 0;
expect_error(CV.Cont(X, Y, "network", robust = TRUE), "standard deviation equal zero")
out = CV.Cont(X, Y, "mcp", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "mcp")
out = CV.Cont(X, Y, "lasso", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "lasso")
})
|
/tests/testthat/test-ContCV.R
|
no_license
|
cran/regnet
|
R
| false | false | 1,143 |
r
|
test_that("ContCV", {
n = 40; p = 5; Y = rep(0,n)
x = matrix(rnorm(n*p,0,5), n, p)
X = scale(data.frame(x,X6=x[,4]+x[,5]*0.5,X7=x[,4]*0.2-x[,5])); #Adjacency(X)
Y = X[,4:7]%*%c(3,2,3,-2)
X[,1] = 0; X[c(4,8),1] = c(1, -1);
# expect_error(CV.Cont(X, Y, "network", robust = TRUE, debugging = FALSE))
out = CV.Cont(X, Y, "network", robust = TRUE)
expect_equal(out$penalty, "network")
expect_equal(ncol(out$lambda), 2)
# expect_error(CV.Cont(X, Y, "mcp", lamb.2=0, robust = TRUE, debugging = FALSE))
out = CV.Cont(X, Y, "mcp", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "mcp")
expect_null(ncol(out$lambda))
# expect_error(CV.Cont(X, Y, "lasso", lamb.2=0, robust = TRUE, debugging = FALSE))
out = CV.Cont(X, Y, "lasso", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "lasso")
X[,1] = 0;
expect_error(CV.Cont(X, Y, "network", robust = TRUE), "standard deviation equal zero")
out = CV.Cont(X, Y, "mcp", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "mcp")
out = CV.Cont(X, Y, "lasso", lamb.2=0, robust = TRUE)
expect_equal(out$penalty, "lasso")
})
|
# Runs DECoN over the datasets configured at [datasets_params_file]
#USAGE: Rscript runDecon.R [decon_params_file] [datasets_params_file]
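# For reference, a minimal decon params YAML could look like the sketch below.
# The field names are exactly those read later in this script; the values are
# illustrative only and not taken from any shipped configuration:
#   deconFolder: /path/to/DECoN
#   outputFolder: /path/to/output     # optional; a default under ./output is built otherwise
#   execution: full                   # or onlyPrecalcPhase / skipPrecalcPhase
#   precalcFolder: /path/to/precalc   # only read when execution is skipPrecalcPhase
#   mincorr: 0.98
#   mincov: 100
#   transProb: 0.01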
print(paste("Starting at", startTime <- Sys.time()))
suppressPackageStartupMessages(library(yaml))
source(if (basename(getwd()) == "optimizers") "../utils/utils.r" else "utils/utils.r") # Load utils functions
# Saves csv file with all failed exons (in a common format)
saveExonFailures <- function(deconFailuresFile, bedFile, bamsFolder, outputFolder){
# load input data
listOfsamples <- sub(".bam.bai", "", list.files(bamsFolder, pattern = "*.bai"))
outputFile <- file.path(outputFolder, "failedROIs.csv")
failuresData <- read.table(deconFailuresFile, header = T, sep = "\t", stringsAsFactors=F)
bedData <- read.table(bedFile, header = F, sep = "\t", stringsAsFactors=F)
# define output dataset
output <- data.frame(matrix(ncol = 5, nrow = 0))
colnames(output) <- c("SampleID", "Chr", "Start", "End", "Gene")
# iterate over failures file to build output data
for(i in 1:nrow(failuresData)) {
sampleName <- failuresData[i,"Sample"]
if (failuresData[i,"Type"] == "Whole sample") {
# Add all bed lines (all exons failed)
output <- rbind(output, data.frame(SampleID = sampleName, Chr = bedData[, 1], Start = bedData[, 2], End = bedData[, 3], Gene = bedData[, 4]))
} else if (failuresData[i,"Type"] == "Whole exon"){
# Add one line (failed exon) for each sample
lineNumber <- failuresData[i,"Exon"]
if (sampleName == "All"){
lineNumber <- failuresData[i,"Exon"]
size <- length(listOfsamples)
output <- rbind(output, data.frame(SampleID = listOfsamples,
Chr = rep(bedData[lineNumber, 1], len = size),
Start = rep(bedData[lineNumber, 2], len = size),
End = rep(bedData[lineNumber, 3], len = size),
Gene = rep(bedData[lineNumber, 4], len = size)))
} else {
output <- rbind(output, data.frame(SampleID = sampleName, Chr = bedData[lineNumber, 1], Start = bedData[lineNumber, 2], End = bedData[lineNumber, 3], Gene = bedData[lineNumber, 4]))
}
} else
message("Error: Failure type not recognised")
}
# save output file
write.table(output, outputFile, sep="\t", row.names=FALSE, quote = FALSE)
}
# Read args
args <- commandArgs(TRUE)
print(args)
if(length(args)>0) {
deconParamsFile <- args[1]
datasetsParamsFile <- args[2]
} else {
deconParamsFile <- "deconParams.yaml"
datasetsParamsFile <- "../../datasets.yaml"
}
#Load the parameters file
deconParams <- yaml.load_file(deconParamsFile)
datasets <- yaml.load_file(datasetsParamsFile)
print(paste("Params for this execution:", list(deconParams)))
# extract decon params
deconFolder <- file.path(deconParams$deconFolder)
# Set decon as working directory. Necessary to make decon packrat work
currentFolder <- getwd()
print(deconFolder)
setwd(deconFolder)
# go over datasets and run decon for those which are active
for (name in names(datasets)) {
dataset <- datasets[[name]]
if (dataset$include){
print(paste("Starting DECoN for", name, "dataset", sep=" "))
# extract fields
bamsDir <- file.path(dataset$bams_dir)
bedFile <- file.path(dataset$bed_file)
fastaFile <- file.path(dataset$fasta_file)
# Create output folder
if (!is.null(deconParams$outputFolder)) {
outputFolder <- deconParams$outputFolder
} else
outputFolder <- file.path(currentFolder, "output", paste0("decon-", name))
if (is.null(deconParams$execution) || deconParams$execution != "skipPrecalcPhase") {
unlink(outputFolder, recursive = TRUE);
dir.create(outputFolder)
}
# build input/output file paths
ouputBams <- file.path(outputFolder, "output.bams")
ouputRData <- file.path(outputFolder, "output.bams.RData")
failuresFile <- file.path(outputFolder, "failures");
calls <- file.path(outputFolder, "calls");
# Do pre-calc part of the algorithm
if (is.null(deconParams$execution) || deconParams$execution != "skipPrecalcPhase") {
cmd <- paste("Rscript", "ReadInBams.R", "--bams", bamsDir, "--bed", bedFile, "--fasta", fastaFile, "--out", ouputBams)
print(cmd); system(cmd)
print("ReadInBams.R finished");
if (!is.null(deconParams$execution) && deconParams$execution == "onlyPrecalcPhase") {
print(paste("DECoN (Only pre-calc phase) for", name, "dataset finished", sep=" "))
cat("\n\n\n")
quit()
}
} else { # skipPrecalcPhase mode: read previous results
print(paste("DECoN Skipping pre-calc phase for", name, "dataset finished", sep=" "))
# Redefine outputRData taking precalc path
ouputRData <- file.path(deconParams$precalcFolder, "output.bams.RData")
}
# Call part 2
cmd <- paste("Rscript", "IdentifyFailures.R", "--Rdata", ouputRData, "--mincorr", deconParams$mincorr,
"--mincov", deconParams$mincov, "--out", failuresFile)
print(cmd); system(cmd)
print("IdentifyFailures.R finished");
# Call part 3
cmd <- paste("Rscript makeCNVcalls.R", "--Rdata", ouputRData, "--transProb", deconParams$transProb,
"--out", calls)
print(cmd); system(cmd)
print("makeCNVcalls.R finished");
# Save results in GRanges format
message("Saving CNV GenomicRanges and Failures results")
saveResultsFileToGR(outputFolder, "calls_all.txt", chrColumn = "Chromosome")
saveExonFailures(file.path(outputFolder, "failures_Failures.txt"), bedFile, bamsDir, outputFolder)
print(paste("DECoN for", name, "dataset finished", sep=" "))
cat("\n\n\n")
}
}
print(paste("Finishing at", endTime <- Sys.time()))
cat("\nElapsed time:")
print(endTime - startTime)
|
/algorithms/decon/runDecon.r
|
permissive
|
TranslationalBioinformaticsIGTP/CNVbenchmarkeR
|
R
| false | false | 6,135 |
r
|
|
#===========================================================================
# Library
#===========================================================================
library(shiny)
library(dplyr)
library(data.table)
#===========================================================================
# Data Prepare for selectInput,sliderInput,numericInput
#===========================================================================
path = ' Your File Path ' ###### Check 1 ######
#Path Example(Mac): '/Users/kristen/Desktop/'
#Path Example(Windows): 'C:/Desktop/'
#---- Load Data ----
Titanic_train <- fread(file.path(path, "Titanic_train.csv")) %>% select(-Survived)
Titanic_test <- fread(file.path(path, "Titanic_test.csv"))
#---- Bind Data ----
Titanic_data <- rbind(Titanic_train,Titanic_test)
rm(Titanic_train,Titanic_test)
#---- selectInput for classification variable ----
pclass <- sort(unique(Titanic_data$PassengerClass))
gender <- sort(unique(Titanic_data$Gender))
embarked <- sort(unique(Titanic_data$PortEmbarkation[Titanic_data$PortEmbarkation != ""]))
#---- sliderInput ----
fare <- data.frame( max = max(Titanic_data$FarePrice ,na.rm =TRUE),
min = min(Titanic_data$FarePrice ,na.rm =TRUE) )
#---- numericInput ----
age <- data.frame( max = floor(max(Titanic_data$Age ,na.rm =TRUE)),
min = floor(min(Titanic_data$Age ,na.rm =TRUE)) )
sibSp <- data.frame( max = max(as.numeric(Titanic_data$SiblingSpouse),na.rm =TRUE),
min = min(as.numeric(Titanic_data$SiblingSpouse),na.rm =TRUE) )
parch <- data.frame( max = max(as.numeric(Titanic_data$ParentChild),na.rm =TRUE),
min = min(as.numeric(Titanic_data$ParentChild),na.rm =TRUE) )
#===========================================================================
# Shiny Layout
#===========================================================================
shinyUI(fluidPage(
titlePanel("Titanic Survival Prediction"),
sidebarLayout(
sidebarPanel(
selectInput("PassengerClass", "Passenger Class : ", choices=pclass),
selectInput("Gender", "Gender : ", choices=gender),
selectInput("PortEmbarkation", "Port Embarkation : ", choices=embarked),
numericInput("Age", "Age : ", min = age$min, max = age$max, value =age$max, step = 0.5),
numericInput("SiblingSpouse", "Sibling Spouse : ", min = sibSp$min, max = sibSp$max, value =sibSp$max, step = 1),
numericInput("ParentChild", "Parent Child : ", min = parch$min, max = parch$max, value =parch$max, step = 1),
sliderInput("FarePrice", "Fare Price : ", min = fare$min, max = fare$max, value = fare$max, step = 0.0001, sep='')
),
mainPanel( imageOutput("result_plot") )
)
))
|
/Shiny_Titanic/ui.R
|
no_license
|
jasoncyr/Azureml-shiny-app
|
R
| false | false | 2,725 |
r
|
|
# Register a set of help topics for dispatching from F1 help
register_help_topics <- function(type = c("module", "class"), topics) {
# pick the right environment for this type
type <- match.arg(type)
envir <- switch(type,
module = .module_help_topics,
class = .class_help_topics
)
# assign the list into the environment
for (name in names(topics))
assign(name, topics[[name]], envir = envir)
}
# Helper function to define topics given a page URL and list of symbols
help_topics <- function(page, prefix, symbols) {
names <- paste(prefix, symbols, sep = ".")
topics <- rep_len(page, length(names))
names(topics) <- names
topics
}
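# Example of combining the two helpers above (the page URL, prefix and symbols
# are hypothetical illustrations, not registrations actually made here):
# register_help_topics("module", help_topics(
#   page    = "https://www.tensorflow.org/api_docs/python/tf/train",
#   prefix  = "tensorflow.python.training.training",
#   symbols = c("GradientDescentOptimizer", "AdamOptimizer")
# ))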
# Generic help_handler returned from .DollarNames -- dispatches to various
# other help handler functions
help_handler <- function(type = c("completion", "parameter", "url"), topic, source, ...) {
type <- match.arg(type)
if (type == "completion") {
help_completion_handler.tensorflow.builtin.object(topic, source)
} else if (type == "parameter") {
help_completion_parameter_handler.tensorflow.builtin.object(source)
} else if (type == "url") {
help_url_handler.tensorflow.builtin.object(topic, source)
}
}
# Return help for display in the completion popup window
help_completion_handler.tensorflow.builtin.object <- function(topic, source) {
# convert source to object if necessary
source <- source_as_object(source)
if (is.null(source))
return(NULL)
# check for property help
help <- import("tftools.help")
description <- help$get_property_doc(source, topic)
# check for standard help
if (is.null(description)) {
inspect <- import("inspect")
description <- inspect$getdoc(py_get_attr_silent(source, topic))
}
# default to no description
if (is.null(description))
description <- ""
matches <- regexpr(pattern ='\n', description, fixed=TRUE)
if (matches[[1]] != -1)
description <- substring(description, 1, matches[[1]])
description <- convert_description_types(description)
# try to generate a signature
signature <- NULL
target <- py_get_attr_silent(source, topic)
if (py_is_callable(target)) {
help <- import("tftools.help")
signature <- help$generate_signature_for_function(target)
if (is.null(signature))
signature <- "()"
signature <- paste0(topic, signature)
}
# return docs
list(title = topic,
signature = signature,
description = description)
}
# Return parameter help for display in the completion popup window
help_completion_parameter_handler.tensorflow.builtin.object <- function(source) {
# split into topic and source
components <- source_components(source)
if (is.null(components))
return(NULL)
topic <- components$topic
source <- components$source
# get the function
target <- py_get_attr_silent(source, topic)
if (py_is_callable(target)) {
help <- import("tftools.help")
args <- help$get_arguments(target)
if (!is.null(args)) {
# get the descriptions
doc <- help$get_doc(target)
if (is.null(doc))
arg_descriptions <- args
else
arg_descriptions <- arg_descriptions_from_doc(args, doc)
return(list(
args = args,
arg_descriptions = arg_descriptions
))
}
}
# no parameter help found
NULL
}
# Handle requests for external (F1) help
help_url_handler.tensorflow.builtin.object <- function(topic, source) {
# normalize topic and source for various calling scenarios
if (grepl(" = $", topic)) {
components <- source_components(source)
if (is.null(components))
return(NULL)
topic <- components$topic
source <- components$source
} else {
source <- source_as_object(source)
if (is.null(source))
return(NULL)
}
# get help page
page <- NULL
inspect <- import("inspect")
if (inspect$ismodule(source)) {
module <- paste(source$`__name__`)
help <- module_help(module, topic)
} else {
help <- class_help(class(source), topic)
}
# return help (can be "")
help
}
# Handle requests for the list of arguments for a function
help_formals_handler.tensorflow.builtin.object <- function(topic, source) {
if (py_has_attr(source, topic)) {
target <- py_get_attr_silent(source, topic)
if (py_is_callable(target)) {
help <- import("tftools.help")
args <- help$get_arguments(target)
if (!is.null(args)) {
return(list(
formals = args,
helpHandler = "tensorflow:::help_handler"
))
}
}
}
# default to NULL if we couldn't get the arguments
NULL
}
# Extract argument descriptions from python docstring
arg_descriptions_from_doc <- function(args, doc) {
doc <- strsplit(doc, "\n", fixed = TRUE)[[1]]
arg_descriptions <- sapply(args, function(arg) {
prefix <- paste0(" ", arg, ": ")
arg_line <- which(grepl(paste0("^", prefix), doc))
if (length(arg_line) > 0) {
arg_description <- substring(doc[[arg_line]], nchar(prefix))
next_line <- arg_line + 1
while((arg_line + 1) <= length(doc)) {
line <- doc[[arg_line + 1]]
if (grepl("^ ", line)) {
arg_description <- paste(arg_description, line)
arg_line <- arg_line + 1
}
else
break
}
arg_description <- gsub("^\\s*", "", arg_description)
arg_description <- convert_description_types(arg_description)
} else {
arg
}
})
arg_descriptions
}
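# Illustration (hypothetical docstring fragment) of the layout parsed above:
# an argument line must start with two spaces, the argument name and ": ",
# and the immediately following indented lines are appended to its description:
#   learning_rate: A Tensor or a floating point value. The learning
#     rate to use.
#   use_locking: If True use locks for update operations.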
# Convert types in description
convert_description_types <- function(description) {
description <- sub("`None`", "`NULL`", description)
description <- sub("`True`", "`TRUE`", description)
description <- sub("`False`", "`FALSE`", description)
description
}
# Convert source to object if necessary
source_as_object <- function(source) {
if (is.character(source)) {
source <- tryCatch(eval(parse(text = source), envir = globalenv()),
error = function(e) NULL)
if (is.null(source))
return(NULL)
}
source
}
# Split source string into source and topic
source_components <- function(source) {
components <- strsplit(source, "\\$")[[1]]
topic <- components[[length(components)]]
source <- paste(components[1:(length(components)-1)], collapse = "$")
source <- source_as_object(source)
if (!is.null(source))
list(topic = topic, source = source)
else
NULL
}
module_help <- function(module, topic) {
# do we have a page for this module/topic?
lookup <- paste(module, topic, sep = ".")
page <- .module_help_topics[[lookup]]
# if so then append topic
if (!is.null(page))
paste(page, topic, sep = "#")
else
""
}
class_help <- function(class, topic) {
# call recursively for more than one class
if (length(class) > 1) {
# call for each class
for (i in 1:length(class)) {
help <- class_help(class[[i]], topic)
if (nzchar(help))
return(help)
}
# no help found
return("")
}
# do we have a page for this class?
page <- .class_help_topics[[class]]
# if so then append class and topic
if (!is.null(page)) {
components <- strsplit(class, ".", fixed = TRUE)[[1]]
class <- components[[length(components)]]
paste0(page, "#", class, ".", topic)
} else {
""
}
}
# Environments where we store help topics (mappings of module/class name to URL)
.module_help_topics <- new.env(parent = emptyenv())
.class_help_topics <- new.env(parent = emptyenv())
|
/R/python_help.R
|
permissive
|
rahulremanan/tensorflow_rstudio
|
R
| false | false | 7,494 |
r
|
|
/A8SishiYang.R
|
no_license
|
SishiYang/R-Homework-Samples
|
R
| false | false | 6,667 |
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_word.R
\name{search_word}
\alias{search_word}
\title{search word}
\usage{
search_word(word, data, head = T)
}
\arguments{
\item{word}{the word (initial or final letters) to search for}
\item{data}{a data.frame}
\item{head}{logical: whether to look at the first letters (TRUE) or the last letters (FALSE)}
}
\value{
the corresponding positions
}
\description{
Give it the initial or final letters of a column's entries and it returns the corresponding positions.
}
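% A minimal call sketch added for illustration (not from the package author);
% the data frame below is hypothetical and the example is not run.
\examples{
\dontrun{
df <- data.frame(city = c("Rome", "Rovigo", "Milan"))
# positions of the entries whose first letters match "Ro"
search_word("Ro", df, head = TRUE)
}
}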
|
/man/search_word.Rd
|
no_license
|
ablanda/Esame
|
R
| false | true | 476 |
rd
|
|
# Course 5 - Exploratory Data Analysis: Week 4 Course Project
# Author: Sanjay Lonkar
# Date: 20-May-2018
# TASK 1: Download dataset
if (!file.exists("./downloadedDataset"))
{
dir.create("./downloadedDataset")
}
if (!file.exists ("./downloadedDataset/exdata%2Fdata%2FNEI_data.zip")) # This step is to avoid downloading data every time one runs this script
{
datasetURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file (datasetURL, destfile="./downloadedDataset/downloadedDataset.zip")
unzip (zipfile = "./downloadedDataset/downloadedDataset.zip", exdir="./downloadedDataset")
}
# TASK 2: Using the base plotting system, make a plot showing the total PM2.5 emission from all sources for each of the years 1999, 2002, 2005, and 2008
# TASK 2.1: Prepare data
emissions <- readRDS("./downloadedDataset/summarySCC_PM25.rds")
scc <- readRDS("./downloadedDataset/Source_Classification_Code.rds")
totalByYear <- aggregate(Emissions ~ year, emissions, sum)
# TASK 2.2: Draw plot
png('plot1.png')
barplot (height = totalByYear$Emissions, names.arg = totalByYear$year, col = "red", xlab = "Year", ylab = expression ('Total PM'[2.5]*' Emissions [Tons]'), main = expression ('Total PM'[2.5]*' Emissions in US by Years'))
dev.off()
|
/plot1.R
|
no_license
|
celesto17/ExpAna_PM2.5
|
R
| false | false | 1,290 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Features_ExtractFeatures.R
\name{Features_ExtractFeatures}
\alias{Features_ExtractFeatures}
\alias{Features_ExtractParameters}
\title{Extract features from a list of dataframes.}
\usage{
Features_ExtractFeatures(preprocessedDFList, criteria = "intersect")
Features_ExtractParameters(preprocessedDFList, criteria = "intersect")
}
\arguments{
\item{preprocessedDFList}{A list of feature dataframes generated by \code{Features_Preprocess}, \code{Features_CorFilter}, \code{Features_FeatureSelect}, \code{Features_Importance}, or \code{Features_Importance_Reduce}.}
\item{criteria}{The criteria of feature extraction. Can either be "intersect" or "union".}
}
\description{
\code{Features_ExtractFeatures} obtains a set of features from a list of preprocessed/feature-selected dataframes. It also generates a Venn diagram.\cr
\code{Features_ExtractParameters} obtains a set of parameters.\cr
}
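% A minimal call sketch added for illustration (not from the package authors);
% 'fsDFList' is a hypothetical list of feature dataframes produced by the
% functions named in 'preprocessedDFList', and the example is not run.
\examples{
\dontrun{
commonFeatures <- Features_ExtractFeatures(fsDFList, criteria = "intersect")
allParameters  <- Features_ExtractParameters(fsDFList, criteria = "union")
}
}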
|
/man/Features_ExtractFeatures.Rd
|
permissive
|
abuguadalajara/Repitope
|
R
| false | true | 989 |
rd
|
|
setwd("/Users/yfong/DataScienceCoursera/Getting and Cleaning Data")
#______________________________________________________________________________
# 1. Merge the training and the test sets to create one data set.
## step 1: download zip file from website
if(!file.exists("./data")) dir.create("./data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/projectData_getCleanData.zip")
## step 2: unzip data
listZip <- unzip("./data/projectData_getCleanData.zip", exdir = "./data")
## step 3: load data into R
train.x <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
train.y <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
train.subject <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
test.x <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
test.y <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
test.subject <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
## step 4: merge train and test data
trainData <- cbind(train.subject, train.y, train.x)
testData <- cbind(test.subject, test.y, test.x)
fullData <- rbind(trainData, testData)
#-------------------------------------------------------------------------------
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
## step 1: load feature name into R
featureName <- read.table("./data/UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)[,2]
## step 2: extract mean and standard deviation of each measurement
featureIndex <- grep(("mean\\(\\)|std\\(\\)"), featureName)
finalData <- fullData[, c(1, 2, featureIndex+2)]
colnames(finalData) <- c("subject", "activity", featureName[featureIndex])
#-------------------------------------------------------------------------------
# 3. Uses descriptive activity names to name the activities in the data set
## step 1: load activity data into R
activityName <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
## step 2: replace 1 to 6 with activity names
finalData$activity <- factor(finalData$activity, levels = activityName[,1], labels = activityName[,2])
#-------------------------------------------------------------------------------
# 4. Appropriately labels the data set with descriptive variable names.
names(finalData) <- gsub("\\()", "", names(finalData))
names(finalData) <- gsub("^t", "time", names(finalData))
names(finalData) <- gsub("^f", "frequence", names(finalData))
names(finalData) <- gsub("-mean", "Mean", names(finalData))
names(finalData) <- gsub("-std", "Std", names(finalData))
#-------------------------------------------------------------------------------
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(dplyr)
groupData <- finalData %>%
group_by(subject, activity) %>%
summarise_each(funs(mean))
write.table(groupData, "./Getting_and_Cleaning_data_Project/MeanData.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
foamy1881/coursera-getting-and-cleaning-data-project
|
R
| false | false | 3,062 |
r
|
setwd("/Users/yfong/DataScienceCoursera/Getting and Cleaning Data")
#______________________________________________________________________________
# 1. Merge the training and the test sets to create one data set.
## step 1: download zip file from website
if(!file.exists("./data")) dir.create("./data")
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "./data/projectData_getCleanData.zip")
## step 2: unzip data
listZip <- unzip("./data/projectData_getCleanData.zip", exdir = "./data")
## step 3: load data into R
train.x <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
train.y <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
train.subject <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
test.x <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
test.y <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
test.subject <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
## step 4: merge train and test data
trainData <- cbind(train.subject, train.y, train.x)
testData <- cbind(test.subject, test.y, test.x)
fullData <- rbind(trainData, testData)
#-------------------------------------------------------------------------------
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
## step 1: load feature name into R
featureName <- read.table("./data/UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)[,2]
## step 2: extract mean and standard deviation of each measurement
featureIndex <- grep(("mean\\(\\)|std\\(\\)"), featureName)
finalData <- fullData[, c(1, 2, featureIndex+2)]
colnames(finalData) <- c("subject", "activity", featureName[featureIndex])
#-------------------------------------------------------------------------------
# 3. Uses descriptive activity names to name the activities in the data set
## step 1: load activity data into R
activityName <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
## step 2: replace 1 to 6 with activity names
finalData$activity <- factor(finalData$activity, levels = activityName[,1], labels = activityName[,2])
#-------------------------------------------------------------------------------
# 4. Appropriately labels the data set with descriptive variable names.
names(finalData) <- gsub("\\()", "", names(finalData))
names(finalData) <- gsub("^t", "time", names(finalData))
names(finalData) <- gsub("^f", "frequence", names(finalData))
names(finalData) <- gsub("-mean", "Mean", names(finalData))
names(finalData) <- gsub("-std", "Std", names(finalData))
#-------------------------------------------------------------------------------
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(dplyr)
groupData <- finalData %>%
group_by(subject, activity) %>%
summarise_each(funs(mean))
write.table(groupData, "./Getting_and_Cleaning_data_Project/MeanData.txt", row.names = FALSE)
|
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Data Regression and Prediction"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
fileInput("file1",
"Choose CSV File",
multiple=TRUE,
accept=c("text/csv",
"text/comma-separated-values",
"text/plain",
".csv")),
tags$hr(),
h4("Parameters for Data Loading"),
radioButtons("sep", "Separator",
choices=c(Comma=",", Semicolon=";",Tab="\t"),
selected=","),
radioButtons("quote","Quote",
choices=c(None="",
"Double Quote" = '"',
"Single Quote"="'")),
tags$hr(),
h4("Regression Type"),
radioButtons("method","Regression Type",
choices=c("Binomial"="binomial",
"Gaussian"="gaussian",
"Poisson"="poisson",
"Gamma"="gamma")),
h4("Predictor Value:"),
uiOutput("slider")
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Data Table",br(),
h3("Loaded Data Table"),
tableOutput("dataTable"),
h4("Predicted value of y:"),
textOutput("PredOut")
),
tabPanel("Model Summary",br(),
h3("Model Summary"),
verbatimTextOutput("ModelText")),
tabPanel("Scatter Plot", br(),
h3("Scatter Plot and Prediction"),
plotOutput("plot1")),
tabPanel("Using this Application", br(),
h2("Step-by-step Guidance on this Application"),
h3("Summary"),
h6("This application allows users to load a dataset and perform regression analysis on the data"),
h3("Data File:"),
h6("Data file to be loaded into the apps should be a .csv file with two columns. Headers should be included with predictor 'x' and dependent variable'y'"),
h3("Parameters for Data Loading:"),
h6("'Separator' and 'Quote' are to be set according to the formatting of the .csv file"),
h3("Regression Type:"),
h6("The type are to be chosen based on the type of data and regression models to be use"),
h3("Predictor Value:"),
h6("Predictor value is set by user and the predicted value of 'y' will be returned"),
h3("Output Tabs:"),
h6("- Data Table: Data loaded by user and predicted value of 'y'"),
h6("- Model Summary: Summary of the resulted regression model"),
h6("- Scatter Plot: Scatter plot of the loaded data with regression line. The predicted data point is also highlighted.")))
)
)
))
|
/DataAnalysisApps/DataAnalysis/ui.R
|
no_license
|
Adrianle1992/DataProductWk4Project
|
R
| false | false | 3,486 |
r
|
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Data Regression and Prediction"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
fileInput("file1",
"Choose CSV File",
multiple=TRUE,
accept=c("text/csv",
"text/comma-separated-values",
"text/plain",
".csv")),
tags$hr(),
h4("Parameters for Data Loading"),
radioButtons("sep", "Separator",
choices=c(Comma=",", Semicolon=";",Tab="\t"),
selected=","),
radioButtons("quote","Quote",
choices=c(None="",
"Double Quote" = '"',
"Single Quote"="'")),
tags$hr(),
h4("Regression Type"),
radioButtons("method","Regression Type",
choices=c("Binomial"="binomial",
"Gaussian"="gaussian",
"Poisson"="poisson",
"Gamma"="gamma")),
h4("Predictor Value:"),
uiOutput("slider")
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Data Table",br(),
h3("Loaded Data Table"),
tableOutput("dataTable"),
h4("Predicted value of y:"),
textOutput("PredOut")
),
tabPanel("Model Summary",br(),
h3("Model Summary"),
verbatimTextOutput("ModelText")),
tabPanel("Scatter Plot", br(),
h3("Scatter Plot and Prediction"),
plotOutput("plot1")),
tabPanel("Using this Application", br(),
h2("Step-by-step Guidance on this Application"),
h3("Summary"),
h6("This application allows users to load a dataset and perform regression analysis on the data"),
h3("Data File:"),
h6("Data file to be loaded into the apps should be a .csv file with two columns. Headers should be included with predictor 'x' and dependent variable'y'"),
h3("Parameters for Data Loading:"),
h6("'Separator' and 'Quote' are to be set according to the formatting of the .csv file"),
h3("Regression Type:"),
h6("The type are to be chosen based on the type of data and regression models to be use"),
h3("Predictor Value:"),
h6("Predictor value is set by user and the predicted value of 'y' will be returned"),
h3("Output Tabs:"),
h6("- Data Table: Data loaded by user and predicted value of 'y'"),
h6("- Model Summary: Summary of the resulted regression model"),
h6("- Scatter Plot: Scatter plot of the loaded data with regression line. The predicted data point is also highlighted.")))
)
)
))
|
doublebet_with_limits = function(x, initialbet = 10, maxbet = 100, bet = even)
{
winnings = rep(NA, length(x))
betsize = initialbet
current_winnings = 0
for(i in seq_along(x)){
if(bet(x[i]) == 1){
current_winnings = current_winnings + betsize
betsize = initialbet
} else {
current_winnings = current_winnings - betsize
betsize = 2 * betsize
if(maxbet < betsize){
# Table limits, go back to beginning
betsize = initialbet
}
}
winnings[i] = current_winnings
}
winnings
}
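# `even` is not defined in this file; it is assumed to come from the course's other
# roulette bet functions (e.g. roulette.R). A minimal illustrative sketch so the
# example below runs on its own: the bet wins (returns 1) on an even, nonzero spin.
even = function(x) {
    as.integer(x %% 2 == 0 & x != 0)
}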
s = rep(1, times = 10)
doublebet_with_limits(s)
|
/roulette2.R
|
no_license
|
clarkfitzg/stat128-fall20
|
R
| false | false | 680 |
r
|
doublebet_with_limits = function(x, initialbet = 10, maxbet = 100, bet = even)
{
winnings = rep(NA, length(x))
betsize = initialbet
current_winnings = 0
for(i in seq_along(x)){
if(bet(x[i]) == 1){
current_winnings = current_winnings + betsize
betsize = initialbet
} else {
current_winnings = current_winnings - betsize
betsize = 2 * betsize
if(maxbet < betsize){
# Table limits, go back to beginning
betsize = initialbet
}
}
winnings[i] = current_winnings
}
winnings
}
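# `even` is not defined in this file; it is assumed to come from the course's other
# roulette bet functions (e.g. roulette.R). A minimal illustrative sketch so the
# example below runs on its own: the bet wins (returns 1) on an even, nonzero spin.
even = function(x) {
    as.integer(x %% 2 == 0 & x != 0)
}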
s = rep(1, times = 10)
doublebet_with_limits(s)
|
observeEvent(input$load_model,{
req(input$all_run_id)
df <- load.model()
# output$results_table <- DT::renderDataTable(DT::datatable(head(masterDF)))
ids_DF <- parse_ids_from_input_runID(input$all_run_id)
README.text <- c()
for(i in seq(nrow(ids_DF))){
dfsub <- df %>% filter(run_id == ids_DF$runID[i])
diff.m <- diff(dfsub$dates)
    mode.m <- unique(diff.m)[which.max(tabulate(match(diff.m, unique(diff.m))))] # most frequent time step
diff_units.m = units(mode.m)
diff_message <- sprintf("timestep: %.2f %s", mode.m, diff_units.m)
wf.folder <- workflow(bety, ids_DF$wID[i]) %>% collect() %>% pull(folder)
README.text <- c(README.text,
paste("SELECTION",i),
"============",
readLines(file.path(wf.folder, 'run', ids_DF$runID[i], "README.txt")),
diff_message,
""
)
}
output$README <- renderUI({HTML(paste(README.text, collapse = '<br/>'))})
output$dim_message <- renderText({sprintf("This data has %.0f rows, think about skipping exploratory plots if this is a large number...", dim(df)[1])})
})
|
/shiny/workflowPlot/server_files/select_data_server.R
|
permissive
|
araiho/pecan
|
R
| false | false | 1,156 |
r
|
observeEvent(input$load_model,{
req(input$all_run_id)
df <- load.model()
# output$results_table <- DT::renderDataTable(DT::datatable(head(masterDF)))
ids_DF <- parse_ids_from_input_runID(input$all_run_id)
README.text <- c()
for(i in seq(nrow(ids_DF))){
dfsub <- df %>% filter(run_id == ids_DF$runID[i])
diff.m <- diff(dfsub$dates)
    mode.m <- unique(diff.m)[which.max(tabulate(match(diff.m, unique(diff.m))))] # most frequent time step
diff_units.m = units(mode.m)
diff_message <- sprintf("timestep: %.2f %s", mode.m, diff_units.m)
wf.folder <- workflow(bety, ids_DF$wID[i]) %>% collect() %>% pull(folder)
README.text <- c(README.text,
paste("SELECTION",i),
"============",
readLines(file.path(wf.folder, 'run', ids_DF$runID[i], "README.txt")),
diff_message,
""
)
}
output$README <- renderUI({HTML(paste(README.text, collapse = '<br/>'))})
output$dim_message <- renderText({sprintf("This data has %.0f rows, think about skipping exploratory plots if this is a large number...", dim(df)[1])})
})
|
#################################################################################################################
##### VERY BASIC LINEAR REGRESSION ##############################################################################
#################################################################################################################
# Assumes data is clean
# set your working directory to your PC
# READ data
saledata <- read.csv('train_v2.csv')
# examine the variables in the data
summary(saledata)
# RUN the regression of Sales on indicators for whether there was a promo that day, whether it was a school holiday, and whether it was a state holiday
#Since the three variables of interest are binary, let's convert them to factors by putting a factor() in front of each
reg_saledata <- lm(Sales~factor(Promo)+factor(StateHoliday)+factor(SchoolHoliday), data = saledata)
reg_saledata
# note: replacing the named predictors with a period (.), as in lm(Sales~., data = saledata), regresses Sales
# on every remaining column and does not apply factor(), so it matches the model above only in special cases.
# look at a summary of the regression results:
summary(reg_saledata)
# PREDICT the average sales for a typical day you want
# first, create a dataframe with the parameters
new_record <- data.frame(StateHoliday = 0 ,SchoolHoliday = 1,Promo=1)
# then run the prediction
predict(reg_saledata, newdata = new_record)
# INTERPRET: we expect the mean value of Sales for the new record to be 111068.12
|
/Basic Linear Regression & Predict.R
|
permissive
|
shubhi126/Learning-Analytics-with-R
|
R
| false | false | 1,476 |
r
|
#################################################################################################################
##### VERY BASIC LINEAR REGRESSION ##############################################################################
#################################################################################################################
# Assumes data is clean
# set your working directory to your PC
# READ data
saledata <- read.csv('train_v2.csv')
# examine the variables in the data
summary(saledata)
# RUN the regression of Sales on indicators for whether there was a promo that day, whether it was a school holiday, and whether it was a state holiday
#Since the three variables of interest are binary, let's convert them to factors by putting a factor() in front of each
reg_saledata <- lm(Sales~factor(Promo)+factor(StateHoliday)+factor(SchoolHoliday), data = saledata)
reg_saledata
# note: replacing the named predictors with a period (.), as in lm(Sales~., data = saledata), regresses Sales
# on every remaining column and does not apply factor(), so it matches the model above only in special cases.
# look at a summary of the regression results:
summary(reg_saledata)
# PREDICT the average sales for a typical day you want
# first, create a dataframe with the parameters
new_record <- data.frame(StateHoliday = 0 ,SchoolHoliday = 1,Promo=1)
# then run the prediction
predict(reg_saledata, newdata = new_record)
# INTERPRET: we expect the mean value of Sales for the new record to be 111068.12
|
if (!file.exists("data")) {
dir.create("data")
}
fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl, destfile = "./data/cameras.csv")
dateDownloaded <- date()
# Update R to the latest version
# installing/loading the package:
if(!require(installr)) {
install.packages("installr"); require(installr)} #load / install+load installr
# using the package:
updateR() # this will start the updating process of your R installation. It will check for newer versions, and if one is available, will guide you through the decisions you'd need to make.
|
/Week1tests.R
|
no_license
|
JonJagd/getting-cleaning-data
|
R
| false | false | 645 |
r
|
if (!file.exists("data")) {
dir.create("data")
}
fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl, destfile = "./data/cameras.csv")
dateDownloaded <- date()
# Update R to the latest version
# installing/loading the package:
if(!require(installr)) {
install.packages("installr"); require(installr)} #load / install+load installr
# using the package:
updateR() # this will start the updating process of your R installation. It will check for newer versions, and if one is available, will guide you through the decisions you'd need to make.
|
library(shiny)
section_missing <- div(
p("A sizable number of Knoedler records are missing crucial information, such as precise sale dates, prices, and artwork genres. We will encounter this same issue in artist and collector life dates, auction sale dates, and so forth. Normally, we would simply discard those records when doing analyses that require the presence of those values. However, simply discarding records means that we would base our summary claims (about, say, the influence of artwork genre on sale price) on a small sample of all the sales that we know did, indeed, take place. How can we determine whether those missing records might invalidate the conclusions we draw?"),
p("One intuitive way to address this issue is through what is known as multiple imputation, in which we articulate informed guesses at what those missing values might be, and then run dozens or hundreds of simulations that stochastically generate values for those missing records within the boundaries set by those guesses, and then return a range of likely results (using whichever metric we were computing in the first place) that take in to account the uncertainty produced by those missing values."),
tags$hr(),
p("The original Knoedler data have very few missing gnere labels. For the purposes of demonstration, we can randomly add a few more. What additinoal percentage of these records should have missing genre?"),
inputPanel(sliderInput("percent_missing", "Percent missing", min = 0, max = 1, value = 0, step = 0.1)),
p("The original distribution of genres over time, including missing values."),
plotOutput("static_plot", width = "800px"))
section_example <- div(
p("Now, what is the balance of genres that we want to assume when guessing the value of these next ones? If we assume that any genre has an equal chance of missing from the dataset, then all sliders should be set to the same value. If, on the other hand, we assume that one genre has a greater chance of being missing - e.g., that missing paintings have a greater chance of actually being abstract than still life - then we would set a higher value for abstract artworks and a lower value for still lifes."),
p("The default values for these sliders match the overall ratios of these genres as observed in the original dataset."),
inputPanel(
sliderInput("abstract", "Abstract", min = 0, max = 1, value = gstart[["abstract"]]),
sliderInput("genre", "Genre", min = 0, max = 1, value = gstart[["Genre"]]),
sliderInput("history", "History", min = 0, max = 1, value = gstart[["abstract"]]),
sliderInput("landscape", "Landscape", min = 0, max = 1, value = gstart[["Landscape"]]),
sliderInput("portrait", "Portrait", min = 0, max = 1, value = gstart[["Portrait"]]),
sliderInput("still_life", "Still Life", min = 0, max = 1, value = gstart[["Still Life"]])),
p("Below, missing values have been replaced randomly, following the weights for each genre set above."),
plotOutput("sim_plot", width = "800px"))
section_simulated <- div(
p("Because the missing value replacement process is randomized, we can't just do it once. We need to repeat it many times over, generating a ", tags$em("range"), " of possible values."),
p("To reduce the noise from year-to-year fluctuations, we can also use a moving window average to smooth the results."),
inputPanel(
sliderInput("n_boot", "Bootstrap iterations", min = 1, max = 100, value = 1, step = 10),
sliderInput("window_size", "Rolling window size", min = 1, max = 20, value = 10),
actionButton("calc", "Simulate!")),
textOutput("window_number"),
p("The black lines represent the values returned by each individual simulation. The red line is the median value of each annual result."),
plotOutput("div_plot", width = "800px"))
shinyUI(fluidPage(
# Application title
titlePanel("Handling missing genres"),
section_missing,
section_example,
section_simulated))
|
/ui.R
|
permissive
|
mdlincoln/missingness
|
R
| false | false | 3,941 |
r
|
library(shiny)
section_missing <- div(
p("A sizable number of Knoedler records are missing crucial information, such as precise sale dates, prices, and artwork genres. We will encounter this same issue in artist and collector life dates, auction sale dates, and so forth. Normally, we would simply discard those records when doing analyses that require the presence of those values. However, simply discarding records means that we would base our summary claims (about, say, the influence of artwork genre on sale price) on a small sample of all the sales that we know did, indeed, take place. How can we determine whether those missing records might invalidate the conclusions we draw?"),
p("One intuitive way to address this issue is through what is known as multiple imputation, in which we articulate informed guesses at what those missing values might be, and then run dozens or hundreds of simulations that stochastically generate values for those missing records within the boundaries set by those guesses, and then return a range of likely results (using whichever metric we were computing in the first place) that take in to account the uncertainty produced by those missing values."),
tags$hr(),
p("The original Knoedler data have very few missing gnere labels. For the purposes of demonstration, we can randomly add a few more. What additinoal percentage of these records should have missing genre?"),
inputPanel(sliderInput("percent_missing", "Percent missing", min = 0, max = 1, value = 0, step = 0.1)),
p("The original distribution of genres over time, including missing values."),
plotOutput("static_plot", width = "800px"))
section_example <- div(
p("Now, what is the balance of genres that we want to assume when guessing the value of these next ones? If we assume that any genre has an equal chance of missing from the dataset, then all sliders should be set to the same value. If, on the other hand, we assume that one genre has a greater chance of being missing - e.g., that missing paintings have a greater chance of actually being abstract than still life - then we would set a higher value for abstract artworks and a lower value for still lifes."),
p("The default values for these sliders match the overall ratios of these genres as observed in the original dataset."),
inputPanel(
sliderInput("abstract", "Abstract", min = 0, max = 1, value = gstart[["abstract"]]),
sliderInput("genre", "Genre", min = 0, max = 1, value = gstart[["Genre"]]),
sliderInput("history", "History", min = 0, max = 1, value = gstart[["abstract"]]),
sliderInput("landscape", "Landscape", min = 0, max = 1, value = gstart[["Landscape"]]),
sliderInput("portrait", "Portrait", min = 0, max = 1, value = gstart[["Portrait"]]),
sliderInput("still_life", "Still Life", min = 0, max = 1, value = gstart[["Still Life"]])),
p("Below, missing values have been replaced randomly, following the weights for each genre set above."),
plotOutput("sim_plot", width = "800px"))
section_simulated <- div(
p("Because the missing value replacement process is randomized, we can't just do it once. We need to repeat it many times over, generating a ", tags$em("range"), " of possible values."),
p("To reduce the noise from year-to-year fluctuations, we can also use a moving window average to smooth the results."),
inputPanel(
sliderInput("n_boot", "Bootstrap iterations", min = 1, max = 100, value = 1, step = 10),
sliderInput("window_size", "Rolling window size", min = 1, max = 20, value = 10),
actionButton("calc", "Simulate!")),
textOutput("window_number"),
p("The black lines represent the values returned by each individual simulation. The red line is the median value of each annual result."),
plotOutput("div_plot", width = "800px"))
shinyUI(fluidPage(
# Application title
titlePanel("Handling missing genres"),
section_missing,
section_example,
section_simulated))
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pipeline.R
\name{pipeline}
\alias{pipeline}
\title{Define the pipeline and watch it get executed}
\usage{
pipeline(tasks, to, remote = FALSE)
}
\arguments{
\item{tasks}{list. A list of ruigi_tasks.}
\item{to}{ruigi_target. The scheduler will stop executing the graph once this target is created.}
\item{remote}{logical. Choose between a local and remote scheduler.}
}
\description{
Define the pipeline and watch it get executed
}
|
/man/pipeline.Rd
|
no_license
|
kirillseva/ruigi
|
R
| false | false | 519 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/pipeline.R
\name{pipeline}
\alias{pipeline}
\title{Define the pipeline and watch it get executed}
\usage{
pipeline(tasks, to, remote = FALSE)
}
\arguments{
\item{tasks}{list. A list of ruigi_tasks.}
\item{to}{ruigi_target. The scheduler will stop executing the graph once this target is created.}
\item{remote}{logical. Choose between a local and remote scheduler.}
}
\description{
Define the pipeline and watch it get executed
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudresourcemanager_functions.R
\docType{package}
\name{cloudresourcemanager_googleAuthR}
\alias{cloudresourcemanager_googleAuthR}
\alias{cloudresourcemanager_googleAuthR-package}
\title{Google Cloud Resource Manager API
The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2016-09-03 23:17:29
filename: /Users/mark/dev/R/autoGoogleAPI/googlecloudresourcemanagerv1beta1.auto/R/cloudresourcemanager_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/cloud-platform.read-only
}
}
|
/googlecloudresourcemanagerv1beta1.auto/man/cloudresourcemanager_googleAuthR.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false | true | 820 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudresourcemanager_functions.R
\docType{package}
\name{cloudresourcemanager_googleAuthR}
\alias{cloudresourcemanager_googleAuthR}
\alias{cloudresourcemanager_googleAuthR-package}
\title{Google Cloud Resource Manager API
The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2016-09-03 23:17:29
filename: /Users/mark/dev/R/autoGoogleAPI/googlecloudresourcemanagerv1beta1.auto/R/cloudresourcemanager_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/cloud-platform.read-only
}
}
|
context("sparse tidiers")
library(Matrix)
m <- Matrix(0 + 1:28, nrow = 4)
m[-3, c(2, 4:5, 7)] <- m[3, 1:4] <- m[1:3, 6] <- 0
rownames(m) <- letters[1:4]
colnames(m) <- 1:7
mT <- as(m, "dgTMatrix")
mC <- as(m, "dgCMatrix")
mS <- as(m, "sparseMatrix")
test_that("tidy.dgTMatrix works", {
td <- tidy(mT)
check_tidy(td, exp.row = 9, exp.col = 3)
})
test_that("tidy.dgCMatrix uses tidy.dgTMatrix", {
expect_identical(tidy(mC), tidy.dgTMatrix(mC))
})
|
/tests/testthat/test-sparse.R
|
no_license
|
puterleat/broom
|
R
| false | false | 455 |
r
|
context("sparse tidiers")
library(Matrix)
m <- Matrix(0 + 1:28, nrow = 4)
m[-3, c(2, 4:5, 7)] <- m[3, 1:4] <- m[1:3, 6] <- 0
rownames(m) <- letters[1:4]
colnames(m) <- 1:7
mT <- as(m, "dgTMatrix")
mC <- as(m, "dgCMatrix")
mS <- as(m, "sparseMatrix")
test_that("tidy.dgTMatrix works", {
td <- tidy(mT)
check_tidy(td, exp.row = 9, exp.col = 3)
})
test_that("tidy.dgCMatrix uses tidy.dgTMatrix", {
expect_identical(tidy(mC), tidy.dgTMatrix(mC))
})
|
library(parallel)
simRep <- 1000 # Replication times in one simulation
#pvalue.true <- .05 # Testing type I error
#b.var <- c(0.04) # The set of variance values of the random covariates b as random slopes
#smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if smooth = 1
cores <- 4
#r.sim <- b.var
nSubj <- 200
nRep <- 50
seediter <- 1
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
#set.seed(iter)
nNewvisit <- 10
set.seed(iter+(seediter-1)*5000)
D <- 80 # grid number total
#nSubj <- 20 # 200 # I the number of curves
#nRep <- 20 # 20 # datasets for each covariance function
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
  SNR <- 3 # 5, signal-to-noise ratio
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# hot
gammaI.true.i <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true.i[rep(1:nrow(gammaI.true.i), each = nRep), ]
# warm
gammaI2.true.i <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true.i[rep(1:nrow(gammaI2.true.i), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
#generate functional covariates
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
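  # Karhunen-Loeve-type construction: the functional covariate is built from the
  # three Fourier eigenfunctions above with geometrically decaying eigenvalues,
  # i.e. X(t) = sum_k a_k * psi_k(t) with scores a_k ~ N(0, 0.5^(k-1)).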
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
#thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
thetaIK.true1 <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
thetaIK.true <- thetaIK.true1[rep(1:nrow(thetaIK.true1), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
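  # Outcome model: intercept, a subject-specific "hot" effect when dummyX = 1,
  # minus the subject-specific "warm" effect when dummyX = 0 (via the dummyX - 1
  # weight), plus the functional term sum_k theta_ik * a_ijk and Gaussian noise.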
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
t <- (1:D)/D
knots <- 5 #5 previous setting 10
p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore1 <- score[, 1:npc]/sqrt(D)
efunctions1 <- results$efunctions*sqrt(D)
sign_correct <- diag(sign(colSums(efunctions1*t(psi.true))))
efunctions <- efunctions1 %*% sign_correct
ascore <- ascore1 %*% sign_correct
##############################################################
dummyX <- cbind(dummyX, -dummyX + 1)
##############################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ascore = ascore)
noRandom.simpart <- paste(1:npc, collapse = " + ascore.")
noRandom.sim1 <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
noRandom.simpart,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) ",
sep = ""))
NoRandomReml <- lmer(noRandom.sim1, data = designMatrix)
# npc=3
additive.heter <- paste0(" + (0 + ascore.", 1:npc, " | ID)", collapse = "")
Random.sim1 <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
noRandom.simpart,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID)",
additive.heter,
sep = ""))
RandomReml <- lmer(Random.sim1, data = designMatrix)
###true beta_t
beta_t <- t(psi.true)%*%as.vector(rep(thetaK.true, npc.true))
beta_it <- apply(t(psi.true) %*% t(thetaIK.true1),2,function(x) x - beta_t)
### NoRandomReml
fixeffWithTemp1 <- fixef(NoRandomReml)
betaWithTemp1 <- efunctions %*% as.vector(fixeffWithTemp1[3:(npc+2)])
ISE_beta_1 <- sum((betaWithTemp1-beta_t)^2/D) #ISE for beta(t)
e1 <- Y - predict(NoRandomReml)
mseNoRandom <- mean(e1^2) #mse for response
NoRandom_vcov <- summary(NoRandomReml)$vcov
### RandomReml
fixeffWithTemp2 <- fixef(RandomReml)
betaWithTemp2 <- efunctions %*% as.vector(fixeffWithTemp2[3:(npc+2)])
ISE_beta_2 <- sum((betaWithTemp2-beta_t)^2/D) #ISE for beta(t)
e2 <- Y - predict(RandomReml)
mseRandom <- mean(e2^2) #mse for response
betai_2 <- efunctions %*% t(as.matrix(ranef(RandomReml)$ID[,3:(npc+2)])) # beta_i(t)
MISE_2 <- mean(colMeans((betai_2-beta_it)^2)) #MISE for beta_i(t)
Random_vcov <- summary(RandomReml)$vcov
gene_newdata <- function(newvisitN){
#generate new dummyX
dummyX <- rbinom(n = nSubj*newvisitN, size = 1, prob = 0.5) # new dummyX
#generate new x_ij(t) for new visit
ascore.true <- mvrnorm(nSubj*newvisitN, rep(a.mean, npc.true), diag(lambdaVec.true))
#sum new x_ij(t)(beta_t + betai_t)
Mt.true <- ascore.true %*% psi.true
#generate new error
error <- rnorm(nSubj*newvisitN, mean = 0, sd = sd.epsilon)
thetaIK.true <- thetaIK.true1[rep(1:nrow(thetaIK.true1), each = newvisitN), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
# hot
gammaI.true <- gammaI.true.i[rep(1:nrow(gammaI.true.i), each = newvisitN), ]
# warm
gammaI2.true <- gammaI2.true.i[rep(1:nrow(gammaI2.true.i), each = newvisitN), ]
# generate new Y
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
totalN <- nSubj*newvisitN
ID <- rep(1:nSubj, each = newvisitN)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
#t <- (1:D)/D
#knots <- 5 #5 previous setting 10
#p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
#results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
#npc <- results$npc
#score <- results$scores
#ascore1 <- score[, 1:npc]/sqrt(D)
#efunctions1 <- results$efunctions*sqrt(D)
#sign_correct <- diag(sign(colSums(efunctions1*t(psi.true))))
#efunctions <- efunctions1 %*% sign_correct
#ascore <- ascore1 %*% sign_correct
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX,
#temp.2 = -dummyX + 1,
ID = as.factor(ID))
#ascore = ascore)
return(list(newY=Y,
newdata=designMatrix,
Xt = M))
}
#predict for the new dataset
test1 <- gene_newdata(nNewvisit)
#noRandom beta_t
betaIT1.i <- betaWithTemp1
betaIT1 <- betaIT1.i[,rep(1,nSubj*nNewvisit)]
#Random beta_t
betaIT2.i <- betaWithTemp2 %*% rep(1,nSubj) + betai_2
betaIT2 <- betaIT2.i[, rep(1:nSubj, each=nNewvisit)]
#calculate new MSE
test_est1 <- rowMeans(test1$Xt * t(betaIT1)) + as.matrix(cbind(1,as.matrix(test1$newdata$temp.1))) %*% as.matrix(fixeffWithTemp1[1:2])
test_est2 <- rowMeans(test1$Xt * t(betaIT2)) + cbind(1,as.matrix(test1$newdata$temp.1)) %*% as.matrix(fixeffWithTemp2[1:2])
test_est_MSE1 <- mean((test_est1 - test1$newY)^2)
test_est_MSE2 <- mean((test_est2 - test1$newY)^2)
return(list(realTau = r.sim,
ISE_beta.norandom = ISE_beta_1,
ISE_beta.random = ISE_beta_2,
mse_Y.NoRandom = mseNoRandom,
mse_Y.Random = mseRandom,
MISE_betai.Random = MISE_2,
beta.norandom = betaWithTemp1,
beta.random = betaWithTemp2,
betait.random = betai_2,
NoRandom_vcov = NoRandom_vcov,
Random_vcov = Random_vcov,
test_est_MSE.noran = test_est_MSE1,
test_est_MSE.ran = test_est_MSE2))
}
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
clusterExport(cluster, c("nSubj", "nRep", "seediter"))
for(r.sim in c(.02, .04, .08)){
for (smooth in c(1,0)){
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
b.var <- r.sim
fileName <- paste("heter_pred_", smooth, "_",b.var,"_seed",
seediter,"_grp", nSubj, "-rep", nRep,
".RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
est.sim <- list()
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
#ISE_beta.norandom,ISE_beta.random, mse_Y.NoRandom, mse_Y.Random, MISE_betai.Random
est <- sapply(node_results, function(x){
return(c(x$ISE_beta.norandom, x$ISE_beta.random, x$mse_Y.NoRandom, x$mse_Y.Random, x$MISE_betai.Random,
x$test_est_MSE.noran, x$test_est_MSE.ran))})
est.mean <- rowMeans(est)
beta.norandom <- sapply(node_results, function(x){
x$beta.norandom})
beta.random <- sapply(node_results, function(x){
x$beta.random})
betait.random <- lapply(node_results, function(x){
x$betait.random})
NoRandom_vcov <- lapply(node_results, function(x){
x$NoRandom_vcov})
Random_vcov <- lapply(node_results, function(x){
x$Random_vcov})
est.sim <- list(est.mean = list(test_est_MSE.noran = est.mean[6],
test_est_MSE.ran = est.mean[7],
ISE_beta.norandom = est.mean[1],
ISE_beta.random = est.mean[2],
mse_Y.NoRandom = est.mean[3],
mse_Y.Random = est.mean[4],
MISE_betai.Random = est.mean[5]),
realTau = c(r.sim,r.sim/2,r.sim/4),
smooth = smooth)
save(est.sim, beta.norandom,beta.random,betait.random, NoRandom_vcov,Random_vcov, file=fileName) # Auto Save
}
}
stopCluster(cluster)
|
/full simulation/summer/prediction/hetero/test/heter_20050.R
|
no_license
|
wma9/FMRI-project
|
R
| false | false | 12,376 |
r
|
library(parallel)
simRep <- 1000 # Replication times in one simulation
#pvalue.true <- .05 # Testing type I error
#b.var <- c(0.04) # The set of variance values of the random covariates b as random slopes
#smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if smooth = 1
cores <- 4
#r.sim <- b.var
nSubj <- 200
nRep <- 50
seediter <- 1
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
#set.seed(iter)
nNewvisit <- 10
set.seed(iter+(seediter-1)*5000)
D <- 80 # grid number total
#nSubj <- 20 # 200 # I the number of curves
#nRep <- 20 # 20 # datasets for each covariance function
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
  SNR <- 3 # 5, signal-to-noise ratio
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# hot
gammaI.true.i <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true.i[rep(1:nrow(gammaI.true.i), each = nRep), ]
# warm
gammaI2.true.i <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true.i[rep(1:nrow(gammaI2.true.i), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
#generate functional covariates
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
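  # Karhunen-Loeve-type construction: the functional covariate is built from the
  # three Fourier eigenfunctions above with geometrically decaying eigenvalues,
  # i.e. X(t) = sum_k a_k * psi_k(t) with scores a_k ~ N(0, 0.5^(k-1)).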
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
#thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
thetaIK.true1 <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
thetaIK.true <- thetaIK.true1[rep(1:nrow(thetaIK.true1), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
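  # Outcome model: intercept, a subject-specific "hot" effect when dummyX = 1,
  # minus the subject-specific "warm" effect when dummyX = 0 (via the dummyX - 1
  # weight), plus the functional term sum_k theta_ik * a_ijk and Gaussian noise.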
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
t <- (1:D)/D
knots <- 5 #5 previous setting 10
p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore1 <- score[, 1:npc]/sqrt(D)
efunctions1 <- results$efunctions*sqrt(D)
sign_correct <- diag(sign(colSums(efunctions1*t(psi.true))))
efunctions <- efunctions1 %*% sign_correct
ascore <- ascore1 %*% sign_correct
##############################################################
dummyX <- cbind(dummyX, -dummyX + 1)
##############################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ascore = ascore)
noRandom.simpart <- paste(1:npc, collapse = " + ascore.")
noRandom.sim1 <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
noRandom.simpart,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) ",
sep = ""))
NoRandomReml <- lmer(noRandom.sim1, data = designMatrix)
# npc=3
additive.heter <- paste0(" + (0 + ascore.", 1:npc, " | ID)", collapse = "")
Random.sim1 <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
noRandom.simpart,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID)",
additive.heter,
sep = ""))
RandomReml <- lmer(Random.sim1, data = designMatrix)
###true beta_t
beta_t <- t(psi.true)%*%as.vector(rep(thetaK.true, npc.true))
beta_it <- apply(t(psi.true) %*% t(thetaIK.true1),2,function(x) x - beta_t)
### NoRandomReml
fixeffWithTemp1 <- fixef(NoRandomReml)
betaWithTemp1 <- efunctions %*% as.vector(fixeffWithTemp1[3:(npc+2)])
ISE_beta_1 <- sum((betaWithTemp1-beta_t)^2/D) #ISE for beta(t)
e1 <- Y - predict(NoRandomReml)
mseNoRandom <- mean(e1^2) #mse for response
NoRandom_vcov <- summary(NoRandomReml)$vcov
### RandomReml
fixeffWithTemp2 <- fixef(RandomReml)
betaWithTemp2 <- efunctions %*% as.vector(fixeffWithTemp2[3:(npc+2)])
ISE_beta_2 <- sum((betaWithTemp2-beta_t)^2/D) #ISE for beta(t)
e2 <- Y - predict(RandomReml)
mseRandom <- mean(e2^2) #mse for response
betai_2 <- efunctions %*% t(as.matrix(ranef(RandomReml)$ID[,3:(npc+2)])) # beta_i(t)
MISE_2 <- mean(colMeans((betai_2-beta_it)^2)) #MISE for beta_i(t)
Random_vcov <- summary(RandomReml)$vcov
gene_newdata <- function(newvisitN){
#generate new dummyX
dummyX <- rbinom(n = nSubj*newvisitN, size = 1, prob = 0.5) # new dummyX
#generate new x_ij(t) for new visit
ascore.true <- mvrnorm(nSubj*newvisitN, rep(a.mean, npc.true), diag(lambdaVec.true))
#sum new x_ij(t)(beta_t + betai_t)
Mt.true <- ascore.true %*% psi.true
#generate new error
error <- rnorm(nSubj*newvisitN, mean = 0, sd = sd.epsilon)
thetaIK.true <- thetaIK.true1[rep(1:nrow(thetaIK.true1), each = newvisitN), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
# hot
gammaI.true <- gammaI.true.i[rep(1:nrow(gammaI.true.i), each = newvisitN), ]
# warm
gammaI2.true <- gammaI2.true.i[rep(1:nrow(gammaI2.true.i), each = newvisitN), ]
# generate new Y
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
totalN <- nSubj*newvisitN
ID <- rep(1:nSubj, each = newvisitN)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
#t <- (1:D)/D
#knots <- 5 #5 previous setting 10
#p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
#results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
#npc <- results$npc
#score <- results$scores
#ascore1 <- score[, 1:npc]/sqrt(D)
#efunctions1 <- results$efunctions*sqrt(D)
#sign_correct <- diag(sign(colSums(efunctions1*t(psi.true))))
#efunctions <- efunctions1 %*% sign_correct
#ascore <- ascore1 %*% sign_correct
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX,
#temp.2 = -dummyX + 1,
ID = as.factor(ID))
#ascore = ascore)
return(list(newY=Y,
newdata=designMatrix,
Xt = M))
}
#predict for the new dataset
test1 <- gene_newdata(nNewvisit)
#noRandom beta_t
betaIT1.i <- betaWithTemp1
betaIT1 <- betaIT1.i[,rep(1,nSubj*nNewvisit)]
#Random beta_t
betaIT2.i <- betaWithTemp2 %*% rep(1,nSubj) + betai_2
betaIT2 <- betaIT2.i[, rep(1:nSubj, each=nNewvisit)]
#calculate new MSE
test_est1 <- rowMeans(test1$Xt * t(betaIT1)) + as.matrix(cbind(1,as.matrix(test1$newdata$temp.1))) %*% as.matrix(fixeffWithTemp1[1:2])
test_est2 <- rowMeans(test1$Xt * t(betaIT2)) + cbind(1,as.matrix(test1$newdata$temp.1)) %*% as.matrix(fixeffWithTemp2[1:2])
test_est_MSE1 <- mean((test_est1 - test1$newY)^2)
test_est_MSE2 <- mean((test_est2 - test1$newY)^2)
return(list(realTau = r.sim,
ISE_beta.norandom = ISE_beta_1,
ISE_beta.random = ISE_beta_2,
mse_Y.NoRandom = mseNoRandom,
mse_Y.Random = mseRandom,
MISE_betai.Random = MISE_2,
beta.norandom = betaWithTemp1,
beta.random = betaWithTemp2,
betait.random = betai_2,
NoRandom_vcov = NoRandom_vcov,
Random_vcov = Random_vcov,
test_est_MSE.noran = test_est_MSE1,
test_est_MSE.ran = test_est_MSE2))
}
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
clusterExport(cluster, c("nSubj", "nRep", "seediter"))
for(r.sim in c(.02, .04, .08)){
for (smooth in c(1,0)){
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
b.var <- r.sim
fileName <- paste("heter_pred_", smooth, "_",b.var,"_seed",
seediter,"_grp", nSubj, "-rep", nRep,
".RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
est.sim <- list()
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalues.bonf = x$pvalues.bonf,
# smooth = x$smooth,
# npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
#ISE_beta.norandom,ISE_beta.random, mse_Y.NoRandom, mse_Y.Random, MISE_betai.Random
est <- sapply(node_results, function(x){
return(c(x$ISE_beta.norandom, x$ISE_beta.random, x$mse_Y.NoRandom, x$mse_Y.Random, x$MISE_betai.Random,
x$test_est_MSE.noran, x$test_est_MSE.ran))})
est.mean <- rowMeans(est)
beta.norandom <- sapply(node_results, function(x){
x$beta.norandom})
beta.random <- sapply(node_results, function(x){
x$beta.random})
betait.random <- lapply(node_results, function(x){
x$betait.random})
NoRandom_vcov <- lapply(node_results, function(x){
x$NoRandom_vcov})
Random_vcov <- lapply(node_results, function(x){
x$Random_vcov})
est.sim <- list(est.mean = list(test_est_MSE.noran = est.mean[6],
test_est_MSE.ran = est.mean[7],
ISE_beta.norandom = est.mean[1],
ISE_beta.random = est.mean[2],
mse_Y.NoRandom = est.mean[3],
mse_Y.Random = est.mean[4],
MISE_betai.Random = est.mean[5]),
realTau = c(r.sim,r.sim/2,r.sim/4),
smooth = smooth)
save(est.sim, beta.norandom,beta.random,betait.random, NoRandom_vcov,Random_vcov, file=fileName) # Auto Save
}
}
stopCluster(cluster)
|
## Programming Assignment 2: Caching inverse of a matrix
## Functions provide ability to solve the inverse of a matrix and cache the result
## which avoids processing repeatedly.
## Creates a list of functions:
## set : To set the matrix provided
## get : to get the matrix provided
## setcache: to save the cache of the inverse of the matrix provided
## getcache: to get the cache of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()){
invX <- NULL
set <- function(y = matrix()){
x <<- y
    invX <<- NULL
}
get <- function() x
setcache <- function(z = matrix()){
invX <<- z
}
getcache <- function() invX
l_lst <- list(set = set,
get = get,
getcache = getcache,
setcache = setcache)
l_lst
}
## Takes an object of makeCacheMatrix as argument
## calculates and saves inverse of matrix in cache
## If inverse of matrix already exists then returns cached value
cacheSolve <- function(x){
l_inv <- x$getcache()
if(!is.null(l_inv)){
message("Getting cached Inverse Matrix")
return(l_inv)
}
l_mtx <- x$get()
l_inv <- solve(l_mtx)
x$setcache(l_inv)
l_inv
}
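## Illustrative usage (hypothetical example, not part of the assignment spec):
## build a small invertible matrix, solve once, then hit the cache on the second call.
if (interactive()) {
    cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
    print(cacheSolve(cm))   # computes the inverse and caches it
    print(cacheSolve(cm))   # returns the cached inverse with a message
}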
|
/cachematrix.R
|
no_license
|
sanjeev-saini/ProgrammingAssignment2
|
R
| false | false | 1,205 |
r
|
## Programming Assignment 2: Caching inverse of a matrix
## Functions provide ability to solve the inverse of a matrix and cache the result
## which avoids processing repeatedly.
## Creates a list of functions:
## set : To set the matrix provided
## get : to get the matrix provided
## setcache: to save the cache of the inverse of the matrix provided
## getcache: to get the cache of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()){
invX <- NULL
set <- function(y = matrix()){
x <<- y
    invX <<- NULL
}
get <- function() x
setcache <- function(z = matrix()){
invX <<- z
}
getcache <- function() invX
l_lst <- list(set = set,
get = get,
getcache = getcache,
setcache = setcache)
l_lst
}
## Takes an object of makeCacheMatrix as argument
## calculates and saves inverse of matrix in cache
## If inverse of matrix already exists then returns cached value
cacheSolve <- function(x){
l_inv <- x$getcache()
if(!is.null(l_inv)){
message("Getting cached Inverse Matrix")
return(l_inv)
}
l_mtx <- x$get()
l_inv <- solve(l_mtx)
x$setcache(l_inv)
l_inv
}
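## Illustrative usage (hypothetical example, not part of the assignment spec):
## build a small invertible matrix, solve once, then hit the cache on the second call.
if (interactive()) {
    cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
    print(cacheSolve(cm))   # computes the inverse and caches it
    print(cacheSolve(cm))   # returns the cached inverse with a message
}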
|
# inspired by janitor::tabyl(),
# to skip the dependency
tabyl <- function(x) {
df <- tibble::tibble(x = x) %>%
dplyr::count(x) %>%
dplyr::mutate(percent = n / sum(n))
colnames(df) <- c(".", "n", "percent")
df
}
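# Illustrative usage (hypothetical; not part of the package's exported API):
# counts each distinct value and adds its share of the total.
if (FALSE) {
  tabyl(c("a", "a", "b"))  # tibble with rows ("a", 2, 2/3) and ("b", 1, 1/3)
}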
|
/R/utils-tabyl.R
|
permissive
|
EMODnet/EMODnetWFS
|
R
| false | false | 227 |
r
|
# inspired by janitor::tabyl(),
# to skip the dependency
tabyl <- function(x) {
df <- tibble::tibble(x = x) %>%
dplyr::count(x) %>%
dplyr::mutate(percent = n / sum(n))
colnames(df) <- c(".", "n", "percent")
df
}
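# Illustrative usage (hypothetical; not part of the package's exported API):
# counts each distinct value and adds its share of the total.
if (FALSE) {
  tabyl(c("a", "a", "b"))  # tibble with rows ("a", 2, 2/3) and ("b", 1, 1/3)
}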
|
#############################################################################################
## Title: RBSA Analysis
## Author: Casey Stevens, Cadmus Group
## Created: 02/27/2017
## Updated: 09/11/2017
## Billing Code(s): 6596.0000.0002.1002.0000
#############################################################################################
## Clear variables
# rm(list = ls())
rundate <- format(Sys.time(), "%d%b%y")
options(scipen = 999)
## Create "Not In" operator
"%notin%" <- Negate("%in%")
# Source codes
source("Code/Table Code/SourceCode.R")
source("Code/Table Code/Weighting Implementation Functions.R")
source("Code/Sample Weighting/Weights.R")
source("Code/Table Code/Export Function.R")
# Read in clean RBSA data
rbsa.dat <- read.xlsx(xlsxFile = file.path(filepathCleanData, paste("clean.rbsa.data", rundate, ".xlsx", sep = "")))
length(unique(rbsa.dat$CK_Cadmus_ID))
one.line.dat <- read.xlsx(xlsxFile = file.path(filepathRawData, one.line.export), sheet = "Site One Line Summary", startRow = 2)
one.line.dat$CK_Cadmus_ID <- trimws(toupper(one.line.dat$Cadmus.ID))
# #############################################################################################
# #Item 36: AVERAGE NORMALIZED HEAT-LOSS RATE BY VINTAGE AND STATE (SF table 43, MH table 24)
# #############################################################################################
# item36.dat <- one.line.dat[which(colnames(one.line.dat) %in% c("CK_Cadmus_ID"
# ,"Whole.House.UA"))]
# item36.dat1 <- left_join(rbsa.dat, item36.dat)
# item36.dat1 <- item36.dat1[grep("SITE",item36.dat1$CK_Building_ID),]
# item36.dat1$Whole.House.UA <- as.numeric(as.character(item36.dat1$Whole.House.UA))
# item36.dat2 <- item36.dat1[which(!is.na(item36.dat1$Whole.House.UA)),]
# item36.dat3 <- item36.dat2[grep("site",item36.dat2$CK_Building_ID, ignore.case = T),]
# which(duplicated(item36.dat3$CK_Cadmus_ID))
# item36.dat4 <- item36.dat3[which(item36.dat3$Conditioned.Area > 0),]
#
#
# item36.dat4$Normalized.Heat.Loss.Rate <- item36.dat4$Whole.House.UA / item36.dat4$Conditioned.Area
#
# item36.dat5 <- item36.dat4[which(!is.na(item36.dat4$HomeYearBuilt_bins3)),]
#
# ################################################
# # Adding pop and sample sizes for weights
# ################################################
# item36.data <- weightedData(item36.dat5[-which(colnames(item36.dat5) %in% c("Whole.House.UA"
# ,"Normalized.Heat.Loss.Rate"))])
# item36.data <- left_join(item36.data, item36.dat5[which(colnames(item36.dat5) %in% c("CK_Cadmus_ID"
# ,"Whole.House.UA"
# ,"Normalized.Heat.Loss.Rate"))])
# item36.data$count <- 1
# which(duplicated(item36.data$CK_Cadmus_ID))
#
# #######################
# # Weighted Analysis
# #######################
# item36.cast <- mean_two_groups(CustomerLevelData = item36.data
# ,valueVariable = "Normalized.Heat.Loss.Rate"
# ,byVariableRow = "HomeYearBuilt_bins3"
# ,byVariableColumn = "State"
# ,columnAggregate = "Region"
# ,rowAggregate = "All Vintages")
#
# item36.table <- data.frame("BuildingType" = item36.cast$BuildingType
# ,"Housing.Vintage" = item36.cast$HomeYearBuilt_bins3
# ,"ID" = item36.cast$Mean_ID
# ,"ID.SE" = item36.cast$SE_ID
# ,"ID.n" = item36.cast$n_ID
# ,"MT" = item36.cast$Mean_MT
# ,"MT.SE" = item36.cast$SE_MT
# ,"MT.n" = item36.cast$n_MT
# ,"OR" = item36.cast$Mean_OR
# ,"OR.SE" = item36.cast$SE_OR
# ,"OR.n" = item36.cast$n_OR
# ,"WA" = item36.cast$Mean_WA
# ,"WA.SE" = item36.cast$SE_WA
# ,"WA.n" = item36.cast$n_WA
# ,"Region" = item36.cast$Mean_Region
# ,"Region.SE" = item36.cast$SE_Region
# ,"Region.n" = item36.cast$n_Region
# ,"ID.EB" = item36.cast$EB_ID
# ,"MT.EB" = item36.cast$EB_MT
# ,"OR.EB" = item36.cast$EB_OR
# ,"WA.EB" = item36.cast$EB_WA
# ,"Region.EB" = item36.cast$EB_Region)
#
# levels(item36.table$Housing.Vintage)
# rowOrder <- c("Pre 1981"
# ,"1981-1990"
# ,"1991-2000"
# ,"2001-2010"
# ,"Post 2010"
# ,"All Vintages")
# item36.table <- item36.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
# item36.table <- data.frame(item36.table)
#
#
# item36.table.SF <- item36.table[which(item36.table$BuildingType == "Single Family"),
# -which(colnames(item36.table) == "BuildingType")]
# item36.table.MH <- item36.table[which(item36.table$BuildingType == "Manufactured"),
# -which(colnames(item36.table) == "BuildingType")]
#
# exportTable(item36.table.SF, "SF","Table 43",weighted = TRUE)
# # exportTable(item36.table.MH, "MH","Table 24",weighted = TRUE)
#
# #######################
# # Unweighted Analysis
# #######################
# item36.cast <- mean_two_groups_unweighted(CustomerLevelData = item36.data
# ,valueVariable = "Normalized.Heat.Loss.Rate"
# ,byVariableRow = "HomeYearBuilt_bins3"
# ,byVariableColumn = "State"
# ,columnAggregate = "Region"
# ,rowAggregate = "All Vintages")
#
# item36.table <- data.frame("BuildingType" = item36.cast$BuildingType
# ,"Housing.Vintage" = item36.cast$HomeYearBuilt_bins3
# ,"ID" = item36.cast$Mean_ID
# ,"ID.SE" = item36.cast$SE_ID
# ,"ID.n" = item36.cast$n_ID
# ,"MT" = item36.cast$Mean_MT
# ,"MT.SE" = item36.cast$SE_MT
# ,"MT.n" = item36.cast$n_MT
# ,"OR" = item36.cast$Mean_OR
# ,"OR.SE" = item36.cast$SE_OR
# ,"OR.n" = item36.cast$n_OR
# ,"WA" = item36.cast$Mean_WA
# ,"WA.SE" = item36.cast$SE_WA
# ,"WA.n" = item36.cast$n_WA
# ,"Region" = item36.cast$Mean_Region
# ,"Region.SE" = item36.cast$SE_Region
# ,"Region.n" = item36.cast$n_Region)
#
# levels(item36.table$Housing.Vintage)
# rowOrder <- c("Pre 1981"
# ,"1981-1990"
# ,"1991-2000"
# ,"2001-2010"
# ,"Post 2010"
# ,"All Vintages")
# item36.table <- item36.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
# item36.table <- data.frame(item36.table)
#
#
# item36.table.SF <- item36.table[which(item36.table$BuildingType == "Single Family"),
# -which(colnames(item36.table) == "BuildingType")]
# item36.table.MH <- item36.table[which(item36.table$BuildingType == "Manufactured"),
# -which(colnames(item36.table) == "BuildingType")]
#
# exportTable(item36.table.SF, "SF","Table 43",weighted = FALSE)
# # exportTable(item36.table.MH, "MH","Table 24",weighted = FALSE)
#
#
#
#
#
#
# #############################################################################################
# #Item 37: AVERAGE HEAT-LOSS RATE BY VINTAGE AND STATE (SF table 44, MH table 26)
# #############################################################################################
# item37.dat <- one.line.dat[which(colnames(one.line.dat) %in% c("CK_Cadmus_ID"
# ,"Whole.House.UA"))]
# item37.dat1 <- left_join(rbsa.dat, item37.dat)
# item37.dat1$Whole.House.UA <- as.numeric(as.character(item37.dat1$Whole.House.UA))
# item37.dat2 <- item37.dat1[which(item37.dat1$Whole.House.UA %notin% c("N/A",NA)),]
# item37.dat3 <- item37.dat2[grep("site",item37.dat2$CK_Building_ID, ignore.case = T),]
# which(duplicated(item37.dat3$CK_Cadmus_ID))
#
# item37.dat4 <- item37.dat3[which(item37.dat3$HomeYearBuilt_bins3 %notin% c("N/A",NA)),]
#
# ################################################
# # Adding pop and sample sizes for weights
# ################################################
# item37.data <- weightedData(item37.dat4[-which(colnames(item37.dat4) %in% c("Whole.House.UA"))])
# item37.data <- left_join(item37.data, item37.dat4[which(colnames(item37.dat4) %in% c("CK_Cadmus_ID"
# ,"Whole.House.UA"))])
# item37.data$count <- 1
# #######################
# # Weighted Analysis
# #######################
# item37.cast <- mean_two_groups(CustomerLevelData = item37.data
# ,valueVariable = "Whole.House.UA"
# ,byVariableRow = "HomeYearBuilt_bins3"
# ,byVariableColumn = "State"
# ,columnAggregate = "Region"
# ,rowAggregate = "All Vintages")
#
# item37.table <- data.frame("BuildingType" = item37.cast$BuildingType
# ,"Housing.Vintage" = item37.cast$HomeYearBuilt_bins3
# ,"ID" = item37.cast$Mean_ID
# ,"ID.SE" = item37.cast$SE_ID
# ,"ID.n" = item37.cast$n_ID
# ,"MT" = item37.cast$Mean_MT
# ,"MT.SE" = item37.cast$SE_MT
# ,"MT.n" = item37.cast$n_MT
# ,"OR" = item37.cast$Mean_OR
# ,"OR.SE" = item37.cast$SE_OR
# ,"OR.n" = item37.cast$n_OR
# ,"WA" = item37.cast$Mean_WA
# ,"WA.SE" = item37.cast$SE_WA
# ,"WA.n" = item37.cast$n_WA
# ,"Region" = item37.cast$Mean_Region
# ,"Region.SE" = item37.cast$SE_Region
# ,"Region.n" = item37.cast$n_Region
# ,"ID.EB" = item37.cast$EB_ID
# ,"MT.EB" = item37.cast$EB_MT
# ,"OR.EB" = item37.cast$EB_OR
# ,"WA.EB" = item37.cast$EB_WA
# ,"Region.EB" = item37.cast$EB_Region)
#
# levels(item37.table$Housing.Vintage)
# rowOrder <- c("Pre 1981"
# ,"1981-1990"
# ,"1991-2000"
# ,"2001-2010"
# ,"Post 2010"
# ,"All Vintages")
# item37.table <- item37.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
# item37.table <- data.frame(item37.table)
#
#
# item37.table.SF <- item37.table[which(item37.table$BuildingType == "Single Family"),
# -which(colnames(item37.table) == "BuildingType")]
# item37.table.MH <- item37.table[which(item37.table$BuildingType == "Manufactured"),
# -which(colnames(item37.table) == "BuildingType")]
#
# exportTable(item37.table.SF, "SF","Table 44",weighted = TRUE)
# # exportTable(item37.table.MH, "MH","Table 26",weighted = TRUE)
#
# #######################
# # Unweighted Analysis
# #######################
# item37.cast <- mean_two_groups_unweighted(CustomerLevelData = item37.data
# ,valueVariable = "Whole.House.UA"
# ,byVariableRow = "HomeYearBuilt_bins3"
# ,byVariableColumn = "State"
# ,columnAggregate = "Region"
# ,rowAggregate = "All Vintages")
#
# item37.table <- data.frame("BuildingType" = item37.cast$BuildingType
# ,"Housing.Vintage" = item37.cast$HomeYearBuilt_bins3
# ,"ID" = item37.cast$Mean_ID
# ,"ID.SE" = item37.cast$SE_ID
# ,"ID.n" = item37.cast$n_ID
# ,"MT" = item37.cast$Mean_MT
# ,"MT.SE" = item37.cast$SE_MT
# ,"MT.n" = item37.cast$n_MT
# ,"OR" = item37.cast$Mean_OR
# ,"OR.SE" = item37.cast$SE_OR
# ,"OR.n" = item37.cast$n_OR
# ,"WA" = item37.cast$Mean_WA
# ,"WA.SE" = item37.cast$SE_WA
# ,"WA.n" = item37.cast$n_WA
# ,"Region" = item37.cast$Mean_Region
# ,"Region.SE" = item37.cast$SE_Region
# ,"Region.n" = item37.cast$n_Region)
#
# levels(item37.table$Housing.Vintage)
# rowOrder <- c("Pre 1981"
# ,"1981-1990"
# ,"1991-2000"
# ,"2001-2010"
# ,"Post 2010"
# ,"All Vintages")
# item37.table <- item37.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
# item37.table <- data.frame(item37.table)
#
#
# item37.table.SF <- item37.table[which(item37.table$BuildingType == "Single Family"),
# -which(colnames(item37.table) == "BuildingType")]
# item37.table.MH <- item37.table[which(item37.table$BuildingType == "Manufactured"),
# -which(colnames(item37.table) == "BuildingType")]
#
# exportTable(item37.table.SF, "SF","Table 44",weighted = FALSE)
# # exportTable(item37.table.MH, "MH","Table 26",weighted = FALSE)
#
#
#
#
#
#
#
#
#
#
# #############################################################################################
# #Item 182: AVERAGE HEAT-LOSS RATE BY AGE/STANDARD AND STATE (MH table 25)
# #############################################################################################
# #Read in data for analysis
# sites.dat <- read.xlsx(xlsxFile = file.path(filepathRawData, sites.export))
# #clean cadmus IDs
# sites.dat$CK_Cadmus_ID <- trimws(toupper(sites.dat$CK_Cadmus_ID))
#
# item182.sites <- unique(sites.dat[which(colnames(sites.dat) %in% c("CK_Cadmus_ID"
# ,"SITE_Construction_CONSTRUCTION_STANDARD_AgeAndConstructionStandard"))])
# names(item182.sites) <- c("CK_Cadmus_ID", "Age.and.Construction.Standard")
# item182.dat <- one.line.dat[which(colnames(one.line.dat) %in% c("CK_Cadmus_ID"
# ,"Whole.House.UA"))]
# item182.dat0 <- left_join(item182.sites, item182.dat)
# item182.dat1 <- left_join(rbsa.dat, item182.dat0)
# item182.dat1$Whole.House.UA <- as.numeric(as.character(item182.dat1$Whole.House.UA))
# item182.dat2 <- item182.dat1[which(item182.dat1$Whole.House.UA %notin% c("N/A",NA)),]
# item182.dat3 <- item182.dat2[grep("site",item182.dat2$CK_Building_ID, ignore.case = T),]
# which(duplicated(item182.dat3$CK_Cadmus_ID))
#
# item182.dat4 <- item182.dat3[which(item182.dat3$Age.and.Construction.Standard %notin% c("Unknown","unknown","N/A","1977")),]
# unique(item182.dat4$Age.and.Construction.Standard)
# item182.dat4$Normalized.Heat.Loss.Rate <- item182.dat4$Whole.House.UA / item182.dat4$Conditioned.Area
# item182.dat5 <- item182.dat4[which(!is.na(item182.dat4$Normalized.Heat.Loss.Rate)),]
#
#
# unique(item182.dat5$Age.and.Construction.Standard)
#
# ################################################
# # Adding pop and sample sizes for weights
# ################################################
# item182.data <- weightedData(item182.dat5[-which(colnames(item182.dat5) %in% c("Whole.House.UA"
# ,"Age.and.Construction.Standard"
# ,"Normalized.Heat.Loss.Rate"))])
# item182.data <- left_join(item182.data, item182.dat5[which(colnames(item182.dat5) %in% c("CK_Cadmus_ID"
# ,"Whole.House.UA"
# ,"Age.and.Construction.Standard"
# ,"Normalized.Heat.Loss.Rate"))])
# item182.data$count <- 1
# #######################
# # Weighted Analysis
# #######################
# item182.summary <- mean_two_groups(CustomerLevelData = item182.data
# ,valueVariable = "Normalized.Heat.Loss.Rate"
# ,byVariableRow = "Age.and.Construction.Standard"
# ,byVariableColumn = "State"
# ,columnAggregate = "Region"
# ,rowAggregate = "All Age/Standards")
# # item182.all.ages.standards <- mean_one_group(CustomerLevelData = item182.data
# # ,valueVariable = "Normalized.Heat.Loss.Rate"
# # ,byVariable = "State"
# # ,aggregateRow = "Region")
# # item182.all.ages.standards <- dcast(setDT(item182.all.ages.standards)
# # ,formula = BuildingType ~ State
# # ,value.var = c("Mean","SE","n","EB"))
# # item182.all.ages.standards$Age.and.Construction.Standard <- "All Age/Standards"
#
# item182.cast <- item182.summary#rbind.data.frame(item182.summary, item182.all.ages.standards, stringsAsFactors = F)
#
# item182.table <- data.frame("BuildingType" = item182.cast$BuildingType
# ,"Age.Construction.Standard" = item182.cast$Age.and.Construction.Standard
# ,"ID" = item182.cast$Mean_ID
# ,"ID.SE" = item182.cast$SE_ID
# ,"ID.n" = item182.cast$n_ID
# ,"MT" = item182.cast$Mean_MT
# ,"MT.SE" = item182.cast$SE_MT
# ,"MT.n" = item182.cast$n_MT
# ,"OR" = item182.cast$Mean_OR
# ,"OR.SE" = item182.cast$SE_OR
# ,"OR.n" = item182.cast$n_OR
# ,"WA" = item182.cast$Mean_WA
# ,"WA.SE" = item182.cast$SE_WA
# ,"WA.n" = item182.cast$n_WA
# ,"Region" = item182.cast$Mean_Region
# ,"Region.SE" = item182.cast$SE_Region
# ,"Region.n" = item182.cast$n_Region
# ,"ID.EB" = item182.cast$EB_ID
# ,"MT.EB" = item182.cast$EB_MT
# ,"OR.EB" = item182.cast$EB_OR
# ,"WA.EB" = item182.cast$EB_WA
# ,"Region.EB" = item182.cast$EB_Region)
#
# item182.table.MH <- item182.table[which(item182.table$BuildingType == "Manufactured"),
# -which(colnames(item182.table) == "BuildingType")]
#
# exportTable(item182.table.MH, "MH","Table 25",weighted = TRUE)
#
# #######################
# # Unweighted Analysis
# #######################
# item182.summary <- mean_two_groups_unweighted(CustomerLevelData = item182.data
# ,valueVariable = "Normalized.Heat.Loss.Rate"
# ,byVariableRow = "Age.and.Construction.Standard"
# ,byVariableColumn = "State"
# ,columnAggregate = "Region"
# ,rowAggregate = "All Age/Standards")
#
# item182.cast <- item182.summary
#
# item182.table <- data.frame("BuildingType" = item182.cast$BuildingType
# ,"Age.Construction.Standard" = item182.cast$Age.and.Construction.Standard
# ,"ID" = item182.cast$Mean_ID
# ,"ID.SE" = item182.cast$SE_ID
# ,"ID.n" = item182.cast$n_ID
# ,"MT" = item182.cast$Mean_MT
# ,"MT.SE" = item182.cast$SE_MT
# ,"MT.n" = item182.cast$n_MT
# ,"OR" = item182.cast$Mean_OR
# ,"OR.SE" = item182.cast$SE_OR
# ,"OR.n" = item182.cast$n_OR
# ,"WA" = item182.cast$Mean_WA
# ,"WA.SE" = item182.cast$SE_WA
# ,"WA.n" = item182.cast$n_WA
# ,"Region" = item182.cast$Mean_Region
# ,"Region.SE" = item182.cast$SE_Region
# ,"Region.n" = item182.cast$n_Region)
#
# item182.table.MH <- item182.table[which(item182.table$BuildingType == "Manufactured"),
# -which(colnames(item182.table) == "BuildingType")]
#
# exportTable(item182.table.MH, "MH","Table 25",weighted = FALSE)
############################################################################################################
#
#
# OVERSAMPLE ANALYSIS
#
#
############################################################################################################
# Read in clean oversample data (SCL or SnoPUD, selected by os.ind)
os.dat <- read.xlsx(xlsxFile = file.path(filepathCleanData, paste("clean.",os.ind,".data", rundate, ".xlsx", sep = "")))
length(unique(os.dat$CK_Cadmus_ID))
os.dat$CK_Building_ID <- os.dat$Category
os.dat <- os.dat[which(names(os.dat) != "Category")]
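# Illustrative check (not in the original script): the renamed CK_Building_ID column is
# assumed to hold the oversample categories referenced in the branches below
# (e.g. "SCL GenPop", "SCL LI", "SCL EH", "2017 RBSA PS" or "SnoPUD", "2017 RBSA NW").
table(os.dat$CK_Building_ID)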
#############################################################################################
#Item 36: AVERAGE NORMALIZED HEAT-LOSS RATE BY VINTAGE AND CK_Building_ID (SF table 43, MH table 24)
#############################################################################################
item36.os.dat <- one.line.dat[which(colnames(one.line.dat) %in% c("CK_Cadmus_ID"
,"Whole.House.UA"))]
item36.os.dat1 <- left_join(os.dat, item36.os.dat)
item36.os.dat1$Whole.House.UA <- as.numeric(as.character(item36.os.dat1$Whole.House.UA))
item36.os.dat2 <- item36.os.dat1[which(!is.na(item36.os.dat1$Whole.House.UA)),]
item36.os.dat4 <- item36.os.dat2[which(item36.os.dat2$Conditioned.Area > 0),]
item36.os.dat4$Normalized.Heat.Loss.Rate <- item36.os.dat4$Whole.House.UA / item36.os.dat4$Conditioned.Area
item36.os.dat5 <- item36.os.dat4[which(!is.na(item36.os.dat4$HomeYearBuilt_bins3)),]
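# Illustrative sanity checks (not in the original script), mirroring the duplicate and
# range checks used in the commented-out site-level analysis above.
which(duplicated(item36.os.dat5$CK_Cadmus_ID))
summary(item36.os.dat5$Normalized.Heat.Loss.Rate)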
################################################
# Adding pop and sample sizes for weights
################################################
item36.os.data <- weightedData(item36.os.dat5[-which(colnames(item36.os.dat5) %in% c("Whole.House.UA"
,"Normalized.Heat.Loss.Rate"))])
item36.os.data <- left_join(item36.os.data, item36.os.dat5[which(colnames(item36.os.dat5) %in% c("CK_Cadmus_ID"
,"Whole.House.UA"
,"Normalized.Heat.Loss.Rate"))])
item36.os.data$count <- 1
#######################
# Weighted Analysis
#######################
item36.os.cast <- mean_two_groups(CustomerLevelData = item36.os.data
,valueVariable = "Normalized.Heat.Loss.Rate"
,byVariableRow = "HomeYearBuilt_bins3"
,byVariableColumn = "CK_Building_ID"
,columnAggregate = "Remove"
,rowAggregate = "All Vintages")
names(item36.os.cast)
if(os.ind == "scl"){
item36.os.table <- data.frame("Housing.Vintage" = item36.os.cast$HomeYearBuilt_bins3
,"Mean_SCL.GenPop" = item36.os.cast$`Mean_SCL GenPop`
,"SE_SCL.GenPop" = item36.os.cast$`SE_SCL GenPop`
,"n_SCL.GenPop" = item36.os.cast$`n_SCL GenPop`
,"Mean_SCL.LI" = item36.os.cast$`Mean_SCL LI`
,"SE_SCL.LI" = item36.os.cast$`SE_SCL LI`
,"n_SCL.LI" = item36.os.cast$`n_SCL LI`
,"Mean_SCL.EH" = item36.os.cast$`Mean_SCL EH`
,"SE_SCL.EH" = item36.os.cast$`SE_SCL EH`
,"n_SCL.EH" = item36.os.cast$`n_SCL EH`
,"Mean_2017.RBSA.PS" = item36.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item36.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item36.os.cast$`n_2017 RBSA PS`
,"EB_SCL.GenPop" = item36.os.cast$`EB_SCL GenPop`
,"EB_SCL.LI" = item36.os.cast$`EB_SCL LI`
,"EB_SCL.EH" = item36.os.cast$`EB_SCL EH`
,"EB_2017.RBSA.PS" = item36.os.cast$`EB_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item36.os.table <- data.frame("Housing.Vintage" = item36.os.cast$HomeYearBuilt_bins3
,"Mean_SnoPUD" = item36.os.cast$`Mean_SnoPUD`
,"SE_SnoPUD" = item36.os.cast$`SE_SnoPUD`
,"n_SnoPUD" = item36.os.cast$`n_SnoPUD`
,"Mean_2017.RBSA.PS" = item36.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item36.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item36.os.cast$`n_2017 RBSA PS`
,"Mean_RBSA.NW" = item36.os.cast$`Mean_2017 RBSA NW`
,"SE_RBSA.NW" = item36.os.cast$`SE_2017 RBSA NW`
,"n_RBSA.NW" = item36.os.cast$`n_2017 RBSA NW`
,"EB_SnoPUD" = item36.os.cast$`EB_SnoPUD`
,"EB_2017.RBSA.PS" = item36.os.cast$`EB_2017 RBSA PS`
,"EB_RBSA.NW" = item36.os.cast$`EB_2017 RBSA NW`)
}
levels(item36.os.table$Housing.Vintage)
rowOrder <- c("Pre 1981"
,"1981-1990"
,"1991-2000"
,"2001-2010"
,"Post 2010"
,"All Vintages")
item36.os.table <- item36.os.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
item36.os.table <- data.frame(item36.os.table)
exportTable(item36.os.table, "SF","Table 43",weighted = TRUE, osIndicator = export.ind, OS = T)
#######################
# Unweighted Analysis
#######################
item36.os.cast <- mean_two_groups_unweighted(CustomerLevelData = item36.os.data
,valueVariable = "Normalized.Heat.Loss.Rate"
,byVariableRow = "HomeYearBuilt_bins3"
,byVariableColumn = "CK_Building_ID"
,columnAggregate = "Region"
,rowAggregate = "All Vintages")
names(item36.os.cast)
if(os.ind == "scl"){
item36.os.table <- data.frame("Housing.Vintage" = item36.os.cast$HomeYearBuilt_bins3
,"Mean_SCL.GenPop" = item36.os.cast$`Mean_SCL GenPop`
,"SE_SCL.GenPop" = item36.os.cast$`SE_SCL GenPop`
,"n_SCL.GenPop" = item36.os.cast$`n_SCL GenPop`
,"Mean_SCL.LI" = item36.os.cast$`Mean_SCL LI`
,"SE_SCL.LI" = item36.os.cast$`SE_SCL LI`
,"n_SCL.LI" = item36.os.cast$`n_SCL LI`
,"Mean_SCL.EH" = item36.os.cast$`Mean_SCL EH`
,"SE_SCL.EH" = item36.os.cast$`SE_SCL EH`
,"n_SCL.EH" = item36.os.cast$`n_SCL EH`
,"Mean_2017.RBSA.PS" = item36.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item36.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item36.os.cast$`n_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item36.os.table <- data.frame("Housing.Vintage" = item36.os.cast$HomeYearBuilt_bins3
,"Mean_SCL.GenPop" = item36.os.cast$`Mean_SnoPUD`
,"SE_SnoPUD" = item36.os.cast$`SE_SnoPUD`
,"n_SnoPUD" = item36.os.cast$`n_SnoPUD`
,"Mean_2017.RBSA.PS" = item36.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item36.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item36.os.cast$`n_2017 RBSA PS`
,"Mean_RBSA.NW" = item36.os.cast$`Mean_2017 RBSA NW`
,"SE_RBSA.NW" = item36.os.cast$`SE_2017 RBSA NW`
,"n_RBSA.NW" = item36.os.cast$`n_2017 RBSA NW`)
}
levels(item36.os.table$Housing.Vintage)
rowOrder <- c("Pre 1981"
,"1981-1990"
,"1991-2000"
,"2001-2010"
,"Post 2010"
,"All Vintages")
item36.os.table <- item36.os.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
item36.os.table <- data.frame(item36.os.table)
exportTable(item36.os.table, "SF","Table 43",weighted = FALSE, osIndicator = export.ind, OS = T)
#############################################################################################
#Item 37: AVERAGE HEAT-LOSS RATE BY VINTAGE AND CK_Building_ID (SF table 44, MH table 26)
#############################################################################################
item37.os.dat <- one.line.dat[which(colnames(one.line.dat) %in% c("CK_Cadmus_ID"
,"Whole.House.UA"))]
item37.os.dat1 <- left_join(os.dat, item37.os.dat)
item37.os.dat1$Whole.House.UA <- as.numeric(as.character(item37.os.dat1$Whole.House.UA))
item37.os.dat2 <- item37.os.dat1[which(item37.os.dat1$Whole.House.UA %notin% c("N/A",NA)),]
item37.os.dat4 <- item37.os.dat2[which(item37.os.dat2$HomeYearBuilt_bins3 %notin% c("N/A",NA)),]
################################################
# Adding pop and sample sizes for weights
################################################
item37.os.data <- weightedData(item37.os.dat4[-which(colnames(item37.os.dat4) %in% c("Whole.House.UA"))])
item37.os.data <- left_join(item37.os.data, item37.os.dat4[which(colnames(item37.os.dat4) %in% c("CK_Cadmus_ID"
,"Whole.House.UA"))])
item37.os.data$count <- 1
#######################
# Weighted Analysis
#######################
item37.os.cast <- mean_two_groups(CustomerLevelData = item37.os.data
,valueVariable = "Whole.House.UA"
,byVariableRow = "HomeYearBuilt_bins3"
,byVariableColumn = "CK_Building_ID"
,columnAggregate = "Remove"
,rowAggregate = "All Vintages")
names(item37.os.cast)
if(os.ind == "scl"){
item37.os.table <- data.frame("Housing.Vintage" = item37.os.cast$HomeYearBuilt_bins3
,"Mean_SCL.GenPop" = item37.os.cast$`Mean_SCL GenPop`
,"SE_SCL.GenPop" = item37.os.cast$`SE_SCL GenPop`
,"n_SCL.GenPop" = item37.os.cast$`n_SCL GenPop`
,"Mean_SCL.LI" = item37.os.cast$`Mean_SCL LI`
,"SE_SCL.LI" = item37.os.cast$`SE_SCL LI`
,"n_SCL.LI" = item37.os.cast$`n_SCL LI`
,"Mean_SCL.EH" = item37.os.cast$`Mean_SCL EH`
,"SE_SCL.EH" = item37.os.cast$`SE_SCL EH`
,"n_SCL.EH" = item37.os.cast$`n_SCL EH`
,"Mean_2017.RBSA.PS" = item37.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item37.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item37.os.cast$`n_2017 RBSA PS`
,"EB_SCL.GenPop" = item37.os.cast$`EB_SCL GenPop`
,"EB_SCL.LI" = item37.os.cast$`EB_SCL LI`
,"EB_SCL.EH" = item37.os.cast$`EB_SCL EH`
,"EB_2017.RBSA.PS" = item37.os.cast$`EB_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item37.os.table <- data.frame("Housing.Vintage" = item37.os.cast$HomeYearBuilt_bins3
,"Mean_SnoPUD" = item37.os.cast$`Mean_SnoPUD`
,"SE_SnoPUD" = item37.os.cast$`SE_SnoPUD`
,"n_SnoPUD" = item37.os.cast$`n_SnoPUD`
,"Mean_2017.RBSA.PS" = item37.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item37.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item37.os.cast$`n_2017 RBSA PS`
,"Mean_RBSA.NW" = item37.os.cast$`Mean_2017 RBSA NW`
,"SE_RBSA.NW" = item37.os.cast$`SE_2017 RBSA NW`
,"n_RBSA.NW" = item37.os.cast$`n_2017 RBSA NW`
,"EB_SnoPUD" = item37.os.cast$`EB_SnoPUD`
,"EB_2017.RBSA.PS" = item37.os.cast$`EB_2017 RBSA PS`
,"EB_RBSA.NW" = item37.os.cast$`EB_2017 RBSA NW`)
}
levels(item37.os.table$Housing.Vintage)
rowOrder <- c("Pre 1981"
,"1981-1990"
,"1991-2000"
,"2001-2010"
,"Post 2010"
,"All Vintages")
item37.os.table <- item37.os.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
item37.os.table <- data.frame(item37.os.table)
exportTable(item37.os.table, "SF","Table 44",weighted = TRUE, osIndicator = export.ind, OS = T)
#######################
# Unweighted Analysis
#######################
item37.os.cast <- mean_two_groups_unweighted(CustomerLevelData = item37.os.data
,valueVariable = "Whole.House.UA"
,byVariableRow = "HomeYearBuilt_bins3"
,byVariableColumn = "CK_Building_ID"
,columnAggregate = "Region"
,rowAggregate = "All Vintages")
names(item37.os.cast)
if(os.ind == "scl"){
item37.os.table <- data.frame("Housing.Vintage" = item37.os.cast$HomeYearBuilt_bins3
,"Mean_SCL.GenPop" = item37.os.cast$`Mean_SCL GenPop`
,"SE_SCL.GenPop" = item37.os.cast$`SE_SCL GenPop`
,"n_SCL.GenPop" = item37.os.cast$`n_SCL GenPop`
,"Mean_SCL.LI" = item37.os.cast$`Mean_SCL LI`
,"SE_SCL.LI" = item37.os.cast$`SE_SCL LI`
,"n_SCL.LI" = item37.os.cast$`n_SCL LI`
,"Mean_SCL.EH" = item37.os.cast$`Mean_SCL EH`
,"SE_SCL.EH" = item37.os.cast$`SE_SCL EH`
,"n_SCL.EH" = item37.os.cast$`n_SCL EH`
,"Mean_2017.RBSA.PS" = item37.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item37.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item37.os.cast$`n_2017 RBSA PS`)
}else if(os.ind == "snopud"){
item37.os.table <- data.frame("Housing.Vintage" = item37.os.cast$HomeYearBuilt_bins3
,"Mean_SnoPUD" = item37.os.cast$`Mean_SnoPUD`
,"SE_SnoPUD" = item37.os.cast$`SE_SnoPUD`
,"n_SnoPUD" = item37.os.cast$`n_SnoPUD`
,"Mean_2017.RBSA.PS" = item37.os.cast$`Mean_2017 RBSA PS`
,"SE_2017.RBSA.PS" = item37.os.cast$`SE_2017 RBSA PS`
,"n_2017.RBSA.PS" = item37.os.cast$`n_2017 RBSA PS`
,"Mean_RBSA.NW" = item37.os.cast$`Mean_2017 RBSA NW`
,"SE_RBSA.NW" = item37.os.cast$`SE_2017 RBSA NW`
,"n_RBSA.NW" = item37.os.cast$`n_2017 RBSA NW`)
}
levels(item37.os.table$Housing.Vintage)
rowOrder <- c("Pre 1981"
,"1981-1990"
,"1991-2000"
,"2001-2010"
,"Post 2010"
,"All Vintages")
item37.os.table <- item37.os.table %>% mutate(Housing.Vintage = factor(Housing.Vintage, levels = rowOrder)) %>% arrange(Housing.Vintage)
item37.os.table <- data.frame(item37.os.table)
exportTable(item37.os.table, "SF","Table 44",weighted = FALSE, osIndicator = export.ind, OS = T)
| /Code/Table Code/SF - OS/Items 36,37,182.R | no_license | casey-stevens/Cadmus-6000-2017 | R | false | false | 40,570 | r |
|
# Reference for data source:
# @misc{Lichman:2013,
#   author = "M. Lichman",
#   year = "2013",
#   title = "{UCI} Machine Learning Repository",
#   url = "http://archive.ics.uci.edu/ml",
#   institution = "University of California, Irvine, School of Information and Computer Sciences"}
# Decision Trees
# Source of data set: UCI Repository - Wine Quality Data (https://archive.ics.uci.edu/ml/datasets/wine+quality)
# Step 2: Exploring and preparing the data
# Read the csv file into a data frame titled WineData.
WineData <- read.table("winequality-red.csv", sep=";", header=TRUE)
head(WineData)
table(WineData$quality)
# Identify missing values graphically; in the missingness map (saved to Rplot.pdf when run non-interactively), red stripes indicate missing values.
library(Amelia)
missmap(WineData, main="Missing Data - Red Wine Quality", col=c("red","grey"), legend=FALSE)
# Data Visualization
# plot histogram of fixed acidity
library(ggplot2)
ggplot(WineData, aes(x = fixed.acidity)) +
geom_histogram(binwidth = 0.1) +
scale_x_continuous(breaks = seq(4, 16, by = 1)) +
ggtitle("Fixed Acidity distribution") +
xlab("Fixed Acidity") +
ylab("Count")
# plot histogram of Volatile Acidity
plot1 <- ggplot(WineData, aes(x = volatile.acidity)) +
geom_histogram(binwidth = 0.02) +
scale_x_continuous(breaks = seq(0, 1.6, by = 0.1)) +
ggtitle("Volatile Acidity distribution") +
xlab("Volatile Acidity") +
ylab("Count")
plot2 <- ggplot(WineData, aes(x = volatile.acidity)) +
geom_histogram(binwidth = 0.02) +
scale_x_log10(breaks = seq(0, 1.6, by = 0.5)) +
ggtitle("Volatile Acidity distribution") +
xlab("log(Volatile Acidity)") +
ylab("Count")
# gridExtra: Miscellaneous Functions for "Grid" Graphics.
library(gridExtra)
grid.arrange(plot1, plot2)
# plot histogram of pH
p1 <- ggplot(WineData, aes(x = pH)) +
geom_histogram(binwidth = 0.02) +
ggtitle("pH distribution") +
xlab("pH") +
ylab("Count")
# plot histogram of Free SO2
p2 <- ggplot(WineData, aes(x = free.sulfur.dioxide)) +
geom_histogram(binwidth = 1) +
ggtitle("Free SO2 distribution") +
xlab("Free SO2") +
ylab("Count")
# plot histogram of Total SO2
p3 <- ggplot(WineData, aes(x = total.sulfur.dioxide)) +
geom_histogram(binwidth = 3) +
ggtitle("Total SO2 distribution") +
xlab("Total SO2") +
ylab("Count")
# plot histogram of Alcohol
p4 <- ggplot(WineData, aes(x = alcohol)) +
geom_histogram(binwidth = 0.1) +
ggtitle("Alcohol distribution") +
xlab("Alcohol") +
ylab("Count")
grid.arrange(p1, p2, p3, p4, ncol = 2)
# plot histogram of Quality
ggplot(WineData, aes(x = quality)) +
geom_histogram(binwidth = 1) +
scale_x_continuous(breaks = seq(3, 8, by = 1)) +
ggtitle("Quality Distributions") +
xlab("Quality") +
ylab("Count")
# Positive correlation of alcohol and quality
ggplot(WineData, aes(x = alcohol)) +
geom_density(aes(fill = "red", color = "red")) +
facet_wrap(~quality) +
theme(legend.position = "none") +
ggtitle("Alcohol VS Quality") +
xlab("Alcohol") +
ylab("Quality")
# Negative correlation of volatile acidity and quality
ggplot(WineData, aes(x = volatile.acidity)) +
geom_density(aes(fill = "red", color = "red")) +
facet_wrap(~quality) +
theme(legend.position = "none") +
ggtitle("Volatile Acidity VS Quality") +
xlab("Volatile Acidity") +
ylab("Quality")
# Positive correlation of Free SO2 and Total SO2
ggplot(WineData, aes(x = free.sulfur.dioxide, y = total.sulfur.dioxide)) +
geom_jitter(alpha = 1/5) +
ggtitle("Free S02 vs Total SO2") +
xlab("Free SO2") +
ylab("Total SO2")
# residual sugar and quality relationship
ggplot(WineData, aes(x = residual.sugar)) +
geom_density(aes(fill = "red", color = "red")) +
facet_wrap(~quality) +
theme(legend.position = "none") +
ggtitle("Residual Sugar VS Quality") +
xlab("Residual Sugar") +
ylab("Quality")
# Density and Alcohol
ggplot(WineData, aes(x = density, y = alcohol)) +
geom_jitter(alpha = 1/2) +
ggtitle("Density VS Alcohol") +
xlab("Density") +
ylab("Alcohol")
# Creating a categorical variable for wine quality
# WineData$quality <- ifelse(WineData$quality == 3, "Lev_Three", ifelse(WineData$quality == 4, "Lev_Four", ifelse(WineData$quality == 5, "Lev_Five", ifelse(WineData$quality == 6, "Lev_Six", ifelse(WineData$quality == 7, "Lev_Seven", ifelse(WineData$quality == 8, "Lev_Eight", "Lev_Nine"))) )))
# WineData$quality <- as.factor(WineData$quality)
# str(WineData)
WineData$quality <- ifelse(WineData$quality < 5, 'bad', ifelse(WineData$quality > 6,'good','normal'))
WineData$quality <- as.factor(WineData$quality)
str(WineData$quality)
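# Illustrative check (not in the original script): inspect the class balance of the
# recoded outcome before splitting the data.
table(WineData$quality)
prop.table(table(WineData$quality))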
# Data preparation - creating random training and test datasets
# Create random sample
# Divide the data into a training set and a test set randomly with ratio 80:20
set.seed(123)
train_sample <- sample(nrow(WineData), 0.8 * nrow(WineData))
WineData_train <- WineData[train_sample, ]
WineData_test <- WineData[-train_sample, ]
# Check whether data set fairly even split
prop.table(table(WineData_train$quality))
prop.table(table(WineData_test$quality))
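# Illustrative alternative (not in the original script): caret::createDataPartition()
# draws a stratified sample, which keeps the bad/normal/good proportions closer
# between the training and test sets than a simple random sample. Left commented out
# so it does not overwrite the split created above.
# library(caret)
# in_train <- createDataPartition(WineData$quality, p = 0.8, list = FALSE)
# WineData_train <- WineData[in_train, ]
# WineData_test  <- WineData[-in_train, ]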
# Train model: C5.0 decision tree
# Training a model on the data. The C5.0 algorithm is provided by the C50 package,
# which can be installed with install.packages("C50") and loaded with library(C50).
library(C50)
WineData_model <- C5.0(WineData_train[-12], WineData_train$quality)
WineData_model
# See the tree's decisions
summary(WineData_model)
# Evaluating model performance
WineData_predict <- predict(WineData_model, WineData_test)
# Various R Programming Tools for Model Fitting
library(gmodels)
# create a cross tabulation indicating the agreement between the two vectors.
# Specifying prop.chisq = FALSE will remove the unnecessary chi-square
# values from the output.
# Setting the prop.c and prop.r parameters to FALSE removes the column and row percentages
# from the table. The remaining percentage ( prop.t ) indicates the proportion of
# records in the cell out of the total number of records:
CrossTable(WineData_test$quality, WineData_predict, prop.chisq = FALSE, prop.c= FALSE, prop.r = FALSE, dnn = c('Actual quality', 'Predicted quality'))
# Accuracy : Measures of performance
library(caret)
confusionMatrix(WineData_test$quality, WineData_predict)
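# Illustrative follow-up (not in the original script): confusionMatrix() returns an
# object whose components can be inspected directly, e.g. overall accuracy and
# per-class sensitivity/specificity.
cm <- confusionMatrix(WineData_test$quality, WineData_predict)
cm$overall["Accuracy"]
cm$byClass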
# Improving model performance
# Boosting the accuracy of decision trees
# Add the trials parameter, indicating the number of
# separate decision trees to use in the boosted ensemble.
WineData_boost10 <- C5.0(WineData_train[-12], WineData_train$quality, trials = 10)
WineData_boost10
# See all 10 trees
summary(WineData_boost10)
WineData_boost10_predict <- predict(WineData_boost10, WineData_test)
CrossTable(WineData_test$quality, WineData_boost10_predict, prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE, dnn = c('Actual Class', 'Predicted Class'))
confusionMatrix(WineData_test$quality, WineData_boost10_predict)
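# Illustrative comparison (added sketch, not in the original script): overall
# accuracy of the single C5.0 tree versus the boosted ensemble, computed
# directly from the predictions above
mean(WineData_predict == WineData_test$quality)
mean(WineData_boost10_predict == WineData_test$quality)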
|
/Decision_Trees_Algorithm/DT_Wine_Quality/DTWineQualityC50.R
|
no_license
|
AIroot/Machine-Learning-Projects-with-R
|
R
| false | false | 6,883 |
r
|
# Reference for data source (
# @misc{Lichman:2013 ,
# author = "M. Lichman",
# year = "2013",
# title = "{UCI} Machine Learning Repository",
# url = "http://archive.ics.uci.edu/ml",
# institution = "University of California, Irvine, School of Information and Computer Sciences" })
# Decision Trees
# Source of Data Set:- UCI Repository - Wine Quality Data(https://archive.ics.uci.edu/ml/datasets/wine+quality)
# Exploring and preparing the data
# Step 2: Exploring and preparing the data
# Read the csv file into a data frame titled WineData.
WineData <- read.table("winequality-red.csv", sep=";", header=TRUE)
head(WineData)
table(WineData$quality)
# Identify missing values using graphical view. See the Rplot.pdf and red colour stripes indicate the missing values.
library(Amelia)
missmap(WineData, main="Missing Data - Red Wine Quality", col=c("red","grey"), legend=FALSE)
# Data Visualization
# plot histogram of fixed acidity
library(ggplot2)
ggplot(WineData, aes(x = fixed.acidity)) +
geom_histogram(binwidth = 0.1) +
scale_x_continuous(breaks = seq(4, 16, by = 1)) +
ggtitle("Fixed Acidity distribution") +
xlab("Fixed Acidity") +
ylab("Count")
# plot histogram of Volatile Acidity
plot1 <- ggplot(WineData, aes(x = volatile.acidity)) +
geom_histogram(binwidth = 0.02) +
scale_x_continuous(breaks = seq(0, 1.6, by = 0.1)) +
ggtitle("Volatile Acidity distribution") +
xlab("Volatile Acidity") +
ylab("Count")
plot2 <- ggplot(WineData, aes(x = volatile.acidity)) +
geom_histogram(binwidth = 0.02) +
scale_x_log10(breaks = seq(0, 1.6, by = 0.5)) +
ggtitle("Volatile Acidity distribution") +
xlab("log(Volatile Acidity)") +
ylab("Count")
# gridExtra: Miscellaneous Functions for "Grid" Graphics.
library(gridExtra)
grid.arrange(plot1, plot2)
# plot histogram of pH
p1 <- ggplot(WineData, aes(x = pH)) +
geom_histogram(binwidth = 0.02) +
ggtitle("pH distribution") +
xlab("pH") +
ylab("Count")
# plot histogram of Free SO2
p2 <- ggplot(WineData, aes(x = free.sulfur.dioxide)) +
geom_histogram(binwidth = 1) +
ggtitle("Free SO2 distribution") +
xlab("Free SO2") +
ylab("Count")
# plot histogram of Total SO2
p3 <- ggplot(WineData, aes(x = total.sulfur.dioxide)) +
geom_histogram(binwidth = 3) +
ggtitle("Total SO2 distribution") +
xlab("Total SO2") +
ylab("Count")
# plot histogram of Alcohol
p4 <- ggplot(WineData, aes(x = alcohol)) +
geom_histogram(binwidth = 0.1) +
ggtitle("Alcohol distribution") +
xlab("Alcohol") +
ylab("Count")
grid.arrange(p1, p2, p3, p4, ncol = 2)
# plot histogram of Quality
ggplot(WineData, aes(x = quality)) +
geom_histogram(binwidth = 1) +
scale_x_continuous(breaks = seq(3, 8, by = 1)) +
ggtitle("Quality Distributions") +
xlab("Quality") +
ylab("Count")
# Positive correlation of alcohol and quality
ggplot(WineData, aes(x = alcohol)) +
geom_density(aes(fill = "red", color = "red")) +
facet_wrap(~quality) +
theme(legend.position = "none") +
ggtitle("Alcohol VS Quality") +
xlab("Alcohol") +
ylab("Quality")
# Negative correlation of volatile acidity and quality
ggplot(WineData, aes(x = volatile.acidity)) +
geom_density(aes(fill = "red", color = "red")) +
facet_wrap(~quality) +
theme(legend.position = "none") +
ggtitle("Volatile Acidity VS Quality") +
xlab("Volatile Acidity") +
ylab("Quality")
# Positive correlation of Free SO2 and Total SO2
ggplot(WineData, aes(x = free.sulfur.dioxide, y = total.sulfur.dioxide)) +
geom_jitter(alpha = 1/5) +
ggtitle("Free S02 vs Total SO2") +
xlab("Free SO2") +
ylab("Total SO2")
# residual sugar and quality relationship
ggplot(WineData, aes(x = residual.sugar)) +
geom_density(aes(fill = "red", color = "red")) +
facet_wrap(~quality) +
theme(legend.position = "none") +
ggtitle("Residual Sugar VS Quality") +
xlab("Residual Sugar") +
ylab("Quality")
# Density and Alcohol
ggplot(WineData, aes(x = density, y = alcohol)) +
geom_jitter(alpha = 1/2) +
ggtitle("Density VS Alcohol") +
xlab("Density") +
ylab("Alcohol")
# Creating a categorical variable for wine quality
# WineData$quality <- ifelse(WineData$quality == 3, "Lev_Three", ifelse(WineData$quality == 4, "Lev_Four", ifelse(WineData$quality == 5, "Lev_Five", ifelse(WineData$quality == 6, "Lev_Six", ifelse(WineData$quality == 7, "Lev_Seven", ifelse(WineData$quality == 8, "Lev_Eight", "Lev_Nine"))) )))
# WineData$quality <- as.factor(WineData$quality)
# str(WineData)
WineData$quality <- ifelse(WineData$quality < 5, 'bad', ifelse(WineData$quality > 6,'good','normal'))
WineData$quality <- as.factor(WineData$quality)
str(WineData$quality)
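# Quick sanity check (illustrative addition, not part of the original script):
# counts of wines in each of the three recoded quality classes
table(WineData$quality)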
# Data preparation - creating random training and test datasets
# Create random sample
# Divide the data into a training set and a test set randomly with ratio 80:20
set.seed(123)
train_sample <- sample(nrow(WineData), 0.8 * nrow(WineData))
WineData_train <- WineData[train_sample, ]
WineData_test <- WineData[-train_sample, ]
# Check whether data set fairly even split
prop.table(table(WineData_train$quality))
prop.table(table(WineData_test$quality))
# Train model
# # C5.0
# # # Training a model on the data
# # # The C5.0 package can be installed via the install.packages("C50") and
# # # loaded with the library(C50) command.
library(C50)
WineData_model <- C5.0(WineData_train[-12], WineData_train$quality)
WineData_model
# See the tree's decisions
summary(WineData_model)
# Evaluating model performance
WineData_predict <- predict(WineData_model, WineData_test)
# Various R Programming Tools for Model Fitting
library(gmodels)
# create a cross tabulation indicating the agreement between the two vectors.
# Specifying prop.chisq = FALSE will remove the unnecessary chi-square
# values from the output.
# Setting the prop.c and prop.r parameters to FALSE removes the column and row percentages
# from the table. The remaining percentage ( prop.t ) indicates the proportion of
# records in the cell out of the total number of records:
CrossTable(WineData_test$quality, WineData_predict, prop.chisq = FALSE, prop.c= FALSE, prop.r = FALSE, dnn = c('Actual quality', 'Predicted quality'))
# Accuracy : Measures of performance
library(caret)
confusionMatrix(WineData_test$quality, WineData_predict)
# Improving model performance
# Boosting the accuracy of decision trees
# Add additional trials parameter indicating the number of
# separate decision trees to use in the boosted team.
WineData_boost10 <- C5.0(WineData_train[-12], WineData_train$quality, trials = 10)
WineData_boost10
# See all 10 trees
summary(WineData_boost10)
WineData_boost10_predict <- predict(WineData_boost10, WineData_test)
CrossTable(WineData_test$quality, WineData_boost10_predict, prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE, dnn = c('Actual Class', 'Predicted Class'))
confusionMatrix(WineData_test$quality, WineData_boost10_predict)
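# Illustrative comparison (added sketch, not in the original script): overall
# accuracy of the single C5.0 tree versus the boosted ensemble, computed
# directly from the predictions above
mean(WineData_predict == WineData_test$quality)
mean(WineData_boost10_predict == WineData_test$quality)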
|
context("Testing realtime functions")
test_that("realtime_ws returns the correct data header", {
skip_on_cran()
skip_on_travis()
token_out <- token_ws()
ws_test <- realtime_ws(station_number = "08MF005",
parameters = c(46), ## Water level and temperature
start_date = Sys.Date(),
end_date = Sys.Date(),
token = token_out)
expect_identical(colnames(ws_test),
c("STATION_NUMBER", "Date", "Name_En", "Value", "Unit", "Grade",
"Symbol", "Approval", "Parameter", "Code"))
## Turned #42 into a test
expect_is(ws_test$Value, "numeric")
})
test_that("realtime_dd returns the correct data header", {
skip_on_cran()
expect_identical(
colnames(realtime_dd(station_number = "08MF005", prov_terr_state_loc = "BC")),
c("STATION_NUMBER", "PROV_TERR_STATE_LOC", "Date", "Parameter", "Value", "Grade", "Symbol", "Code")
)
})
test_that("realtime_dd can download stations from multiple provinces using prov_terr_state_loc", {
skip_on_cran()
expect_silent(realtime_dd(prov_terr_state_loc = c("QC", "PE")))
})
test_that("realtime_dd can download stations from multiple provinces using station_number", {
skip_on_cran()
expect_error(realtime_dd(station_number = c("01CD005", "08MF005")), regexp = NA)
})
test_that("When station_number is ALL there is an error", {
skip_on_cran()
expect_error(realtime_dd(station_number = "ALL"))
})
|
/tests/testthat/test_download_realtime.R
|
permissive
|
jongoetz/tidyhydat
|
R
| false | false | 1,536 |
r
|
context("Testing realtime functions")
test_that("realtime_ws returns the correct data header", {
skip_on_cran()
skip_on_travis()
token_out <- token_ws()
ws_test <- realtime_ws(station_number = "08MF005",
parameters = c(46), ## Water level and temperature
start_date = Sys.Date(),
end_date = Sys.Date(),
token = token_out)
expect_identical(colnames(ws_test),
c("STATION_NUMBER", "Date", "Name_En", "Value", "Unit", "Grade",
"Symbol", "Approval", "Parameter", "Code"))
## Turned #42 into a test
expect_is(ws_test$Value, "numeric")
})
test_that("realtime_dd returns the correct data header", {
skip_on_cran()
expect_identical(
colnames(realtime_dd(station_number = "08MF005", prov_terr_state_loc = "BC")),
c("STATION_NUMBER", "PROV_TERR_STATE_LOC", "Date", "Parameter", "Value", "Grade", "Symbol", "Code")
)
})
test_that("realtime_dd can download stations from multiple provinces using prov_terr_state_loc", {
skip_on_cran()
expect_silent(realtime_dd(prov_terr_state_loc = c("QC", "PE")))
})
test_that("realtime_dd can download stations from multiple provinces using station_number", {
skip_on_cran()
expect_error(realtime_dd(station_number = c("01CD005", "08MF005")), regexp = NA)
})
test_that("When station_number is ALL there is an error", {
skip_on_cran()
expect_error(realtime_dd(station_number = "ALL"))
})
|
#### search between grip and byrd
#load('/Volumes/YANGXIAO/alldata/gripbyrdsync')
setwd('/Users/yangxiao/Dropbox/Research/PaleoSearch backup/alldata')
load('gripbyrdsync')
source('phaseshiftest.R')
source('filterPaleo.R')
source('xyrange.R')
source('interpPaleo.R')
fp=c(1/10000,1/800)
N=50
df=(fp[2]-fp[1])/(N-1)
f1=seq(fp[1],fp[2],length=N)
M=50
fpi2=matrix(nrow=N,ncol=N)
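# For every band-pass window (f1[i], f2[j]): filter both records, estimate the
# best-fitting phase shift (y[[1]]) and its peak correlation (y[[2]]) with
# fitPhase, and store that correlation weighted by sin(phase*pi)^3, which
# peaks for shifts near pi/2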
for (i in 1:N) {
f2=seq(f1[i],fp[2],by=df)
n2=length(f2)
for (j in 1:(N+1-i)) {
datafil=bwfilter(data,cut=c(f1[i],f2[j]),type='pass',PLOT=F)
data2=datafil[[1]]
y=fitPhase(data2[[1]]$y,data2[[2]]$y,N=M,PLOT=F)
fpi2[i,(j+i-1)]=y[[2]]*(sin(y[[1]]*pi))^3
}
}
f2=f1
quartz(width=12*1.2,height=12)
filled.contour(f1,f2,fpi2,color=rainbow,ann=T,axes=T)
title(xlab='lower frequency',ylab='higher frequency',main='pi/2 phase shift between GRIP and BYRD (methane sync) data\n Search range: 1/10ky to 1/800\n (50 intervals) with sin^3',cex.lab=1.5)
### output table
p=locator(type='p',pch=20,col='white',cex=1.3)
text(p$x,p$y,labels=as.character(1:length(p$x)),pos=3,col='white',cex=1.3)
n3=length(p$x)
corph=vector('list',length=n3)
for (i in 1:n3) {
f=c(p$x[i],p$y[i])
datafil=bwfilter(data,cut=c(f[1],f[2]),type='pass',PLOT=F)
data2=datafil[[1]]
corph[[i]]=fitPhase(data2[[1]]$y,data2[[2]]$y,N=200,PLOT=F)
}
k=unlist(corph)
k1=matrix(k,ncol=2,byrow=T)
fpi21=k1[,2]*(sin(k1[,1]*pi))^3
k2=cbind(p$x,p$y)
k=data.frame(k2,k1,fpi21)
names(k)=c('bp_low','bp_high','phase shift','maxcorr','fpi2')
#### plot examples
f=c(0.00038,0.00051)
datafil=bwfilter(data,cut=c(f[1],f[2]),type='pass',PLOT=F)
data2=datafil[[1]]
r=fitPhase(data2[[1]]$y,data2[[2]]$y,N=200,PLOT=F)
r1=phshift(data2[[2]]$y,r[[1]])
quartz(width=12,height=6)
plot(y~t,data2[[1]],type='l',col='black',xlab='Year BP',ylab='')
lines(y~t,data2[[2]],col='grey')
lines(data2[[1]]$t,r1,col='red')
title(main=paste('Red is', format(r[[1]]/.5,digits=3) ,'(*pi/2) phase shift of grey, correlation between red and black:',format(r[[2]],digits=3)))
legend('bottomright',bty='n',legend=c('GRIP','BYRD'),col=c('black','grey'),lty=1)
|
/PaleoAnalyze/freqbandsearch.R
|
no_license
|
seanyx/TimeSeries
|
R
| false | false | 2,070 |
r
|
#### search between grip and byrd
#load('/Volumes/YANGXIAO/alldata/gripbyrdsync')
setwd('/Users/yangxiao/Dropbox/Research/PaleoSearch backup/alldata')
load('gripbyrdsync')
source('phaseshiftest.R')
source('filterPaleo.R')
source('xyrange.R')
source('interpPaleo.R')
fp=c(1/10000,1/800)
N=50
df=(fp[2]-fp[1])/(N-1)
f1=seq(fp[1],fp[2],length=N)
M=50
fpi2=matrix(nrow=N,ncol=N)
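# For every band-pass window (f1[i], f2[j]): filter both records, estimate the
# best-fitting phase shift (y[[1]]) and its peak correlation (y[[2]]) with
# fitPhase, and store that correlation weighted by sin(phase*pi)^3, which
# peaks for shifts near pi/2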
for (i in 1:N) {
f2=seq(f1[i],fp[2],by=df)
n2=length(f2)
for (j in 1:(N+1-i)) {
datafil=bwfilter(data,cut=c(f1[i],f2[j]),type='pass',PLOT=F)
data2=datafil[[1]]
y=fitPhase(data2[[1]]$y,data2[[2]]$y,N=M,PLOT=F)
fpi2[i,(j+i-1)]=y[[2]]*(sin(y[[1]]*pi))^3
}
}
f2=f1
quartz(width=12*1.2,height=12)
filled.contour(f1,f2,fpi2,color=rainbow,ann=T,axes=T)
title(xlab='lower frequency',ylab='higher frequency',main='pi/2 phase shift between GRIP and BYRD (methane sync) data\n Search range: 1/10ky to 1/800\n (50 intervals) with sin^3',cex.lab=1.5)
### output table
p=locator(type='p',pch=20,col='white',cex=1.3)
text(p$x,p$y,labels=as.character(1:length(p$x)),pos=3,col='white',cex=1.3)
n3=length(p$x)
corph=vector('list',length=n3)
for (i in 1:n3) {
f=c(p$x[i],p$y[i])
datafil=bwfilter(data,cut=c(f[1],f[2]),type='pass',PLOT=F)
data2=datafil[[1]]
corph[[i]]=fitPhase(data2[[1]]$y,data2[[2]]$y,N=200,PLOT=F)
}
k=unlist(corph)
k1=matrix(k,ncol=2,byrow=T)
fpi21=k1[,2]*(sin(k1[,1]*pi))^3
k2=cbind(p$x,p$y)
k=data.frame(k2,k1,fpi21)
names(k)=c('bp_low','bp_high','phase shift','maxcorr','fpi2')
#### plot examples
f=c(0.00038,0.00051)
datafil=bwfilter(data,cut=c(f[1],f[2]),type='pass',PLOT=F)
data2=datafil[[1]]
r=fitPhase(data2[[1]]$y,data2[[2]]$y,N=200,PLOT=F)
r1=phshift(data2[[2]]$y,r[[1]])
quartz(width=12,height=6)
plot(y~t,data2[[1]],type='l',col='black',xlab='Year BP',ylab='')
lines(y~t,data2[[2]],col='grey')
lines(data2[[1]]$t,r1,col='red')
title(main=paste('Red is', format(r[[1]]/.5,digits=3) ,'(*pi/2) phase shift of grey, correlation between red and black:',format(r[[2]],digits=3)))
legend('bottomright',bty='n',legend=c('GRIP','BYRD'),col=c('black','grey'),lty=1)
|
#Note to self: [Rows,Column]
#Reset and read the data
rm(list=ls())
mydata= read.table(file= "clipboard",sep= "\t",header =T) #header row included with data
newdata=mydata
mydata=newdata
##Transformations of the data to make the residuals better
mydata[,1] = log(mydata[,1], base = 10) #log10 of the growth data to make it usable by a linear model
bf = lm(formula = Biofilm ~ Growth + Source, data = mydata)
bf = lm(formula = Biofilm ~ Source + Growth, data = mydata)
bf2 = lm(formula = Biofilm ~ Growth, data = mydata)
bf3 = lm(formula = Biofilm ~ Source, data = mydata)
summary(bf)
summary(bf2)
summary(bf3)
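# Illustrative addition (not in the original script): a quick AIC comparison of
# the three biofilm models fitted above
AIC(bf, bf2, bf3)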
so = lm(formula = Growth ~ Biofilm + Source, data = mydata)
so = lm(formula = Growth ~ Biofilm, data = mydata)
summary(so)
plot(bf$residuals, pch = 16, col = "red")
plot(mydata$Growth, newdata$Biofilm, pch = 16, cex = 1, col = "blue")
library(ggplot2)
p = ggplot(data = mydata, mapping = aes(x = Growth, y = Biofilm))+geom_point(aes(colour = factor(Source)),cex=2)
p+theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black", size=1),
axis.title.x=element_blank(),
#axis.text.x=element_blank()),
axis.ticks=element_line(colour = "black", size =1),
axis.ticks.length = unit(5,"points") ,
axis.title.y = element_blank(),
legend.position = "none"
) +geom_smooth(method=glm, color='#2C3E50',linetype="dashed",se=T)
##Drawing a line based on the pre-generated linear model
p+geom_abline(slope = bf2$coefficients[2], intercept = bf2$coefficients[1],col= "black",cex=1,linetype="dashed")
|
/Biofilm/Linear model building.R
|
no_license
|
nunngm/StatAnalysis
|
R
| false | false | 1,662 |
r
|
#Note to self: [Rows,Column]
#Reset and read the data
rm(list=ls())
mydata= read.table(file= "clipboard",sep= "\t",header =T) #header row included with data
newdata=mydata
mydata=newdata
##Transformations of the data to make the residuals better
mydata[,1] = log(mydata[,1], base = 10) #log10 of the growth data to make it usable by a linear model
bf = lm(formula = Biofilm ~ Growth + Source, data = mydata)
bf = lm(formula = Biofilm ~ Source + Growth, data = mydata)
bf2 = lm(formula = Biofilm ~ Growth, data = mydata)
bf3 = lm(formula = Biofilm ~ Source, data = mydata)
summary(bf)
summary(bf2)
summary(bf3)
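# Illustrative addition (not in the original script): a quick AIC comparison of
# the three biofilm models fitted above
AIC(bf, bf2, bf3)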
so = lm(formula = Growth ~ Biofilm + Source, data = mydata)
so = lm(formula = Growth ~ Biofilm, data = mydata)
summary(so)
plot(bf$residuals, pch = 16, col = "red")
plot(mydata$Growth, newdata$Biofilm, pch = 16, cex = 1, col = "blue")
library(ggplot2)
p = ggplot(data = mydata, mapping = aes(x = Growth, y = Biofilm))+geom_point(aes(colour = factor(Source)),cex=2)
p+theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black", size=1),
axis.title.x=element_blank(),
#axis.text.x=element_blank()),
axis.ticks=element_line(colour = "black", size =1),
axis.ticks.length = unit(5,"points") ,
axis.title.y = element_blank(),
legend.position = "none"
) +geom_smooth(method=glm, color='#2C3E50',linetype="dashed",se=T)
##Drawing a line based on the pre-generated linear model
p+geom_abline(slope = bf2$coefficients[2], intercept = bf2$coefficients[1],col= "black",cex=1,linetype="dashed")
|
uploadDir <- system.file("extdata/bcbio", package = "bcbioRNASeq")
lfc <- 0.25
plotlist <- c("tree_row", "tree_col", "kmeans", "gtable")
|
/tests/testthat/helper_02_globals.R
|
permissive
|
WeiSong-bio/roryk-bcbioRNASeq
|
R
| false | false | 137 |
r
|
uploadDir <- system.file("extdata/bcbio", package = "bcbioRNASeq")
lfc <- 0.25
plotlist <- c("tree_row", "tree_col", "kmeans", "gtable")
|
library(plotrix); library(lattice); library(pso); library(car)
# George Kachergis July 7, 2012
# fit associative model and Fazly model using MLE to the
# frequency/CD experiments, subject-by-subject
make_exp3_ind_graphs <- function() {
source("paper_graphs_ggplot2.R")
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
load("freqCD_tilles_model_MLE_PSOfits.RData")
freq_graph(raw, tilfreq, "tilles")
}
make_exp3_group_graphs <- function() {
source("paper_graphs_ggplot2.R")
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
load("freq_tilles_model_MLE_fitPSO_group.RData")
freq_graph_group(raw, tilfreq, "tilles_group")
}
#assfreq <- fit_by_subj("model", PSO=F)
#save(assfreq, file="freqCD_assoc_model_MLE_fits.RData") # 32.96
# Exp 3: CD and Frequency
#best <- fit(c(.2146, 6.931, .982)) # Grow M SSE=.064 1/100 startval
binomial_likelihood <- function(cdat, M) {
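# cdat: one subject's test responses; M: word-object association matrix.
# Each test word is scored as a Bernoulli trial with p(correct) = diag(M)/rowSums(M)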
est_prob = diag(M) / rowSums(M) # prob correct
lik = 0
for(i in 1:dim(M)[1]) {
resp = cdat[which(cdat$CorrectAns==i),]$Response
if(resp==i) {
lik = lik + log(est_prob[i])
} else {
lik = lik + log(1-est_prob[i])
}
}
return(lik) # 18*log(1/18) = -52.02669 for guessing
}
multinomial_likelihood <- function(cdat, M) {
M = M / rowSums(M) # p(o|w)
lik = 0
for(i in 1:dim(M)[1]) {
resp = cdat[which(cdat$CorrectAns==i),]$Response
lik = lik + log(M[i,resp])
}
return(lik) # 18*log(1/18) = -52.02669 for guessing
}
fit_subj <- function(par, orders, sdat) {
# ords is all of the orders
# sdat has order file names and responses
tot_lik = 0
for(f in unique(sdat$file)) {
o <- orders[[f]] # $trials ?
#print(paste("Sub",sdat$Subject[1]," File",f))
M <- model(par, ord=o)
# need: for each word (CorrectAns), what object they chose (Response)
tot_lik = tot_lik + binomial_likelihood(subset(sdat, file==f), M)
}
mlik = tot_lik / length(unique(sdat$file))
#print(par)
#print(mlik)
return(-mlik)
}
fit_all <- function(par, orders, dat) {
# ords is all of the orders
# dat has order file names and responses
tot_lik = 0
for(s in unique(dat$Subject)) {
sdat <- subset(dat, Subject==s)
for(f in unique(sdat$file)) {
#print(paste(s,f))
o <- orders[[f]]
#print(paste("Sub",sdat$Subject[1]," File",f))
M <- model(par, ord=o)
# need: for each word (CorrectAns), what object they chose (Response)
tot_lik = tot_lik + binomial_likelihood(subset(sdat, file==f), M)
}
}
mlik = tot_lik #/ length(unique(dat$Subject))
#print(par)
#print(mlik)
return(-mlik)
}
fit_all_MLE <- function(modeln, PSO=FALSE) {
source(paste(modeln,".R",sep=""))
load("master_orders.RData") # orders
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
agg <- subset(raw, Subject!=1224) # block2_369-3x3hiCD somehow had 24 test trials??
agg <- subset(agg, Exp==12) #
agg <- with(agg, aggregate(Correct, list(CorrectAns=CorrectAns, Response=Response, Subject=Subject, file=file), mean))
controls = list(maxit=200, max.restart=2, reltol=.01)
#agg <- with(agg, aggregate(Correct, list(Subject=Subject, file=file), mean))
agg$X <- NA
agg$B <- NA
agg$A <- NA
parcols = c("X","B","A")
initp = c(.6, .8, .85)
lowerp = c(0,0,.27) # parm 3 > .268 for "block2_369-3x3hiCD" ...see tilles.R
upperp = c(1,1,1)
if(PSO) {
best <- psoptim(initp, fit_all, orders=orders, dat=agg, lower=lowerp, upper=upperp, control=controls)
# control=list(maxit=500, reltol=1e-4) # something like "max.reps=2", too?
} else {
best <- optim(initp, fit_all, orders=orders, dat=agg, lower=lowerp, upper=upperp, method="L-BFGS-B") # , control=list(parscale=c(10,10,10), maxit=20)
}
ret <- list()
for(f in unique(agg$file)) {
mp = model(best$par, orders[[f]])
ret[[f]] <- diag(mp) / rowSums(mp)
}
ret$par = best$par
ret$ML = best$value
return(ret)
}
fit_by_subj <- function(modeln, PSO=FALSE) {
source(paste(modeln,".R",sep=""))
load("master_orders.RData") # orders
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
agg <- subset(raw, Subject!=1224) # block2_369-3x3hiCD somehow had 24 test trials??
agg <- subset(agg, Exp==12) #
agg <- with(agg, aggregate(Correct, list(CorrectAns=CorrectAns, Response=Response, Subject=Subject, file=file), mean))
controls = list(maxit=100, max.restart=2) # reltol=.1
#agg <- with(agg, aggregate(Correct, list(Subject=Subject, file=file), mean))
agg$X <- NA
agg$B <- NA
agg$A <- NA
parcols = c("X","B","A")
initp = c(.6, .8, .85)
lowerp = c(0.0001,0.0001,0.0001)
upperp = c(1,1,1)
agg$ML <- NA
agg$Model <- NA
for(s in unique(agg$Subject)) {
sdat <- subset(agg, Subject==s)
if(PSO) {
best <- psoptim(initp, fit_subj, orders=orders, sdat=sdat, lower=lowerp, upper=upperp, control=controls)
} else {
best <- optim(initp, fit_subj, orders=orders, sdat=sdat, lower=lowerp, upper=upperp, method="L-BFGS-B") # , control=list(parscale=c(10,10,10), maxit=20)
}
for(f in unique(sdat$file)) {
rows <- which(agg$Subject==s & agg$file==f)
mp = model(best$par, orders[[f]])
agg[rows,parcols] = matrix(rep(best$par, length(rows)), nrow=length(rows), byrow=T)
agg[rows,]$ML = best$value
agg[rows,]$Model <- diag(mp) / rowSums(mp)
}
print(agg[rows[1],])
}
return(agg)
}
#assfreq <- fit_by_subj("model", PSO=T)
#save(assfreq, file="freqCD_assoc_model_MLE_PSOfits.RData")
#print(paste("assoc PSO:",mean(assfreq$ML))) #
#fazfreq <- fit_by_subj("fazly", PSO=T)
#save(fazfreq, file="freqCD_fazly_model_MLE_PSOfits.RData")
#print(paste("fazly PSO:",mean(fazfreq$ML))) #
#tilfreq <- fit_all_MLE("tilles", PSO=T)
#save(tilfreq, file="freq_tilles_model_MLE_fitPSO_group.RData")
#print(paste("tilles all PSO:",tilfreq$ML))
# "tilles all PSO: 2450.6612888634"
tilfreq <- fit_by_subj("fazly", PSO=T)
save(tilfreq, file="freqCD_tilles_model_MLE_PSOfits.RData")
print(paste("tilles PSO:",mean(tilfreq$ML)))
# "tilles PSO: 12.725226234322"
make_exp3_group_graphs()
|
/main_freq_MLE_tilles.R
|
no_license
|
mcfrank/word_learning_models
|
R
| false | false | 5,913 |
r
|
library(plotrix); library(lattice); library(pso); library(car)
# George Kachergis July 7, 2012
# fit associative model and Fazly model using MLE to the
# frequency/CD experiments, subject-by-subject
make_exp3_ind_graphs <- function() {
source("paper_graphs_ggplot2.R")
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
load("freqCD_tilles_model_MLE_PSOfits.RData")
freq_graph(raw, tilfreq, "tilles")
}
make_exp3_group_graphs <- function() {
source("paper_graphs_ggplot2.R")
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
load("freq_tilles_model_MLE_fitPSO_group.RData")
freq_graph_group(raw, tilfreq, "tilles_group")
}
#assfreq <- fit_by_subj("model", PSO=F)
#save(assfreq, file="freqCD_assoc_model_MLE_fits.RData") # 32.96
# Exp 3: CD and Frequency
#best <- fit(c(.2146, 6.931, .982)) # Grow M SSE=.064 1/100 startval
binomial_likelihood <- function(cdat, M) {
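# cdat: one subject's test responses; M: word-object association matrix.
# Each test word is scored as a Bernoulli trial with p(correct) = diag(M)/rowSums(M)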
est_prob = diag(M) / rowSums(M) # prob correct
lik = 0
for(i in 1:dim(M)[1]) {
resp = cdat[which(cdat$CorrectAns==i),]$Response
if(resp==i) {
lik = lik + log(est_prob[i])
} else {
lik = lik + log(1-est_prob[i])
}
}
return(lik) # 18*log(1/18) = -52.02669 for guessing
}
multinomial_likelihood <- function(cdat, M) {
M = M / rowSums(M) # p(o|w)
lik = 0
for(i in 1:dim(M)[1]) {
resp = cdat[which(cdat$CorrectAns==i),]$Response
lik = lik + log(M[i,resp])
}
return(lik) # 18*log(1/18) = -52.02669 for guessing
}
fit_subj <- function(par, orders, sdat) {
# ords is all of the orders
# sdat has order file names and responses
tot_lik = 0
for(f in unique(sdat$file)) {
o <- orders[[f]] # $trials ?
#print(paste("Sub",sdat$Subject[1]," File",f))
M <- model(par, ord=o)
# need: for each word (CorrectAns), what object they chose (Response)
tot_lik = tot_lik + binomial_likelihood(subset(sdat, file==f), M)
}
mlik = tot_lik / length(unique(sdat$file))
#print(par)
#print(mlik)
return(-mlik)
}
fit_all <- function(par, orders, dat) {
# ords is all of the orders
# dat has order file names and responses
tot_lik = 0
for(s in unique(dat$Subject)) {
sdat <- subset(dat, Subject==s)
for(f in unique(sdat$file)) {
#print(paste(s,f))
o <- orders[[f]]
#print(paste("Sub",sdat$Subject[1]," File",f))
M <- model(par, ord=o)
# need: for each word (CorrectAns), what object they chose (Response)
tot_lik = tot_lik + binomial_likelihood(subset(sdat, file==f), M)
}
}
mlik = tot_lik #/ length(unique(dat$Subject))
#print(par)
#print(mlik)
return(-mlik)
}
fit_all_MLE <- function(modeln, PSO=FALSE) {
source(paste(modeln,".R",sep=""))
load("master_orders.RData") # orders
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
agg <- subset(raw, Subject!=1224) # block2_369-3x3hiCD somehow had 24 test trials??
agg <- subset(agg, Exp==12) #
agg <- with(agg, aggregate(Correct, list(CorrectAns=CorrectAns, Response=Response, Subject=Subject, file=file), mean))
controls = list(maxit=200, max.restart=2, reltol=.01)
#agg <- with(agg, aggregate(Correct, list(Subject=Subject, file=file), mean))
agg$X <- NA
agg$B <- NA
agg$A <- NA
parcols = c("X","B","A")
initp = c(.6, .8, .85)
lowerp = c(0,0,.27) # parm 3 > .268 for "block2_369-3x3hiCD" ...see tilles.R
upperp = c(1,1,1)
if(PSO) {
best <- psoptim(initp, fit_all, orders=orders, dat=agg, lower=lowerp, upper=upperp, control=controls)
# control=list(maxit=500, reltol=1e-4) # something like "max.reps=2", too?
} else {
best <- optim(initp, fit_all, orders=orders, dat=agg, lower=lowerp, upper=upperp, method="L-BFGS-B") # , control=list(parscale=c(10,10,10), maxit=20)
}
ret <- list()
for(f in unique(agg$file)) {
mp = model(best$par, orders[[f]])
ret[[f]] <- diag(mp) / rowSums(mp)
}
ret$par = best$par
ret$ML = best$value
return(ret)
}
fit_by_subj <- function(modeln, PSO=FALSE) {
source(paste(modeln,".R",sep=""))
load("master_orders.RData") # orders
load("humans/freqCD_data_exps8_9_10_12.Rdata") # raw 159 Ss
agg <- subset(raw, Subject!=1224) # block2_369-3x3hiCD somehow had 24 test trials??
agg <- subset(agg, Exp==12) #
agg <- with(agg, aggregate(Correct, list(CorrectAns=CorrectAns, Response=Response, Subject=Subject, file=file), mean))
controls = list(maxit=100, max.restart=2) # reltol=.1
#agg <- with(agg, aggregate(Correct, list(Subject=Subject, file=file), mean))
agg$X <- NA
agg$B <- NA
agg$A <- NA
parcols = c("X","B","A")
initp = c(.6, .8, .85)
lowerp = c(0.0001,0.0001,0.0001)
upperp = c(1,1,1)
agg$ML <- NA
agg$Model <- NA
for(s in unique(agg$Subject)) {
sdat <- subset(agg, Subject==s)
if(PSO) {
best <- psoptim(initp, fit_subj, orders=orders, sdat=sdat, lower=lowerp, upper=upperp, control=controls)
} else {
best <- optim(initp, fit_subj, orders=orders, sdat=sdat, lower=lowerp, upper=upperp, method="L-BFGS-B") # , control=list(parscale=c(10,10,10), maxit=20)
}
for(f in unique(sdat$file)) {
rows <- which(agg$Subject==s & agg$file==f)
mp = model(best$par, orders[[f]])
agg[rows,parcols] = matrix(rep(best$par, length(rows)), nrow=length(rows), byrow=T)
agg[rows,]$ML = best$value
agg[rows,]$Model <- diag(mp) / rowSums(mp)
}
print(agg[rows[1],])
}
return(agg)
}
#assfreq <- fit_by_subj("model", PSO=T)
#save(assfreq, file="freqCD_assoc_model_MLE_PSOfits.RData")
#print(paste("assoc PSO:",mean(assfreq$ML))) #
#fazfreq <- fit_by_subj("fazly", PSO=T)
#save(fazfreq, file="freqCD_fazly_model_MLE_PSOfits.RData")
#print(paste("fazly PSO:",mean(fazfreq$ML))) #
#tilfreq <- fit_all_MLE("tilles", PSO=T)
#save(tilfreq, file="freq_tilles_model_MLE_fitPSO_group.RData")
#print(paste("tilles all PSO:",tilfreq$ML))
# "tilles all PSO: 2450.6612888634"
tilfreq <- fit_by_subj("fazly", PSO=T)
save(tilfreq, file="freqCD_tilles_model_MLE_PSOfits.RData")
print(paste("tilles PSO:",mean(tilfreq$ML)))
# "tilles PSO: 12.725226234322"
make_exp3_group_graphs()
|
lista_de_paquetes <- c("curl","ggplot2", "grid") # Definimos los paquetes que queremos cargar
paquetes_nuevos <- lista_de_paquetes[!(lista_de_paquetes %in% installed.packages()[,"Package"])] # Buscamos los paquetes que no tenemos
if(length(paquetes_nuevos)) install.packages(paquetes_nuevos) # Instalamos los paquetes que no tenemos
# Cargamos las librerías necesarias
library(curl)
library(ggplot2)
library(grid)
# Read the data
datos=read.csv(curl("https://raw.githubusercontent.com/estadisticavlc/Datos/master/Indicadores.csv"),sep=",",dec=",")
datos$VALOR<-as.numeric(as.character(datos$VALOR))
datos$FECHA=as.Date(paste0(as.character(datos$ANO),"-",as.character(datos$MES),"-15"))
# Select the time period and the indicators
datos=datos[datos$ANO>=2008,]
datos=datos[datos$codigo%in%c("Ind16","Ind17"),]
datos$INDICADOR=""
datos$INDICADOR[datos$codigo=="Ind16"]="Autobuses EMT"
datos$INDICADOR[datos$codigo=="Ind17"]="Metrovalencia"
# Plot the time series
options(scipen=5)
p=ggplot(data=datos, aes(FECHA,VALOR,col=codigo)) +
geom_line(size=1.5,aes(color=INDICADOR)) +
theme_bw() +
labs(title="Evolución mensual de los pasajeros de EMT y Metrovalencia desde 2008",
y="Pasajeros",
x=NULL,
caption="Fuente: Empresa Municipal de Transportes de València / Metrovalencia.\n\nElaboración: Oficina d'Estadística. Ajuntament de València.")+
theme(plot.title = element_text(size=10,hjust = 0.5,face="bold"),
plot.caption = element_text(color = "black",face = "italic", size = 6, hjust=0),
legend.title = element_blank())+
scale_x_date(breaks = as.Date(paste0(2008:2020,"-01-01")),date_labels = "%Y")+
scale_color_manual(values=c("#df4444","#dfc944"))
p
ggsave(filename = paste0("20200524 Grafico visualizar datos temporales.png"), p,
width = 9, height = 5, dpi = 300, units = "in", device='png')
|
/20200524 Grafico visualizar datos temporales.R
|
no_license
|
estadisticavlc/RetoGraficos
|
R
| false | false | 1,930 |
r
|
lista_de_paquetes <- c("curl","ggplot2", "grid") # Definimos los paquetes que queremos cargar
paquetes_nuevos <- lista_de_paquetes[!(lista_de_paquetes %in% installed.packages()[,"Package"])] # Buscamos los paquetes que no tenemos
if(length(paquetes_nuevos)) install.packages(paquetes_nuevos) # Instalamos los paquetes que no tenemos
# Cargamos las librerías necesarias
library(curl)
library(ggplot2)
library(grid)
# Read the data
datos=read.csv(curl("https://raw.githubusercontent.com/estadisticavlc/Datos/master/Indicadores.csv"),sep=",",dec=",")
datos$VALOR<-as.numeric(as.character(datos$VALOR))
datos$FECHA=as.Date(paste0(as.character(datos$ANO),"-",as.character(datos$MES),"-15"))
# Select the time period and the indicators
datos=datos[datos$ANO>=2008,]
datos=datos[datos$codigo%in%c("Ind16","Ind17"),]
datos$INDICADOR=""
datos$INDICADOR[datos$codigo=="Ind16"]="Autobuses EMT"
datos$INDICADOR[datos$codigo=="Ind17"]="Metrovalencia"
# Plot the time series
options(scipen=5)
p=ggplot(data=datos, aes(FECHA,VALOR,col=codigo)) +
geom_line(size=1.5,aes(color=INDICADOR)) +
theme_bw() +
labs(title="Evolución mensual de los pasajeros de EMT y Metrovalencia desde 2008",
y="Pasajeros",
x=NULL,
caption="Fuente: Empresa Municipal de Transportes de València / Metrovalencia.\n\nElaboración: Oficina d'Estadística. Ajuntament de València.")+
theme(plot.title = element_text(size=10,hjust = 0.5,face="bold"),
plot.caption = element_text(color = "black",face = "italic", size = 6, hjust=0),
legend.title = element_blank())+
scale_x_date(breaks = as.Date(paste0(2008:2020,"-01-01")),date_labels = "%Y")+
scale_color_manual(values=c("#df4444","#dfc944"))
p
ggsave(filename = paste0("20200524 Grafico visualizar datos temporales.png"), p,
width = 9, height = 5, dpi = 300, units = "in", device='png')
|
#' Microclimate temperature measurements.
#'
#' A dataset containing microhabitat temperature records together with the
#' sampling time, location, and air temperature at the nearest weather station.
#'
#' @format A data frame with 54000 rows and 12 variables:
#' \describe{
#' \item{species}{species name}
#' \item{day}{day of sampling}
#' \item{month}{month of sampling}
#' \item{year}{year of sampling}
#' \item{hour}{hour of sampling}
#' \item{minute}{minute of sampling}
#' \item{second}{second of sampling}
#' \item{t_air}{air temperature at nearest weather station, in degrees Celsius}
#' \item{Lon}{Longitude of sampling point}
#' \item{Lat}{Latitude of sampling point}
#' \item{microhabitat}{microhabitat sampled}
#' \item{temp}{temperature at microhabitat, in degrees Celsius}
#' ...
#' }
"FulanusMicroclimate"
|
/R/FulanusMicroclimate.R
|
no_license
|
gabrielhoc/MapinguariLegacy
|
R
| false | false | 776 |
r
|
#' Microclimate temperature measurements.
#'
#' A dataset containing microhabitat temperature records together with the
#' sampling time, location, and air temperature at the nearest weather station.
#'
#' @format A data frame with 54000 rows and 12 variables:
#' \describe{
#' \item{species}{species name}
#' \item{day}{day of sampling}
#' \item{month}{month of sampling}
#' \item{year}{year of sampling}
#' \item{hour}{hour of sampling}
#' \item{minute}{minute of sampling}
#' \item{second}{second of sampling}
#' \item{t_air}{air temperature at nearest weather station, in degrees Celsius}
#' \item{Lon}{Longitude of sampling point}
#' \item{Lat}{Latitude of sampling point}
#' \item{microhabitat}{microhabitat sampled}
#' \item{temp}{temperature at microhabitat, in degrees Celsius}
#' ...
#' }
"FulanusMicroclimate"
|
library(RSelenium)
RSelenium::rsDriver()
binman::list_versions("chromedriver")
RSelenium::rsDriver(browser = "chrome", chromever = "75.0.3770.90",port=4568L)
rD=rsDriver(browser = "chrome", chromever = "75.0.3770.90",port=4568L)
remDr=rD$client # the driver object created above is 'rD' (R is case-sensitive)
remDr$navigate("https://www.google.com") # navigate() needs a URL; this one is only a placeholder example
|
/Web scrapping.R
|
no_license
|
chinmayi15/R-Programming
|
R
| false | false | 262 |
r
|
library(RSelenium)
RSelenium::rsDriver()
binman::list_versions("chromedriver")
RSelenium::rsDriver(browser = "chrome", chromever = "75.0.3770.90",port=4568L)
rD=rsDriver(browser = "chrome", chromever = "75.0.3770.90",port=4568L)
remDr=rD$client # the driver object created above is 'rD' (R is case-sensitive)
remDr$navigate("https://www.google.com") # navigate() needs a URL; this one is only a placeholder example
|
renv_tests_scope <- function(packages = character(), project = NULL) {
renv_tests_init()
# ensure that attempts to restart are a no-op
options(restart = function(...) TRUE)
# save local repositories
Sys.setenv(RENV_PATHS_LOCAL = file.path(renv_tests_root(), "local"))
# move to own test directory
dir <- project %||% tempfile("renv-test-")
ensure_directory(dir)
dir <- renv_path_normalize(dir, winslash = "/")
owd <- setwd(dir)
# set as active project
Sys.setenv(RENV_PROJECT = dir)
# create empty renv directory
dir.create(file.path(dir, "renv"))
# create file with dependencies
code <- sprintf("library(%s)", packages)
writeLines(code, "dependencies.R")
# use temporary library
lib <- tempfile("renv-library-")
ensure_directory(lib)
libpaths <- .libPaths()
.libPaths(lib)
defer(envir = parent.frame(), {
setwd(owd)
unlink(lib, recursive = TRUE)
.libPaths(libpaths)
})
invisible(dir)
}
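# Hypothetical usage sketch (commented out): inside a testthat test one might
# scope a temporary project like this. The package name 'breakfast' is an
# assumption, standing in for one of the dummy packages in renv's test repos.
#
#   test_that("renv initializes a scoped test project", {
#     renv_tests_scope("breakfast")
#     init()
#     expect_true(file.exists("renv.lock"))
#   })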
renv_tests_root <- function(path = getwd()) {
renv_global("tests.root", renv_tests_root_impl(path))
}
renv_tests_root_impl <- function(path = getwd()) {
# if we're working in an RStudio project, we can cheat
if (exists(".rs.getProjectDirectory")) {
projroot <- get(".rs.getProjectDirectory")
return(file.path(projroot(), "tests/testthat"))
}
# construct set of paths we'll hunt through
slashes <- gregexpr("(?:/|$)", path)[[1]]
parts <- substring(path, 1, slashes - 1)
# begin the search
for (part in rev(parts)) {
# required to find test directory during R CMD check
if (file.exists(file.path(part, "testthat.R")))
return(file.path(part, "testthat"))
# required for other general testing
anchor <- file.path(part, "DESCRIPTION")
if (file.exists(anchor))
return(file.path(part, "tests/testthat"))
}
stop("could not determine root directory for test files")
}
renv_tests_init_workarounds <- function() {
if (renv_platform_macos()) {
if (!nzchar(Sys.getenv("TZ")))
Sys.setenv(TZ = "America/Los_Angeles")
}
}
renv_tests_init_working_dir <- function() {
if (exists(".rs.getProjectDirectory")) {
home <- get(".rs.getProjectDirectory")
setwd(home())
}
}
renv_tests_init_envvars <- function() {
root <- tempfile("renv-root-")
dir.create(root, showWarnings = TRUE, mode = "755")
Sys.setenv(RENV_PATHS_ROOT = root)
}
renv_tests_init_options <- function() {
options(
renv.config.user.library = FALSE,
restart = NULL,
warn = 2
)
}
renv_tests_init_repos <- function(repopath = NULL) {
# find root directory
root <- renv_tests_root()
# generate our dummy repository
repopath <- repopath %||% tempfile("renv-repos-")
contrib <- file.path(repopath, "src/contrib")
ensure_directory(contrib)
# save current directory
owd <- getwd()
on.exit(setwd(owd), add = TRUE)
# copy package stuff to tempdir (because we'll mutate them a bit)
source <- file.path(root, "packages")
target <- tempfile("renv-packages-")
renv_file_copy(source, target)
setwd(target)
# helper function for 'uploading' a package to our test repo
upload <- function(path, root, subdir = FALSE) {
# create package tarball
desc <- renv_description_read(path)
package <- basename(path)
tarball <- sprintf("%s_%s.tar.gz", package, desc$Version)
tar(tarball, package, compression = "gzip")
# copy into repository tree
components <- c(root, if (subdir) package, tarball)
target <- paste(components, collapse = "/")
ensure_parent_directory(target)
renv_file_move(tarball, target)
}
# just in case?
renv_scope_options(renv.config.filebacked.cache = FALSE)
# copy in packages
paths <- list.files(getwd(), full.names = TRUE)
subdirs <- file.path(getRversion(), "Recommended")
for (path in paths) {
# upload the 'regular' package
upload(path, contrib, subdir = FALSE)
# upload a subdir (mocking what R does during upgrades)
upload(path, file.path(contrib, subdirs), subdir = FALSE)
# generate an 'old' version of the packages
descpath <- file.path(path, "DESCRIPTION")
desc <- renv_description_read(descpath)
desc$Version <- "0.0.1"
write.dcf(desc, file = descpath)
# place packages at top level (simulating packages with multiple
# versions at the top level of the repository)
upload(path, contrib, subdir = FALSE)
# generate an 'old' version of the packages
descpath <- file.path(path, "DESCRIPTION")
desc <- renv_description_read(descpath)
desc$Version <- "0.1.0"
write.dcf(desc, file = descpath)
# place these packages into the archive
upload(path, file.path(contrib, "Archive"), subdir = TRUE)
}
# update PACKAGES metadata
tools::write_PACKAGES(
dir = contrib,
subdirs = subdirs,
type = "source",
latestOnly = FALSE
)
# update our repos option
fmt <- if (renv_platform_windows()) "file:///%s" else "file://%s"
repos <- c(CRAN = sprintf(fmt, repopath))
options(
pkgType = "source",
repos = repos,
renv.tests.repos = repos,
renv.tests.repopath = repopath
)
}
renv_tests_init_packages <- function() {
# don't treat warnings as errors in this scope
renv_scope_options(warn = 1)
# find packages to load
packages <- renv_tests_init_packages_find()
# load those packages
envir <- new.env(parent = emptyenv())
renv_tests_init_packages_load(packages, envir)
}
renv_tests_init_packages_find <- function() {
fields <- c("Depends", "Imports", "Suggests", "LinkingTo")
descpath <- system.file("DESCRIPTION", package = "renv")
deps <- renv_dependencies_discover_description(descpath, fields = fields)
deps[["Package"]]
}
renv_tests_init_packages_load <- function(packages, envir) {
for (package in packages) {
tryCatch(
renv_tests_init_packages_load_impl(package, envir),
error = warning
)
}
}
renv_tests_init_packages_load_impl <- function(package, envir) {
# skip the 'R' package
if (identical(package, "R"))
return()
# if we've already tried to load this package, skip it
if (visited(package, envir = envir))
return()
# try to load the package
if (!package %in% loadedNamespaces())
loadNamespace(package)
# try to find this package
pkgpath <- renv_package_find(package)
if (!file.exists(pkgpath))
return()
# try to read the package DESCRIPTION and load its dependencies
descpath <- file.path(pkgpath, "DESCRIPTION")
deps <- renv_dependencies_discover_description(
path = descpath,
fields = c("Depends", "Imports", "LinkingTo")
)
map(
deps$Package,
renv_tests_init_packages_load,
envir = envir
)
}
renv_tests_init_sandbox <- function() {
# eagerly load packages that we'll need during tests
# (as the sandbox will otherwise 'hide' these packages)
testthat <- find.package("testthat")
descpath <- file.path(testthat, "DESCRIPTION")
deps <- renv_dependencies_discover_description(descpath)
for (package in deps$Package)
requireNamespace(package, quietly = TRUE)
# set up a dummy library path
dummy <- tempfile("renv-library-")
dir.create(dummy)
.libPaths(dummy)
# now sandbox the libpaths
renv_sandbox_activate()
}
renv_tests_init_finish <- function() {
# don't perform transactional installs by default for now
# (causes strange CI failures, especially on Windows?)
options(renv.config.install.transactional = FALSE)
# mark tests as running
options(renv.tests.running = TRUE)
}
renv_tests_init <- function() {
if (renv_tests_running())
return()
Sys.unsetenv("RENV_PROFILE")
Sys.unsetenv("RENV_PATHS_LIBRARY")
Sys.unsetenv("RENV_PATHS_LIBRARY_ROOT")
Sys.unsetenv("RENV_CONFIG_CACHE_ENABLED")
Sys.unsetenv("RENV_PYTHON")
Sys.unsetenv("RETICULATE_PYTHON")
Sys.unsetenv("RETICULATE_PYTHON_ENV")
Sys.unsetenv("RETICULATE_PYTHON_FALLBACK")
renv_tests_init_workarounds()
renv_tests_init_working_dir()
renv_tests_init_envvars()
renv_tests_init_options()
renv_tests_init_repos()
renv_tests_init_packages()
renv_tests_init_sandbox()
renv_tests_init_finish()
}
renv_tests_running <- function() {
getOption("renv.tests.running", default = FALSE)
}
renv_tests_verbose <- function() {
# if we're not running tests, mark as true
running <- renv_tests_running()
if (!running)
return(TRUE)
# otherwise, respect option
# (we might set this to FALSE to silence output from expected errors)
getOption("renv.tests.verbose", default = TRUE)
}
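# writes the deparsed 'code' expression to a temporary R script and returns its path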
renv_test_code <- function(code, fileext = ".R") {
file <- tempfile("renv-code-", fileext = fileext)
writeLines(deparse(substitute(code)), con = file)
file
}
renv_test_retrieve <- function(record) {
renv_scope_error_handler()
# avoid using cache
renv_scope_envvars(RENV_PATHS_CACHE = tempfile())
# construct records
package <- record$Package
records <- list(record)
names(records) <- package
# prepare dummy library
templib <- renv_tempfile_path("renv-library-")
ensure_directory(templib)
renv_scope_libpaths(c(templib, .libPaths()))
# attempt a restore into that library
renv_scope_restore(
project = getwd(),
library = templib,
records = records,
packages = package,
recursive = FALSE
)
records <- renv_retrieve(record$Package)
renv_install(records)
descpath <- file.path(templib, package)
if (!file.exists(descpath))
stopf("failed to retrieve package '%s'", package)
desc <- renv_description_read(descpath)
fields <- grep("^Remote", names(record), value = TRUE)
testthat::expect_identical(
as.list(desc[fields]),
as.list(record[fields])
)
}
renv_tests_diagnostics <- function() {
# print library paths
renv_pretty_print(
paste("-", .libPaths()),
"The following R libraries are set:",
wrap = FALSE
)
# print repositories
repos <- getOption("repos")
renv_pretty_print(
paste(names(repos), repos, sep = ": "),
"The following repositories are set:",
wrap = FALSE
)
# print renv root
renv_pretty_print(
paste("-", paths$root()),
"The following renv root directory is being used:",
wrap = FALSE
)
# print cache root
renv_pretty_print(
paste("-", paths$cache()),
"The following renv cache directory is being used:",
wrap = FALSE
)
writeLines("The following packages are available in the test repositories:")
dbs <-
renv_available_packages(type = "source", quiet = TRUE) %>%
map(function(db) {
rownames(db) <- NULL
db[c("Package", "Version", "Path")]
})
print(dbs)
path <- Sys.getenv("PATH")
splat <- strsplit(path, .Platform$path.sep, fixed = TRUE)[[1]]
renv_pretty_print(
paste("-", splat),
"The following PATH is set:",
wrap = FALSE
)
envvars <- c(
grep("^_R_", names(Sys.getenv()), value = TRUE),
"HOME",
"R_ARCH", "R_HOME",
"R_LIBS", "R_LIBS_SITE", "R_LIBS_USER", "R_USER",
"R_ZIPCMD",
"TAR", "TEMP", "TMP", "TMPDIR"
)
keys <- format(envvars)
vals <- Sys.getenv(envvars, unset = "<NA>")
vals[vals != "<NA>"] <- shQuote(vals[vals != "<NA>"], type = "cmd")
renv_pretty_print(
paste(keys, vals, sep = " : "),
"The following environment variables of interest are set:",
wrap = FALSE
)
}
renv_tests_report <- function(test, elapsed, expectations) {
# figure out overall test result
status <- "PASS"
for (expectation in expectations) {
errors <- c("expectation_error", "expectation_failure")
if (inherits(expectation, errors)) {
status <- "FAIL"
break
}
if (inherits(expectation, "expectation_skip")) {
status <- "SKIP"
break
}
}
# get console width
width <- max(getOption("width"), 78L)
# write out text with line
left <- trunc(test, width - 23L)
# figure out how long tests took to run
time <- if (elapsed < 0.1)
"<0.1s"
else
format(renv_difftime_format_short(elapsed), width = 5L, justify = "right")
# write formatted
fmt <- "[%s / %s]"
right <- sprintf(fmt, status, time)
# fill space between with dots
dots <- rep.int(".", max(0L, width - nchar(left) - nchar(right) - 4L))
all <- paste(left, paste(dots, collapse = ""), right)
# write it out
cli::cat_bullet(all)
}
renv_tests_path <- function(path) {
root <- renv_tests_root()
file.path(root, path)
}
renv_tests_supported <- function() {
# supported when running locally + on CI
for (envvar in c("NOT_CRAN", "CI"))
if (!is.na(Sys.getenv(envvar, unset = NA)))
return(TRUE)
# disabled on older macOS releases (credentials fails to load)
if (renv_platform_macos() && getRversion() < "4.0.0")
return(FALSE)
# disabled on Windows
if (renv_platform_windows())
return(FALSE)
# true otherwise
TRUE
}
|
/R/tests.R
|
permissive
|
mkyriak/renv
|
R
| false | false | 12,816 |
r
|
renv_tests_scope <- function(packages = character(), project = NULL) {
renv_tests_init()
# ensure that attempts to restart are a no-op
options(restart = function(...) TRUE)
# save local repositories
Sys.setenv(RENV_PATHS_LOCAL = file.path(renv_tests_root(), "local"))
# move to own test directory
dir <- project %||% tempfile("renv-test-")
ensure_directory(dir)
dir <- renv_path_normalize(dir, winslash = "/")
owd <- setwd(dir)
# set as active project
Sys.setenv(RENV_PROJECT = dir)
# create empty renv directory
dir.create(file.path(dir, "renv"))
# create file with dependencies
code <- sprintf("library(%s)", packages)
writeLines(code, "dependencies.R")
# use temporary library
lib <- tempfile("renv-library-")
ensure_directory(lib)
libpaths <- .libPaths()
.libPaths(lib)
defer(envir = parent.frame(), {
setwd(owd)
unlink(lib, recursive = TRUE)
.libPaths(libpaths)
})
invisible(dir)
}
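# Hypothetical usage sketch (commented out): inside a testthat test one might
# scope a temporary project like this. The package name 'breakfast' is an
# assumption, standing in for one of the dummy packages in renv's test repos.
#
#   test_that("renv initializes a scoped test project", {
#     renv_tests_scope("breakfast")
#     init()
#     expect_true(file.exists("renv.lock"))
#   })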
renv_tests_root <- function(path = getwd()) {
renv_global("tests.root", renv_tests_root_impl(path))
}
renv_tests_root_impl <- function(path = getwd()) {
# if we're working in an RStudio project, we can cheat
if (exists(".rs.getProjectDirectory")) {
projroot <- get(".rs.getProjectDirectory")
return(file.path(projroot(), "tests/testthat"))
}
# construct set of paths we'll hunt through
slashes <- gregexpr("(?:/|$)", path)[[1]]
parts <- substring(path, 1, slashes - 1)
# begin the search
for (part in rev(parts)) {
# required to find test directory during R CMD check
if (file.exists(file.path(part, "testthat.R")))
return(file.path(part, "testthat"))
# required for other general testing
anchor <- file.path(part, "DESCRIPTION")
if (file.exists(anchor))
return(file.path(part, "tests/testthat"))
}
stop("could not determine root directory for test files")
}
renv_tests_init_workarounds <- function() {
if (renv_platform_macos()) {
if (!nzchar(Sys.getenv("TZ")))
Sys.setenv(TZ = "America/Los_Angeles")
}
}
renv_tests_init_working_dir <- function() {
if (exists(".rs.getProjectDirectory")) {
home <- get(".rs.getProjectDirectory")
setwd(home())
}
}
renv_tests_init_envvars <- function() {
root <- tempfile("renv-root-")
dir.create(root, showWarnings = TRUE, mode = "755")
Sys.setenv(RENV_PATHS_ROOT = root)
}
renv_tests_init_options <- function() {
options(
renv.config.user.library = FALSE,
restart = NULL,
warn = 2
)
}
renv_tests_init_repos <- function(repopath = NULL) {
# find root directory
root <- renv_tests_root()
# generate our dummy repository
repopath <- repopath %||% tempfile("renv-repos-")
contrib <- file.path(repopath, "src/contrib")
ensure_directory(contrib)
# save current directory
owd <- getwd()
on.exit(setwd(owd), add = TRUE)
# copy package stuff to tempdir (because we'll mutate them a bit)
source <- file.path(root, "packages")
target <- tempfile("renv-packages-")
renv_file_copy(source, target)
setwd(target)
# helper function for 'uploading' a package to our test repo
upload <- function(path, root, subdir = FALSE) {
# create package tarball
desc <- renv_description_read(path)
package <- basename(path)
tarball <- sprintf("%s_%s.tar.gz", package, desc$Version)
tar(tarball, package, compression = "gzip")
# copy into repository tree
components <- c(root, if (subdir) package, tarball)
target <- paste(components, collapse = "/")
ensure_parent_directory(target)
renv_file_move(tarball, target)
}
# just in case?
renv_scope_options(renv.config.filebacked.cache = FALSE)
# copy in packages
paths <- list.files(getwd(), full.names = TRUE)
subdirs <- file.path(getRversion(), "Recommended")
for (path in paths) {
# upload the 'regular' package
upload(path, contrib, subdir = FALSE)
# upload a subdir (mocking what R does during upgrades)
upload(path, file.path(contrib, subdirs), subdir = FALSE)
# generate an 'old' version of the packages
descpath <- file.path(path, "DESCRIPTION")
desc <- renv_description_read(descpath)
desc$Version <- "0.0.1"
write.dcf(desc, file = descpath)
# place packages at top level (simulating packages with multiple
# versions at the top level of the repository)
upload(path, contrib, subdir = FALSE)
# generate an 'old' version of the packages
descpath <- file.path(path, "DESCRIPTION")
desc <- renv_description_read(descpath)
desc$Version <- "0.1.0"
write.dcf(desc, file = descpath)
# place these packages into the archive
upload(path, file.path(contrib, "Archive"), subdir = TRUE)
}
# update PACKAGES metadata
tools::write_PACKAGES(
dir = contrib,
subdirs = subdirs,
type = "source",
latestOnly = FALSE
)
# update our repos option
fmt <- if (renv_platform_windows()) "file:///%s" else "file://%s"
repos <- c(CRAN = sprintf(fmt, repopath))
options(
pkgType = "source",
repos = repos,
renv.tests.repos = repos,
renv.tests.repopath = repopath
)
}
renv_tests_init_packages <- function() {
# don't treat warnings as errors in this scope
renv_scope_options(warn = 1)
# find packages to load
packages <- renv_tests_init_packages_find()
# load those packages
envir <- new.env(parent = emptyenv())
renv_tests_init_packages_load(packages, envir)
}
renv_tests_init_packages_find <- function() {
fields <- c("Depends", "Imports", "Suggests", "LinkingTo")
descpath <- system.file("DESCRIPTION", package = "renv")
deps <- renv_dependencies_discover_description(descpath, fields = fields)
deps[["Package"]]
}
renv_tests_init_packages_load <- function(packages, envir) {
for (package in packages) {
tryCatch(
renv_tests_init_packages_load_impl(package, envir),
error = warning
)
}
}
renv_tests_init_packages_load_impl <- function(package, envir) {
# skip the 'R' package
if (identical(package, "R"))
return()
# if we've already tried to load this package, skip it
if (visited(package, envir = envir))
return()
# try to load the package
if (!package %in% loadedNamespaces())
loadNamespace(package)
# try to find this package
pkgpath <- renv_package_find(package)
if (!file.exists(pkgpath))
return()
# try to read the package DESCRIPTION and load its dependencies
descpath <- file.path(pkgpath, "DESCRIPTION")
deps <- renv_dependencies_discover_description(
path = descpath,
fields = c("Depends", "Imports", "LinkingTo")
)
map(
deps$Package,
renv_tests_init_packages_load,
envir = envir
)
}
renv_tests_init_sandbox <- function() {
# eagerly load packages that we'll need during tests
# (as the sandbox will otherwise 'hide' these packages)
testthat <- find.package("testthat")
descpath <- file.path(testthat, "DESCRIPTION")
deps <- renv_dependencies_discover_description(descpath)
for (package in deps$Package)
requireNamespace(package, quietly = TRUE)
# set up a dummy library path
dummy <- tempfile("renv-library-")
dir.create(dummy)
.libPaths(dummy)
# now sandbox the libpaths
renv_sandbox_activate()
}
renv_tests_init_finish <- function() {
# don't perform transactional installs by default for now
# (causes strange CI failures, especially on Windows?)
options(renv.config.install.transactional = FALSE)
# mark tests as running
options(renv.tests.running = TRUE)
}
renv_tests_init <- function() {
if (renv_tests_running())
return()
Sys.unsetenv("RENV_PROFILE")
Sys.unsetenv("RENV_PATHS_LIBRARY")
Sys.unsetenv("RENV_PATHS_LIBRARY_ROOT")
Sys.unsetenv("RENV_CONFIG_CACHE_ENABLED")
Sys.unsetenv("RENV_PYTHON")
Sys.unsetenv("RETICULATE_PYTHON")
Sys.unsetenv("RETICULATE_PYTHON_ENV")
Sys.unsetenv("RETICULATE_PYTHON_FALLBACK")
renv_tests_init_workarounds()
renv_tests_init_working_dir()
renv_tests_init_envvars()
renv_tests_init_options()
renv_tests_init_repos()
renv_tests_init_packages()
renv_tests_init_sandbox()
renv_tests_init_finish()
}
renv_tests_running <- function() {
getOption("renv.tests.running", default = FALSE)
}
renv_tests_verbose <- function() {
# if we're not running tests, mark as true
running <- renv_tests_running()
if (!running)
return(TRUE)
# otherwise, respect option
# (we might set this to FALSE to silence output from expected errors)
getOption("renv.tests.verbose", default = TRUE)
}
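# writes the deparsed 'code' expression to a temporary R script and returns its path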
renv_test_code <- function(code, fileext = ".R") {
file <- tempfile("renv-code-", fileext = fileext)
writeLines(deparse(substitute(code)), con = file)
file
}
renv_test_retrieve <- function(record) {
renv_scope_error_handler()
# avoid using cache
renv_scope_envvars(RENV_PATHS_CACHE = tempfile())
# construct records
package <- record$Package
records <- list(record)
names(records) <- package
# prepare dummy library
templib <- renv_tempfile_path("renv-library-")
ensure_directory(templib)
renv_scope_libpaths(c(templib, .libPaths()))
# attempt a restore into that library
renv_scope_restore(
project = getwd(),
library = templib,
records = records,
packages = package,
recursive = FALSE
)
records <- renv_retrieve(record$Package)
renv_install(records)
descpath <- file.path(templib, package)
if (!file.exists(descpath))
stopf("failed to retrieve package '%s'", package)
desc <- renv_description_read(descpath)
fields <- grep("^Remote", names(record), value = TRUE)
testthat::expect_identical(
as.list(desc[fields]),
as.list(record[fields])
)
}
renv_tests_diagnostics <- function() {
# print library paths
renv_pretty_print(
paste("-", .libPaths()),
"The following R libraries are set:",
wrap = FALSE
)
# print repositories
repos <- getOption("repos")
renv_pretty_print(
paste(names(repos), repos, sep = ": "),
"The following repositories are set:",
wrap = FALSE
)
# print renv root
renv_pretty_print(
paste("-", paths$root()),
"The following renv root directory is being used:",
wrap = FALSE
)
# print cache root
renv_pretty_print(
paste("-", paths$cache()),
"The following renv cache directory is being used:",
wrap = FALSE
)
writeLines("The following packages are available in the test repositories:")
dbs <-
renv_available_packages(type = "source", quiet = TRUE) %>%
map(function(db) {
rownames(db) <- NULL
db[c("Package", "Version", "Path")]
})
print(dbs)
path <- Sys.getenv("PATH")
splat <- strsplit(path, .Platform$path.sep, fixed = TRUE)[[1]]
renv_pretty_print(
paste("-", splat),
"The following PATH is set:",
wrap = FALSE
)
envvars <- c(
grep("^_R_", names(Sys.getenv()), value = TRUE),
"HOME",
"R_ARCH", "R_HOME",
"R_LIBS", "R_LIBS_SITE", "R_LIBS_USER", "R_USER",
"R_ZIPCMD",
"TAR", "TEMP", "TMP", "TMPDIR"
)
keys <- format(envvars)
vals <- Sys.getenv(envvars, unset = "<NA>")
vals[vals != "<NA>"] <- shQuote(vals[vals != "<NA>"], type = "cmd")
renv_pretty_print(
paste(keys, vals, sep = " : "),
"The following environment variables of interest are set:",
wrap = FALSE
)
}
renv_tests_report <- function(test, elapsed, expectations) {
# figure out overall test result
status <- "PASS"
for (expectation in expectations) {
errors <- c("expectation_error", "expectation_failure")
if (inherits(expectation, errors)) {
status <- "FAIL"
break
}
if (inherits(expectation, "expectation_skip")) {
status <- "SKIP"
break
}
}
# get console width
width <- max(getOption("width"), 78L)
# write out text with line
left <- trunc(test, width - 23L)
# figure out how long tests took to run
time <- if (elapsed < 0.1)
"<0.1s"
else
format(renv_difftime_format_short(elapsed), width = 5L, justify = "right")
# write formatted
fmt <- "[%s / %s]"
right <- sprintf(fmt, status, time)
# fill space between with dots
dots <- rep.int(".", max(0L, width - nchar(left) - nchar(right) - 4L))
all <- paste(left, paste(dots, collapse = ""), right)
# write it out
cli::cat_bullet(all)
}
renv_tests_path <- function(path) {
root <- renv_tests_root()
file.path(root, path)
}
renv_tests_supported <- function() {
# supported when running locally + on CI
for (envvar in c("NOT_CRAN", "CI"))
if (!is.na(Sys.getenv(envvar, unset = NA)))
return(TRUE)
# disabled on older macOS releases (credentials fails to load)
if (renv_platform_macos() && getRversion() < "4.0.0")
return(FALSE)
# disabled on Windows
if (renv_platform_windows())
return(FALSE)
# true otherwise
TRUE
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{simulate_experiment}
\alias{simulate_experiment}
\title{simulate RNA-seq experiment using negative binomial model}
\usage{
simulate_experiment(fasta = NULL, gtf = NULL, seqpath = NULL,
outdir = ".", num_reps = c(10, 10), reads_per_transcript = 300,
size = NULL, fold_changes, paired = TRUE, ...)
}
\arguments{
\item{fasta}{path to FASTA file containing transcripts from which to simulate
reads. See details.}
\item{gtf}{path to GTF file containing transcript structures from which reads
should be simulated. See details.}
\item{seqpath}{path to folder containing one FASTA file (\code{.fa}
extension) for each chromosome in \code{gtf}. See details.}
\item{outdir}{character, path to folder where simulated reads should be
written, with *no* slash at the end. By default, reads are
written to current working directory.}
\item{num_reps}{How many biological replicates should be in each group? The
length of \code{num_reps} determines how many groups are in the experiment.
For example, \code{num_reps = c(5,6,5)} specifies a 3-group experiment with
5 samples in group 1, 6 samples in group 2, and 5 samples in group 3.
Defaults to a 2-group experiment with 10 reps per group (i.e.,
\code{c(10,10)}).}
\item{reads_per_transcript}{baseline mean number of reads to simulate
from each transcript. Can be an integer, in which case this many reads
are simulated from each transcript, or an integer vector whose length
matches the number of transcripts in \code{fasta}. Default 300. You can
also leave \code{reads_per_transcript} empty and set \code{meanmodel=TRUE}
to draw baseline mean numbers from a model based on transcript length.}
\item{size}{the negative binomial \code{size} parameter (see
\code{\link{NegBinomial}}) for the number of reads drawn per transcript.
It can be a matrix (where the user can specify the size parameter per
transcript, per group), a vector (where the user can specify the size per
transcript, perhaps relating to reads_per_transcript), or a single number,
specifying the size for all transcripts and groups.
If left NULL, defaults to \code{reads_per_transcript * fold_changes / 3}.
Negative binomial variance is mean + mean^2 / size.}
\item{fold_changes}{Matrix specifying multiplicative fold changes
between groups. There is no default, so you must provide this argument.
In real data sets, lowly-expressed transcripts often show high fold
changes between groups, so this can be kept in mind when setting
\code{fold_changes} and \code{reads_per_transcript}. This argument must
have the same number of columns as there are groups as
specified by \code{num_reps}, and must have the same number of rows as
there are transcripts in \code{fasta}. A fold change of X in matrix entry
i,j means that for replicate j, the baseline mean number of reads
(reads_per_transcript[i]) will be multiplied by X. Note that the
multiplication happens before the negative binomial value
(for the number of reads that *actually will* be
drawn from transcript i, for replicate j) is drawn. This argument is
ignored if \code{length(num_reps)} is 1 (meaning you only have 1 group in
your simulation).}
\item{paired}{If \code{TRUE}, paired-end reads are simulated; else
single-end reads are simulated. Default \code{TRUE}}
\item{...}{any of several other arguments that can be used to add nuance
to the simulation. See details.}
}
\value{
No return, but simulated reads and a simulation info file are written
to \code{outdir}.
}
\description{
create FASTA files containing RNA-seq reads simulated from provided
transcripts, with optional differential expression between two groups
}
\details{
Reads can either be simulated from a FASTA file of transcripts
(provided with the \code{fasta} argument) or from a GTF file plus DNA
sequences (provided with the \code{gtf} and \code{seqpath} arguments).
Simulating from a GTF file and DNA sequences may be a bit slower: it took
about 6 minutes to parse the GTF/sequence files for chromosomes 1-22, X,
and Y in hg19.
Several optional parameters can be passed to this function to adjust the
simulation. The options are:
\itemize{
\item \code{readlen}: read length. Default 100.
\item \code{lib_sizes}: Library size factors for the biological replicates.
\code{lib_sizes} should have length equal to the total number of
replicates in the experiment, i.e., \code{sum(num_reps)}. For each
replicate, once the number of reads to simulate from each transcript for
that replicate is known, all read numbers across all transcripts from that
replicate are multiplied by the corresponding entry in \code{lib_sizes}.
\item \code{distr} One of 'normal', 'empirical', or 'custom', which
specifies the distribution from which to draw RNA fragment lengths. If
'normal', draw fragment lengths from a normal distribution. You can provide
the mean of that normal distribution with \code{fraglen} (defaults to 250)
and the standard deviation of that normal distribution with \code{fragsd}
(defaults to 25). If 'empirical', draw fragment lengths
from a fragment length distribution estimated from a real data set. If
'custom', draw fragment lengths from a custom distribution, which you can
provide as the \code{custdens} argument. \code{custdens} should be a
density fitted using \code{\link{logspline}}.
\item \code{error_model}: The error model can be one of:
\itemize{
\item \code{'uniform'}: errors are distributed uniformly across reads.
You can also provide an \code{'error_rate'} parameter, giving the overall
probability of making a sequencing error at any given nucleotide. This
error rate defaults to 0.005.
\item \code{'illumina4'} or \code{'illumina5'}: Empirical error models.
See \code{?add_platform_error} for more information.
\item \code{'custom'}: A custom error model you've estimated from an
RNA-seq data set using \code{GemErr}. See \code{?add_platform_error}
for more info. You will need to provide both \code{model_path} and
\code{model_prefix} if using a custom error model. \code{model_path} is
the output folder you provided to \code{build_error_model.py}. This path
should contain either two files suffixed _mate1 and _mate2, or a file
suffixed _single. \code{model_prefix} is the 'prefix' argument you
provided to \code{build_error_model.py} and is whatever comes before the
_mate1/_mate2 or _single files in \code{model_path}.
}
\item \code{bias} One of 'none', 'rnaf', or 'cdnaf'. 'none'
represents uniform fragment selection (every possible fragment in a
transcript has equal probability of being in the experiment); 'rnaf'
represents positional bias that arises in protocols using RNA
fragmentation, and 'cdnaf' represents positional bias arising in protocols
that use cDNA fragmentation (Li and Jiang 2012). Using the 'rnaf' model,
coverage is higher in the middle of the transcript and lower at both ends,
and in the 'cdnaf' model, coverage increases toward the 3' end of the
transcript. The probability models used come from Supplementary Figure S3
of Li and Jiang (2012). Defaults to 'none' if you don't provide this.
\item \code{gcbias} list indicating which samples to add GC bias to, and
from which models. Should be the same length as \code{sum(num_reps)};
entries can be either numeric or of class \code{loess}. A numeric entry of
0 indicates no GC bias. Numeric entries 1 through 7 correspond to the
7 empirical GC models that ship with Polyester, estimated from GEUVADIS
HapMap samples NA06985, NA12144, NA12776, NA18858, NA20542, NA20772,
and NA20815, respectively. The code used to derive the empirical GC models
is available at
\url{https://github.com/alyssafrazee/polyester/blob/master/make_gc_bias.R}.
A loess entry should be a loess prediction model
that takes a GC content percent value (between 0 and 1) and returns a
transcript's deviation from the overall mean read count based on that GC
value. Counts for
each replicate will be adjusted based on the GC bias model specified for
it. Numeric and loess entries can be mixed. By default, no bias is
included.
\item \code{meanmodel}: set to TRUE if you'd like to set
\code{reads_per_transcript} as a function of transcript length. We
fit a linear model regressing transcript abundance on transcript length,
and setting \code{meanmodel=TRUE} means we will use transcript lengths
to draw transcript abundance based on that linear model. You can see our
modeling code at \url{http://htmlpreview.github.io/?https://github.com/alyssafrazee/polyester_code/blob/master/length_simulation.html}
\item \code{write_info}: set to FALSE if you do not want files of
simulation information written to disk. By default, transcript fold
changes and expression status, as well as replicate library sizes and group
identifiers, are written to \code{outdir}.
\item \code{seed}: specify a seed (e.g. \code{seed=142} or some other
integer) to set before randomly drawing read numbers, for reproducibility.
\item \code{transcriptid}: optional vector of transcript IDs to be written
into \code{sim_info.txt} and used as transcript identifiers in the output
fasta files. Defaults to \code{names(readDNAStringSet(fasta))}. This
option is useful if default names are very long or contain special
characters.
\item You can also include other parameters to pass to
\code{\link{seq_gtf}} if you're simulating from a GTF file.
}
}
\examples{
\donttest{
## simulate a few reads from chromosome 22
fastapath = system.file("extdata", "chr22.fa", package="polyester")
numtx = count_transcripts(fastapath)
set.seed(4)
fold_changes = sample(c(0.5, 1, 2), size=numtx,
prob=c(0.05, 0.9, 0.05), replace=TRUE)
library(Biostrings)
# remove quotes from transcript IDs:
tNames = gsub("'", "", names(readDNAStringSet(fastapath)))
simulate_experiment(fastapath, reads_per_transcript=10,
fold_changes=fold_changes, outdir='simulated_reads',
transcriptid=tNames, seed=12)
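## an added, illustrative variant (not part of the original example): the same
## call with a few of the optional arguments documented in Details; the
## argument values and the output directory name are arbitrary choices
simulate_experiment(fastapath, reads_per_transcript=10,
    fold_changes=fold_changes, outdir='simulated_reads_normal',
    transcriptid=tNames, readlen=100, distr='normal', fraglen=250,
    fragsd=25, error_model='uniform', error_rate=0.005, seed=12)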
}
}
\references{
't Hoen PA, et al (2013): Reproducibility of high-throughput mRNA and
small RNA sequencing across laboratories. Nature Biotechnology 31(11):
1015-1022.
Li W and Jiang T (2012): Transcriptome assembly and isoform expression
level estimation from biased RNA-Seq reads. Bioinformatics 28(22):
2914-2921.
McElroy KE, Luciani F and Thomas T (2012): GemSIM: general,
error-model based simulator of next-generation sequencing data. BMC
Genomics 13(1), 74.
}
|
/man/simulate_experiment.Rd
|
no_license
|
jwcasement/polyester
|
R
| false | false | 10,532 |
rd
|
\name{plot.gp}
\alias{plot.gp}
\title{
Diagnostic Plot for the Validation of a \code{gp} Object
}
\description{
Three plots are currently available, based on the \code{influence}
results: one plot of fitted values against response values, one plot
of standardized residuals, and one qqplot of standardized residuals.
}
\usage{
\S3method{plot}{gp}(x, y, kriging.type = "UK",
trend.reestim = TRUE, which = 1:3, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
An object with S3 class \code{"gp"}.
}
\item{y}{
Not used.
}
\item{kriging.type}{
Optional character string corresponding to the GP "kriging" family,
to be chosen between simple kriging (\code{"SK"}) or universal
kriging (\code{"UK"}).
}
\item{trend.reestim}{
Should the trend be re-estimated when removing an observation?
Defaults to \code{TRUE}.
}
\item{which}{
A subset of \{1, 2, 3\} indicating which figures to plot (see
\code{Description} above). Default is 1:3 (all figures).
}
\item{...}{
No other argument for this method.
}
}
\details{
The standardized residuals are defined by \eqn{[y(\mathbf{x}_i) -
\widehat{y}_{-i}(\mathbf{x}_i)] /
\widehat{\sigma}_{-i}(\mathbf{x}_i)}{(y(xi) - yhat_{-i}(xi)) /
sigmahat_{-i}(xi)}, where \eqn{y(\mathbf{x}_i)}{y(xi)} is the response at the
location \eqn{\mathbf{x}_i}{xi},
\eqn{\widehat{y}_{-i}(\mathbf{x}_i)}{yhat_{-i}(xi)} is the fitted
value when the \eqn{i}-th observation is omitted (see
\code{\link{influence.gp}}), and
\eqn{\widehat{\sigma}_{-i}(\mathbf{x}_i)}{sigmahat_{-i}(xi)} is the
corresponding kriging standard deviation.
}
\section{Warning}{
Only trend parameters are re-estimated when removing one
observation. When the number \eqn{n} of observations is small,
re-estimated values can substantially differ from those obtained with
the whole learning set.
}
\value{
A list composed of the following elements where \emph{n} is the total
number of observations.
\item{mean }{
A vector of length \emph{n}. The \eqn{i}-th element is the kriging
mean (including the trend) at the \eqn{i}-th observation number when
removing it from the learning set.
}
\item{sd }{
A vector of length \emph{n}. The \eqn{i}-th element is the kriging
standard deviation at the \eqn{i}-th observation number when removing it
from the learning set.
}
}
\references{
F. Bachoc (2013), "Cross Validation and Maximum Likelihood estimations of
hyper-parameters of Gaussian processes with model
misspecification". \emph{Computational Statistics and Data Analysis},
\bold{66}, 55-69.
N.A.C. Cressie (1993), \emph{Statistics for spatial data}. Wiley series
in probability and mathematical statistics.
O. Dubrule (1983), "Cross validation of Kriging in a unique
neighborhood". \emph{Mathematical Geology}, \bold{15}, 687-699.
J.D. Martin and T.W. Simpson (2005), "Use of kriging models to
approximate deterministic computer models". \emph{AIAA Journal},
\bold{43} no. 4, 853-863.
M. Schonlau (1997), \emph{Computer experiments and global optimization}.
Ph.D. thesis, University of Waterloo.
}
%\author{ O. Roustant, D. Ginsbourger, Ecole des Mines de St-Etienne. }
\seealso{
\code{\link{predict.gp}} and \code{\link{influence.gp}}, the
\code{predict} and \code{influence} methods for \code{"gp"}.
}
\keyword{models}
\keyword{methods}
|
/fuzzedpackages/kergp/man/plot.gp.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 3,591 |
rd
|
load("~/Desktop/Freddie Mac data/USMortgages2008_2009.rdata")
#build the model based on CA data first
D_state = D1[,-c(1,3,6,5,17,20,26)]
D_state = D_state[complete.cases(D_state),]
state = D_state$property.state
ca = data[(state=="CA"),]
tx = data[(state=="TX"),]
except_ca = data[(state!="CA"),]
x = ca[,-9]
x = as.matrix(x)
y = ca[,9]
cvfit = cv.glmnet(x, y, family='binomial',type.measure = "auc")
coef(cvfit, s = "lambda.min")
auc_calucator = function(model, test_data_x, test_data_y) {
fitted.results = predict(model, newx = test_data_x, s = "lambda.min",
type = "response")
pr <- prediction(fitted.results, test_data_y)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
return(auc)
}
auc_calucator(cvfit, x, y)
auc_calucator(cvfit, as.matrix(tx[,-9]), tx[,9])
auc_calucator(cvfit, as.matrix(except_ca[,-9]), except_ca[,9])
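# An added, illustrative sketch (not in the original script): the ROC curve
# behind the AUC can also be plotted with ROCR, shown here for the Texas
# hold-out. Object names tx_pred / tx_pr are new and only used for this sketch.
tx_pred <- predict(cvfit, newx = as.matrix(tx[,-9]), s = "lambda.min",
                   type = "response")
tx_pr <- prediction(tx_pred, tx[,9])
plot(performance(tx_pr, measure = "tpr", x.measure = "fpr"))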
|
/CA.R
|
no_license
|
yazheli/code
|
R
| false | false | 953 |
r
|
load("~/Desktop/Freddie Mac data/USMortgages2008_2009.rdata")
#build the model based on CA data first
D_state = D1[,-c(1,3,6,5,17,20,26)]
D_state = D_state[complete.cases(D_state),]
state = D_state$property.state
ca = data[(state=="CA"),]
tx = data[(state=="TX"),]
except_ca = data[(state!="CA"),]
x = ca[,-9]
x = as.matrix(x)
y = ca[,9]
cvfit = cv.glmnet(x, y, family='binomial',type.measure = "auc")
coef(cvfit, s = "lambda.min")
auc_calucator = function(model, test_data_x, test_data_y) {
fitted.results = predict(model, newx = test_data_x, s = "lambda.min",
type = "response")
pr <- prediction(fitted.results, test_data_y)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
auc <- performance(pr, measure = "auc")
auc <- auc@y.values[[1]]
return(auc)
}
auc_calucator(cvfit, x, y)
auc_calucator(cvfit, as.matrix(tx[,-9]), tx[,9])
auc_calucator(cvfit, as.matrix(except_ca[,-9]), except_ca[,9])
|
\name{qpar}
\alias{qpar}
\title{Graphical parameters in cranvas}
\usage{
qpar(...)
}
\arguments{
\item{...}{options of the form \code{tag = value}}
}
\value{
the current list of parameters or set new options
}
\description{
This function can set or query the graphical parameters
in the \code{cranvas} package.
}
\examples{
op <- qpar()
qpar(mar = 0.05) # the degree to extend the plot margin (inner margin)
qpar(op) # restore
}
\author{
Yihui Xie <\url{http://yihui.name}>
}
\seealso{
\code{\link[graphics]{par}}
}
|
/man/qpar.Rd
|
no_license
|
NickSpyrison/cranvas
|
R
| false | false | 530 |
rd
|
#' @title Shannon Entropy
#' @description A very simple implementation of Shannon entropy.
#'
#' @param x vector of probabilities (0,1), must sum to 1, should not contain NA
#' @param b logarithm base
#'
#' @details `0`s are automatically removed by \code{na.rm = TRUE}, as \code{(0 * log(0) = NaN)}
#'
#' @note When \code{b = length(x)} the result is the normalized Shannon entropy of (Kempen et al, 2009).
#'
#' @return A single numeric value.
#'
#' @references
#' Kempen, Bas, Dick J. Brus, Gerard B.M. Heuvelink, and Jetse J. Stoorvogel. 2009. "Updating the 1:50,000 Dutch Soil Map Using Legacy Soil Data: A Multinominal Logistic Regression Approach." Geoderma 151: 311-26. doi:10.1016/j.geoderma.2009.04.023
#'
#' Shannon, Claude E. (July-October 1948). "A Mathematical Theory of Communication". Bell System Technical Journal. 27 (3): 379-423. doi:10.1002/j.1538-7305.1948.tb01338.x
#'
#'
#' @export
#'
#' @examples
#'
#' # a very simple example
#' p <- c(0.25, 0.25, 0.4, 0.05, 0.05)
#'
#' shannonEntropy(p)
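#'
#' # an added illustration: with b = length(x) this is the normalized Shannon
#' # entropy of Kempen et al. (2009), bounded between 0 and 1
#' shannonEntropy(p, b = length(p))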
#'
#'
#'
## TODO: test that sum(x) == 1
shannonEntropy <- function(x, b = 2) {
  # 0s automatically removed by na.rm=TRUE (0 * log(0) = NaN)
res <- -1 * sum(x * log(x, base = b), na.rm = TRUE)
return(res)
}
#' @title Confusion Index
#'
#' @description Calculate the confusion index of Burrough et al., 1997.
#'
#' @param x vector of probabilities (0,1), should not contain NA
#'
#' @author D.E. Beaudette
#'
#' @references Burrough, P.A., P.F.M. van Gaans, and R. Hootsmans. 1997. "Continuous Classification in Soil Survey: Spatial Correlation, Confusion and Boundaries." Geoderma 77: 115-35. doi:10.1016/S0016-7061(97)00018-9.
#'
#' @return A single numeric value.
#' @export
#'
#' @examples
#'
#' # a very simple example
#' p <- c(0.25, 0.25, 0.4, 0.05, 0.05)
#' confusionIndex(p)
#'
#' # for comparison
#' shannonEntropy(p)
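#'
#' # an added illustration: two nearly tied classes push the index toward 1
#' confusionIndex(c(0.35, 0.34, 0.31))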
#'
confusionIndex <- function(x) {
x <- sort(x, decreasing = TRUE)
res <- 1 - (x[1] - x[2])
return(res)
}
# multinomial Brier score
# x: data.frame, rows are predictions/observations, columns contain classes
# classLabels: vector of class labels, corresponding to column names in x.i
# actual: name of column containing the observed class
#' @title Multinomial Brier Score
#'
#' @description Compute a multinomial Brier score from predicted class probabilities and the observed class label. Lower values are associated with a more accurate classifier.
#'
#' @param x \code{data.frame} of class probabilities (numeric) and observed class label (character), see examples
#' @param classLabels vector of predicted class labels (probabilities), corresponding to column names in \code{x}
#' @param actual name of column containing the observed class, should be character vector not factor
#'
#' @references Brier, Glenn W. 1950. "Verification of Forecasts Expressed in Terms of Probability." Monthly Weather Review 78 (1): 1-3. doi:10.1175/1520-0493(1950)078<0001:VOFEIT>2.0.CO;2.
#'
#' @author D.E. Beaudette
#'
#' @return a single Brier score, representative of data in \code{x}
#' @export
#'
#' @examples
#'
#' # columns 'a', 'b', 'c' contain predicted probabilities
#' # column 'actual' contains observed class label
#'
#' # a good classifier
#' d.good <- data.frame(
#' a = c(0.05, 0.05, 0.10),
#' b = c(0.90, 0.85, 0.75),
#' c = c(0.05, 0.10, 0.15),
#' actual = c('b', 'b', 'b'),
#' stringsAsFactors = FALSE
#' )
#'
#' # a rather bad classifier
#' d.bad <- data.frame(
#' a = c(0.05, 0.05, 0.10),
#' b = c(0.90, 0.85, 0.75),
#' c = c(0.05, 0.10, 0.15),
#' actual = c('c', 'c', 'c'),
#' stringsAsFactors = FALSE
#' )
#'
#' # class labels are factors
#' d.factors <- data.frame(
#' a = c(0.05, 0.05, 0.10),
#' b = c(0.90, 0.85, 0.75),
#' c = c(0.05, 0.10, 0.15),
#' actual = c('b', 'b', 'b'),
#' stringsAsFactors = TRUE
#' )
#'
#' # relatively low value = accurate
#' brierScore(x = d.good, classLabels = c('a', 'b', 'c'), actual = 'actual')
#'
#' # high values = not accurate
#' brierScore(x = d.bad, classLabels = c('a', 'b', 'c'), actual = 'actual')
#'
#' # message related to conversion of factor -> character
#' brierScore(x = d.factors, classLabels = c('a', 'b', 'c'), actual = 'actual')
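#'
#' # an added illustration: a perfect classifier attains the minimum score of 0
#' d.perfect <- data.frame(
#'   a = c(0, 0, 0),
#'   b = c(1, 1, 1),
#'   c = c(0, 0, 0),
#'   actual = c('b', 'b', 'b'),
#'   stringsAsFactors = FALSE
#' )
#' brierScore(x = d.perfect, classLabels = c('a', 'b', 'c'), actual = 'actual')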
#'
brierScore <- function(x, classLabels, actual = 'actual') {
if (inherits(x, 'data.frame')) {
x <- data.frame(x)
} else stop("`x` should be a data.frame", call. = FALSE)
# number of observations
n <- nrow(x)
# sanity check: no factors allowed in class labels
if(inherits(x[[actual]], 'factor')) {
message('converting `actual` from factor to character')
x[[actual]] <- as.character(x[[actual]])
}
# extract vector of observed classes
x.actual <- x[[actual]]
# keep only probabilities as matrix
x <- as.matrix(x[, classLabels, drop=FALSE])
# init new matrix to store most-likely class
m <- matrix(0, ncol = ncol(x), nrow = n)
# same structure as x.pr
dimnames(m)[[2]] <- classLabels
# set cells of actual observed outcome to 1
for(i in 1:n) {
x.i <- x.actual[i]
m[i, x.i] <- 1
}
  # compute multinomial brier score
# 1/n * sum((x - m)^2)
# x: matrix of predictions
# m: indicator matrix of outcomes
bs <- (1/n) * sum((x - m)^2, na.rm=TRUE)
return(bs)
}
|
/R/accuracy_uncertainty.R
|
no_license
|
ncss-tech/aqp
|
R
| false | false | 5,252 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Regressions.R
\name{BHJ_industry_regressions}
\alias{BHJ_industry_regressions}
\title{BHJ industry regressions}
\usage{
BHJ_industry_regressions(
industry_data,
lagIV = FALSE,
lagsIV = 1,
timeFE = FALSE,
industryFE = FALSE
)
}
\arguments{
\item{industry_data}{Industry data}
\item{lagIV}{(optional) default is FALSE}
\item{lagsIV}{(optional) default is 1}
\item{timeFE}{(optional) default is FALSE}
\item{industryFE}{(optional) default is FALSE}
}
\description{
BHJ industry regressions
}
|
/man/BHJ_industry_regressions.Rd
|
permissive
|
setzler/ShiftShareIV
|
R
| false | true | 541 |
rd
|
#library the tidyverse, readxl
library(readxl)
library(tidyverse)
library(kableExtra)
#then load the data
titanic <- read_excel('titanic.xlsx')
View(titanic)
# 1. Explore the data a bit
head(titanic)
tail(titanic)
class(titanic)
dim(titanic)
names(titanic)
str(titanic)
library(dplyr)
glimpse(titanic)
summary(titanic)
#no need to gather or spread
#a. What variables are present, what type are they? 891 obs and 12 variables; number variables - age. fare; chr variables - name, sex ,ticket, cabin, embarked.
#need to change survived, pclass. parch to chr variables
class("Survived")
titanic$Survived <- as.character(titanic$Survived)
as.character('PassengerID')
as.character('Pclass')
as.character('Parch')
as.factor('Embarked')
#b. How much missing data ? Age is missing 177 observations
library(stringr)
is.na(titanic)
any(is.na(titanic))
colSums(is.na(titanic))
summary(titanic)
complete.cases(titanic)
na.omit(titanic)
print(titanic)
str(titanic)
titanic[!complete.cases(titanic),]
#c. How many passengers of different types? 3 classes (216, 184, 491); mean age is 29.7
hist(titanic$Age)
summary(titanic$Age)
table(titanic$Pclass)
#d. How many male/female, how many survived/died? 342 survived, 549 died; 314 F, 577 M
table(titanic$Survived)
table(titanic$Sex)
# 2. Filter to select only males, look at survival rate
Males <- titanic %>%
filter(Sex == "male") %>%
select(Survived) %>%
  summarize(nsurvived = sum(as.numeric(Survived)), total=n(), survival_rate=(nsurvived/total)*100)
Males
titanic %>%
add_count(Sex)
titanic %>%
  add_tally(name = "total")
#3. Filter to select only females, look at survival rate
Females <- titanic %>%
filter(Sex == "female")
table(Females$Survived)
#4. Arrange by fare, look at survival rate in head and tail
titanicfare <- titanic %>%
  arrange(Fare)
summary(titanicfare$Fare)
View(titanicfare)
head(titanicfare, n=20)
tail(titanicfare, n=20)
library(forcats)
fare_survival <- titanic %>%
mutate(fare5 = case_when(Fare == 0 ~ "freeloader",
Fare < 10 ~ "Cheap",
Fare < 20 ~ "Average",
Fare < 100 ~ "Expensive",
Fare >=100 ~ "Royal"))
FS1 <- fare_survival %>%
group_by(fare5) %>%
summarize(nsurvived = sum(as.numeric(Survived)), total=n(), survival_rate=(nsurvived/total)*100)
ggplot(FS1, aes(x=fct_reorder(fare5, survival_rate), y=survival_rate)) +
geom_bar(stat = "identity")
#5. Parch = number of parents and children, SibSp = number of siblings and spouses. Use mutate to create a new variable called family_size = Parch + SibSp
class("Parch")
as.numeric("Parch")
class("SibSp")
as.numeric("SibSp")
titanic2 <- titanic %>%
mutate(family_size = Parch + SibSp)
str(titanic2)
#6. Look at family size vs survival
Fam <- table(titanic2$family_size, titanic2$Survived)
Fam
summary(Fam)
#7. To make it easier, use mutate to replace Survived = 0 with No, and Survived =1 with Yes
titanic2$Survived[titanic2$Survived == 1] <- "Yes"
titanic2$Survived[titanic2$Survived == 0] <- "No"
Fam <- table(titanic2$family_size, titanic2$Survived)
Fam
summary(Fam)
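# an added step beyond the exercise prompt: row-wise proportions make the
# survival rate within each family size easier to read than raw counts
prop.table(Fam, margin = 1)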
#8. Bar Plot how many male vs female passengers
counts <- table(titanic2$Sex)
barplot(counts, main = "Sex Proportion", xlab = "Sex", col=c("darkblue", "red"))
#9. Scatter plot age vs fare, with color = sex, and facet by survived
library(ggplot2)
titanic3 <- ggplot() + geom_point(data = titanic2, aes(x=Age, y=Fare, color = Sex))
titanic3
titanic3 + facet_grid(rows = vars(Survived))
titanic3 + facet_grid(cols = vars(Survived))
#10. Plot a stacked bar (fill= Survived), with x = Sex
counts <- table(titanic2$Sex)
barplot(counts, main = "Sex Proportion", xlab = "Sex", col=c("darkblue", "red"))
titanic4 <- ggplot() + geom_bar(data=titanic2, aes(x=Sex, fill=Survived))
titanic5 <- ggplot(data=titanic2, aes(x=Sex, fill=Survived)) + geom_bar(position = "fill")
#11. Group by sex, then mutate to get mean_fare and pct_survived
ggplot(titanic2, aes(x=Sex, y=Fare)) +
geom_point()
EX <- titanic2 %>%
  group_by(Sex) %>%
  summarise(mean_fare = mean(Fare), pct_survived = mean(Survived == "Yes") * 100)
ggplot(EX, aes(x=Sex, y=mean_fare)) +
  geom_bar(stat = "identity")
#install.packages("usethis")
#usethis::use_git()
#### add table
FS1 %>%
kable() %>%
kable_styling(full_width = F) %>%
column_spec(4, bold=T)
|
/Titanic Exercise.R
|
no_license
|
higgi13425/titanic
|
R
| false | false | 4,345 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exiv-package.R
\docType{package}
\name{exiv}
\alias{exiv}
\alias{exiv-package}
\title{Read and Write 'Exif' Image/Media Tags}
\description{
Read and Write 'Exif' Image/Media Tags
}
\author{
Bob Rudis (bob@rud.is)
}
|
/man/exiv.Rd
|
no_license
|
hrbrmstr/exiv
|
R
| false | true | 293 |
rd
|
#' Splits presence locations into training and testing data sets for use by DesktopGARP
#'
#' \code{splitData} Uses a user-defined percentage to split presence locations into training and testing datasets. Both are output as .xls files (for use in DesktopGARP) and a shapefile object (for testing using \code{\link{aucGARP}}).
#'
#' @param points a data.frame object containing latitude and longitude values of presence locations
#' @param p a numeric value specifying the percentage of locations to keep for training
#' @param type a character value specifying which points should be returned: training ("train"), testing ("test"), or both ("all")
#' @param iterations a numeric value specifying how many resampling events should be run; defaults to 1
#' @param output logical. Should the datasets be saved to file?
#' @param output.path a file path specifying the file name of the objects to be saved. "_train" or "_test" will be appended to file name (e.g. "C:/Users/Homo_sapiens_test.shp")
#'
#' @return A list object containing data.frames for training locations (when \code{type = "train"}), testing locations (when \code{type = "test"}), or both (when \code{type = "all"}). When output = TRUE, outputs both a .xls and shapefile to specified file path.
#'
#' @details Use the \code{iterations} element when performing multiple experiments in DesktopGARP. The function will return a single .xlsx datasheet with all training data combined for use by DesktopGARP, with
#' the "Species" vector containing the species name from the \code{points} object with the iteration number attached (e.g. "Homo_sapiens1_train", "Homo_sapiens2_train", "Homo_sapiens_train3_train", etc.).
#'
#' NOTE: The \code{splitData} function uses elements of the ".xlsx" package, which requires an installation of Java with the same version as RStudio. If this prompts an error with ".xlsx," the easiest fix is to work from the 32-bit version of RStudio.
#'
#' @examples
#' hs <- data.frame("Latitude" = c(-89, 72, 63, 42, 54), "Longitude" = c(-12, 13, 24, 26, 87), "Species" = rep("Homo sapiens", 5))
#' splitData(points = hs, p = 0.7, type = "all")
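#'
#' # an added, illustrative call: three resampling iterations kept in memory
#' # (output = FALSE) rather than written to file; see Details for the naming
#' splitData(points = hs, p = 0.7, type = "all", iterations = 3, output = FALSE)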
#'
#' @import raster
#' @import sp
#' @importFrom maptools writeSpatialShape
#' @importFrom xlsx write.xlsx
#'
#' @export
splitData <- function(points, p, type = c("train", "test", "all"), iterations = 1, output = TRUE, output.path){
if("Latitude" %in% names(points@data) == FALSE) {stop("Coordinates must be labeled 'Latitude' and 'Longitude'.")}
if("Longitude" %in% names(points@data) == FALSE) {stop("Coordinates must be labeled 'Latitude' and 'Longitude'.")}
if("Species" %in% names(points@data) == FALSE) {stop("Must contain vector of species names labeled 'Species'.")}
species <- levels(points$Species)
df.train <- data.frame()
  for(n in 1:iterations){
train.pts.format.all <- data.frame()
test.pts.format.all <- data.frame()
for(i in species){
#Subset for species
points.sp <- subset(points, points$Species == i)
#Add unique id
points.sp$id <- seq(1, length(points.sp))
#Pull a random sample dataset
sample <- sample(points.sp$id,ceiling(length(points.sp) * p))
#Subset points into training points
train.pts <- points.sp[(points.sp$id %in% sample), ]
#Subset other points as testing points
test.pts <- points.sp[!(points.sp$id %in% sample), ]
#Make sure in correct format for GARP
train.pts.format <- data.frame(paste(train.pts$Species,n,sep=""), train.pts$Longitude, train.pts$Latitude)
test.pts.format <- data.frame(paste(test.pts$Species,n,sep=""), test.pts$Longitude, test.pts$Latitude)
colnames(train.pts.format) <- c("Species", "Longitude", "Latitude")
colnames(test.pts.format) <- c("Species", "Longitude", "Latitude")
train.pts.format.all <- rbind(train.pts.format.all, train.pts.format)
test.pts.format.all <- rbind(test.pts.format.all, test.pts.format)
}
#Convert to spatial points dataframe
test.pts.all <- SpatialPointsDataFrame(as.matrix(data.frame(test.pts.format.all$Longitude,test.pts.format.all$Latitude)), data = test.pts.format.all)
#Output testing files
if(type == "test"){
if(output == TRUE){
#Write xlsx file
write.xlsx(test.pts.format.all, paste(output.path,n,"_test.xls", sep = ""), row.names=FALSE)
#Write shapefile
writeSpatialShape(test.pts.all, paste(output.path,n,"_test", sep = ""))
} else if(output == FALSE){return(test.pts.format.all)}
} else if(type == "all"){
if(output == TRUE){
#Write xlsx file
write.xlsx(test.pts.format.all, paste(output.path,n,"_test.xls", sep = ""), row.names=FALSE)
#Write shapefile
writeSpatialShape(test.pts.all, paste(output.path,n,"_test", sep = ""))
} else if(output == FALSE){return(test.pts.format.all)}
}
#Append training files
df.train <- rbind(df.train, train.pts.format.all)
}
#Convert to spatial points datafame
df.train.sp <- SpatialPointsDataFrame(as.matrix(data.frame(df.train$Longitude,df.train$Latitude)), data = df.train)
#Output files based on type call
if(type == "train"){
if(output == TRUE){
#Write xlsx file
write.xlsx(df.train.sp, paste(output.path,"_train.xls", sep = ""), row.names=FALSE)
#Write shapefile
writeSpatialShape(df.train.sp, paste(output.path,"_train", sep = ""))
} else if(output == FALSE){return(df.train.sp)}
} else if(type == "all"){
if(output == TRUE){
#Write xlsx file
write.xlsx(df.train.sp, paste(output.path,"_train.xls", sep = ""), row.names=FALSE)
#Write shapefile
writeSpatialShape(df.train.sp, paste(output.path,"_train", sep = ""))
} else if(output == FALSE){return(df.train.sp)}
}
}
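
# A minimal post-hoc check, shown as a commented sketch rather than package
# code; it assumes the in-memory return described above (type = "all" with
# output = FALSE gives a list holding the $train and $test data.frames) and
# the 'hs' example data.frame from the roxygen examples.
# splits <- splitData(points = hs, p = 0.7, type = "all", output = FALSE)
# nrow(splits$train) / (nrow(splits$train) + nrow(splits$test))  #realized training fraction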
/R/splitData.R | no_license | AlassaneB/GARPTools | R | false | false | 5,759 | r