| column | dtype | lengths / classes |
|---|---|---|
| content | large_string | lengths 0 to 6.46M |
| path | large_string | lengths 3 to 331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5 to 125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4 to 6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0 to 6.46M |
#####################################################
#'
#' Estimates \eqn{\beta}s per population and a bootstrap confidence interval
#'
#' Estimates population-specific FSTs or individual coancestries,
#' with a bootstrap confidence interval, assuming random mating
#'
#' If betaijT=TRUE, and the first column contains a unique identifier for
#' each individual, the function returns the matrix of individual coancestries/kinships.
#' Individual inbreeding coefficients can be obtained by multiplying the diagonal
#' by 2 and subtracting 1.
#'
#' @usage betas(dat,nboot=0,lim=c(0.025,0.975),diploid=TRUE,betaijT=FALSE)
#'
#' @param dat data frame with genetic data and pop identifier
#' @param nboot number of bootstrap samples.
#' @param lim width of the bootstrap confidence interval
#' @param diploid whether the data comes from a diploid organism
#' @param betaijT whether to estimate individual coancestries
#'
#' @return Hi Within population gene diversities (complement to 1 of matching probabilities)
#' @return Hb Between populations gene diversities
#' @return betaiovl Average \eqn{\hat{\beta_{WT}^i}} over loci (Population specific FSTs), Table 3 of
#' \href{https://academic.oup.com/genetics/article/206/4/2085/6072590}{Weir and Goudet, 2017 (Genetics)}
#' @return betaW Average of the betaiovl \eqn{\hat{\beta_{WT}}} over loci (overall population FST)
#' @return ci The bootstrap confidence interval of population specific FSTs
#' (only if more than 100 bootstraps requested AND if more than 10 loci are present)
#' @return if betaijT=TRUE, return the matrix of pairwise kinships only.
#'
#'
#'
#' @seealso \code{\link{fs.dosage}}, \code{\link{beta.dosage}} for Fst estimates (not assuming Random Mating)
#' and kinship estimates from dosage data, respectively
#'
#' @author Jerome Goudet \email{jerome.goudet@@unil.ch}
#'
#' @references \href{https://academic.oup.com/genetics/article/206/4/2085/6072590}{Weir and Goudet, 2017 (Genetics)}
#' A unified characterization of population structure and relatedness.
#'
#'
#' @examples
#' \dontrun{
#' #3 different population sizes lead to 3 different betais
#' dat<-sim.genot(size=40,N=c(50,200,1000),nbloc=50,nbal=10)
#' betas(dat,nboot=100)
#'
#' #individual coancestries from the smallest population are large
#' ind.coan<-betas(cbind(1:120,dat[,-1]),betaijT=TRUE)
#' diag(ind.coan$betaij)<-NA
#' graphics::image(1:120,1:120,ind.coan$betaij,xlab="Inds",ylab="Inds")
#' }
#'
#'@export
#'
#####################################################
betas<-function(dat,nboot=0,lim=c(0.025,0.975),diploid=TRUE,betaijT=FALSE){
ratio.Hi.Hb<-function(x){
dum<-which(!is.na(x))
sum(x[dum])/sum(Hb[dum])
}
if (is.genind(dat)) dat<-genind2hierfstat(dat)
if (betaijT) {inames<-dat[,1];dat[,1]<-1:dim(dat)[1]}
pfr<-pop.freq(dat,diploid)
pfr2<-lapply(pfr,function(x) t(x) %*% x)
nl<-dim(dat)[2]-1
np<-length(table(dat[,1]))
ns<-ind.count.n(dat)
if (diploid) ns<-ns*2
ns[is.na(ns)]<-0
Hi<-matrix(numeric(np*nl),ncol=nl)
dimnames(Hi)<-dimnames(ns)
Hb<-numeric(nl)
for (il in 1:nl){
Hi[,il]<-ns[,il]/(ns[,il]-1)*(1-diag(pfr2[[il]]))
# Hi[is.na(Hi)]<-0.0
npl<-sum(ns[,il]>0,na.rm=TRUE)
Hb[il]<-1-1/npl/(npl-1)*sum((pfr2[[il]]-diag(diag(pfr2[[il]]))),na.rm=TRUE)
}
betai<-1-apply(Hi,1,ratio.Hi.Hb)
betaW<-mean(betai,na.rm=T)
if (betaijT) {
ratio.Mij.Hb <- function(x) {
dum <- which(!is.na(x))
a<-mean(x[dum])/mean(Hb[dum])
b<-1/mean(Hb[dum])
a-b
}
H<-array(dim=c(np,np,nl))
for (il in 1:nl) H[,,il]<-pfr2[[il]]
betaij<-1+apply(H,c(1,2),ratio.Mij.Hb)
rownames(betaij)<-colnames(betaij)<-inames
all.res<-list(betaijT=betaijT,betaij=betaij)
}
else {
if (nboot==0L){
all.res<-list(betaijT=betaijT,Hi=Hi,Hb=Hb,betaiovl=betai,betaW=betaW)
}
else{
if (nboot<100L){
warning("Less than 100 bootstrap requested, can't estimate Conf. Int.")
all.res<-list(betaijT=betaijT,Hi=Hi,Hb=Hb,betaiovl=betai,betaW=betaW)
}
if (nl<10L) {
warning("Less than 10 loci, can't estimate Conf. Int.")
all.res<-list(betaijT=betaijT,Hi=Hi,Hb=Hb,betaiovl=betai,betaW=betaW)
}
boot.bi<-matrix(numeric(nboot*np),nrow=nboot)
nls<-apply(ns,1,function(x) which(x>0))
if (is.matrix(nls)){
for (ib in 1:nboot){
for (ip in 1:np){
dum<-sample(nls[,ip],replace=TRUE)
boot.bi[ib,ip]<-1-sum(Hi[ip,dum])/sum(Hb[dum])
}}}
else{
for (ib in 1:nboot){
for (ip in 1:np){
dum<-sample(nls[[ip]],replace=TRUE)
boot.bi[ib,ip]<-1-sum(Hi[ip,dum])/sum(Hb[dum])
}}}
boot.bW<-rowMeans(boot.bi)
bi.ci<-apply(cbind(boot.bi,boot.bW),2,stats::quantile,lim,na.rm=TRUE)
all.res<-list(betaijT=betaijT,Hi=Hi,Hb=Hb,betaiovl=betai,betaW=betaW,ci=bi.ci)
}}
class(all.res)<-"betas"
all.res
}
#' @describeIn betas print function for betas class
#' @param x a betas object
#' @param digits number of digits to print
#' @param ... further arguments to pass to print
#'
#' @method print betas
#' @export
#'
#'
####################################################################################
print.betas<-function(x,digits=4,...){
if (x$betaijT) {cat(" ***Kinship coefficients*** \n \n ")
print(round(x$betaij,digits=digits,...))}
else {
cat(" ***Population specific and Overall Fst*** \n\n")
res<-c(x$betaiovl,x$betaW)
names(res)<-c(names(x$betaiovl),"Overall")
print(res,digits=digits,...)
if (!is.null(x$ci)){
cat("\n ***Bootstrap CI for population specific and overall FSTs*** \n\n")
colnames(x$ci)<-names(res)
print(x$ci,digits=digits,...)
}
}
invisible(x)
}
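## A minimal usage sketch (assuming the hierfstat package is installed), showing
## how the individual inbreeding coefficients described in the details section
## are recovered from the coancestry matrix returned when betaijT=TRUE:
## F_i = 2 * beta_ii - 1. Data dimensions mirror the @examples block above.
library(hierfstat)
dat <- sim.genot(size = 40, N = c(50, 200, 1000), nbloc = 50, nbal = 10)
ind.coan <- betas(cbind(1:120, dat[, -1]), betaijT = TRUE)
ind.F <- 2 * diag(ind.coan$betaij) - 1  # diagonal holds self-coancestries/kinships
summary(ind.F)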
| path: /R/betas.R | license: no_license | repo: cran/hierfstat | language: R | is_vendor: false | is_generated: false | length_bytes: 5,920 | extension: r |
library(showtext)
dest <- file.path("C:", "Windows", "Fonts", "ARIALN.ttf")
font_add("arialn", regular = dest)
showtext_auto()
setwd("E:/Documents/GitHub/EBIO-R-Code/EBIO 3080/Lab/Week 5/Data")
flyDnDs <- read.csv("FlyDnDs.csv", header = TRUE)
meanDnDs <- mean(flyDnDs$DnDs)
par(bg = "#eee9d3")
par(mar = c(4.1, 5, 4.1, 5))
hist(flyDnDs$DnDs,
main = "",
xlab = "",
ylab = "",
axes = FALSE,
col = "#721919"
)
title(main = "Dn/Ds of Random Genes Throughout the D. melanogaster Genome",
family = "arialn",
cex.main = 2,
col.main = "#2c3136"
)
title(xlab = "Dn/Ds",
font.lab = 3,
family = "arialn",
cex.lab = 1.5,
col.lab = "#2c3136"
)
title(ylab = "Frequency",
font.lab = 3,
family = "arialn",
cex.lab = 1.5,
col.lab = "#2c3136"
)
axis(side = 1,
col = "#2c3136",
col.ticks = "#2c3136",
col.axis = "#2c3136"
)
axis(side = 2,
col = "#2c3136",
col.ticks = "#2c3136",
col.axis = "#2c3136"
)
abline(v = meanDnDs, lwd = 3, col = "#2c3136")
legend("topright",
"Mean Dn/Ds ratio",
pch = "-",
col = "#2c3136",
bg = "#eee9d3",
box.lwd = 1
)
| path: /EBIO 3080/Lab/Week 5/Pre-Lab/Week5_Pre-Lab.R | license: no_license | repo: dmwo/EBIO-R-Code | language: R | is_vendor: false | is_generated: false | length_bytes: 1,185 | extension: r |
\name{XTRA 1}
\alias{mixed}
\alias{mtmixed}
\alias{mtgsru}
\alias{mm}
\alias{NNS}
\alias{GSFLM}
\alias{GSRR}
\alias{GS2EIGEN}
\alias{NNSEARCH}
\alias{predict_FLMSS}
\title{
Mixed model solver
}
\description{
Function to solve univariate mixed models with or without the usage of omic information. This function allows single-step modeling of replicated observations with marker information available through the usage of a linkage function to connect to a whole-genome regression method. Genomic estimated values can be optionally deregressed (no shrinkage) while fitting the model.
}
\usage{
mixed(y,random=NULL,fixed=NULL,data=NULL,X=list(),
alg=emML,maxit=10,Deregress=FALSE,...)
}
\arguments{
\item{y}{
Response variable from the data frame containing the dataset.
}
\item{random}{
Formula. Right-hand side formula of random effects.
}
\item{fixed}{
Formula. Right-hand side formula of fixed effects.
}
\item{data}{
Data frame containing the response variable, random and fixed terms.
}
\item{X}{
List of omic incidence matrices. Row names of these matrices connect the omic information to the levels of the indicated random terms (eg. \code{X=list("ID"=gen)}).
}
\item{alg}{
Function. Whole-genome regression algorithm utilized to solve link functions. These include MCMC (\code{wgr}, \code{BayesB}, etc) and EM (\code{emEN}, \code{emDE}, etc) algorithms. By default, it runs maximum likelihood \code{emML}.
}
\item{maxit}{
Integer. Maximum number of iterations.
}
\item{Deregress}{
Logical. Deregress (unshrink) coefficients while fitting the model?
}
\item{...}{
Additional arguments to be passed to the whole-genome regression algorithms specified in \code{alg}.
}
}
\details{
The model for the whole-genome regression is as follows:
\deqn{y = Xb + Zu + Wa + e}
where \eqn{y} is the response variable, \eqn{Xb} corresponds to the fixed effect term, \eqn{Zu} corresponds to one or more random effect terms, \eqn{W} is the incidence matrix of terms with omic information and \eqn{a} is omic values by \eqn{a=Mg}, where \eqn{M} is the genotypic matrix and \eqn{g} are marker effects. Here, \eqn{e} is the residual term. An example is provided using the data from the NAM package with: \code{demo(mixedmodel)}.
Alternative (and updated) implementations have similar syntax:
01) \code{mm(y,random=NULL,fixed=NULL,data=NULL,
M=NULL,bin=FALSE,AM=NULL,it=10,verb=TRUE,
FLM=TRUE,wgtM=TRUE,cntM=TRUE,nPc=3)}
02) \code{mtmixed = function(resp, random=NULL, fixed=NULL,
data, X=list(), maxit=10, init=10, regVC=FALSE)}
}
\value{
The function \code{mixed} returns a list with Fitness values (\code{Fitness}) containing observation \code{obs}, fitted values \code{hat}, residuals \code{res}, and fitted values by model term \code{fits}; Estimated variance components (\code{VarComp}) containing the variance components per se (\code{VarComponents}) and variance explained by each model term (\code{VarExplained}), regression coefficients by model term (\code{Coefficients}), and the effects of structured terms (\code{Structure}) containing the marker effects of each model term where markers were provided.
}
\references{
Xavier, A. (2019). Efficient Estimation of Marker Effects in Plant Breeding. G3: Genes, Genomes, Genetics, DOI: 10.1534/g3.119.400728
}
\author{
Alencar Xavier
}
\examples{
\dontrun{
demo(mixedmodel)
}
}
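## A hedged sketch (in R, not part of the Rd file) of how the omic linkage
## described for the X argument is set up: the row names of the marker matrix
## must match the levels of the random term it is attached to. The data frame,
## column names and marker matrix below are invented for illustration; the
## fitting call itself is left commented out because the authoritative worked
## example is demo(mixedmodel) in the bWGR package.
set.seed(1)
dataset <- data.frame(ID  = factor(rep(paste0("G", 1:20), each = 3)),
                      Env = factor(rep(1:3, times = 20)),
                      Phenotype = rnorm(60))
gen <- matrix(sample(0:2, 20 * 100, replace = TRUE), nrow = 20,
              dimnames = list(paste0("G", 1:20), paste0("snp", 1:100)))
all(rownames(gen) %in% levels(dataset$ID))  # TRUE: this is the required linkage
# fit <- mixed(Phenotype, random = ~ ID, fixed = ~ Env,
#              data = dataset, X = list("ID" = gen))   # assumed calling pattern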
| path: /man/mix.Rd | license: no_license | repo: alenxav/bWGR | language: R | is_vendor: false | is_generated: false | length_bytes: 3,501 | extension: rd |
#Multiple Linear/Non-linear regression to determine train delay given 46 factors
library(MASS)
library(caret)
library(xlsx)
library(neuralnet)
library(e1071)
a<-read.csv("TSC_430.csv",sep=",")
b<-read.xlsx("TSC_Meta_data",sheetName="Sheet2",header=TRUE) # assuming an Excel workbook; read.csv() has no 'sheet' argument
colnames(a)<-b[,2]
#Averaging factors of precipitation, visibility, etc at the origin and destination
anew2<- transform(anew2, journey_average_precipitation = rowMeans(anew2[,c(2,8)], na.rm = TRUE))
anew2<- transform(anew2, journey_average_temperature = rowMeans(anew2[,c(3,9)], na.rm = TRUE))
anew2<- transform(anew2, journey_average_cloud_cover = rowMeans(anew2[,c(4,10)], na.rm = TRUE))
anew2<- transform(anew2, journey_average_weather_visibility = rowMeans(anew2[,c(5,11)], na.rm = TRUE))
anew2<- transform(anew2, journey_average_weather_wind_gust_speed = rowMeans(anew2[,c(6,12)], na.rm = TRUE))
anew2<- transform(anew2, journey_average_weather_wind_speed = rowMeans(anew2[,c(7,13)], na.rm = TRUE))
anew8<-anew2[,c(19:24)]
anew8$journeystopcount<-anew2$journey_stopcount
#Computing the actual delay time(Delay at destination-delay at the origin)
ptm<-proc.time()
for(i in 1:nrow(a)){
if(a$journey_origin_variation_status[i]=="LATE"){
a$actualdelaytime[i]=((a$journey_destination_timetable_variation[i])-(a$journey_origin_timetable_variation[i]))
}else if(a$journey_origin_variation_status[i]=="EARLY"){
a$actualdelaytime[i]=((a$journey_destination_timetable_variation[i])+(a$journey_origin_timetable_variation[i]))}
else if(a$journey_origin_variation_status[i]=="ON TIME"){
a$actualdelaytime[i]=((a$journey_destination_timetable_variation[i])-(a$journey_origin_timetable_variation[i]))
}else if(a$journey_origin_variation_status[i]=="OFF ROUTE"){
a$actualdelaytime[i]=a$journey_destination_timetable_variation[i]
}else if(a$journey_origin_variation_status[i]==""){
a$actualdelaytime[i]=NA
}
}
elapsed<-proc.time()-ptm
#Calculating mean of origin and destination for all the 12 other variables-reduces to Mean_of_variables_dataset
anew8$actualdelaytime<-a$actualdelaytime
#Removing outliers in the data-using cooks distance
model_for_outliers<-lm(actualdelaytime~.,data=anew8)
cooksd<-cooks.distance(model_for_outliers)
influential <- as.numeric(names(cooksd)[(cooksd > 4*mean(cooksd, na.rm=T))])
data_without_outliers<-anew8[-influential,]
##Removing rows with NA-complete.cases function
final_data_for_prediction<-data_without_outliers[complete.cases(data_without_outliers),]
set.seed(345)
index <- sample(1:nrow(final_data_for_prediction),round(0.75*nrow(final_data_for_prediction)))
train <-final_data_for_prediction[index,]
test <- final_data_for_prediction[-index,]
# Fitting linear model-without scaling and centering
lm.fit <- glm(actualdelaytime~., data=train)
summary(lm.fit)
# Predicted data from lm
pr.lm2 <- predict(lm.fit,test)
# Test MSE
MSE.lm <- sum((pr.lm2 - test$actualdelaytime)^2)/nrow(test)
##-----Another method-Scaling and using Cross Validation in linear model for measuring RMSE-------------------#
std<-apply(final_data_for_prediction,2,sd)
data_scaled_with_mean<-as.data.frame(scale(final_data_for_prediction,center=TRUE,scale=std)) # center/scale are arguments of scale(), not as.data.frame()
set.seed(450)
cv.error<-NULL
variableimportance<-NULL
k <- 10
for(i in 1:k){
index <- sample(1:nrow(final_data_for_prediction),round(0.75*nrow(final_data_for_prediction)))
train.cv <- data_scaled_with_mean[index,]
test.cv <- data_scaled_with_mean[-index,]
model2 <- lm(actualdelaytime~.,data=train.cv)
pr.lm <- predict(model2,test.cv[,1:8])
pr.lm <- pr.lm*(sd(final_data_for_prediction$actualdelaytime))+mean(final_data_for_prediction$actualdelaytime)
test.cv.r <- (test.cv$actualdelaytime)*(sd(final_data_for_prediction$actualdelaytime))+mean(final_data_for_prediction$actualdelaytime)
variableimportance[i]<-varImp(model2)
cv.error[i] <- (sum((test.cv.r-pr.lm)^2))/length(test.cv.r)
}
cross_validation_error_in_linear_fit<-mean(cv.error)
df<-data.frame(matrix(unlist(variableimportance),nrow=10,byrow=T),stringsAsFactors=FALSE)
df<-df[,-8]
colnames(df)<-names(data_scaled_with_mean[,c(1:7)])
varimp_according_to_linear_model<-apply(df,2,mean)#This gives the mean of the variable importance of each variable through all the 10 iterations
correlation<-as.data.frame(cor(data_scaled_with_mean))
##--------------Using neural networks for regression-------------------------##
n <- names(train)
f <- as.formula(paste("actualdelaytime ~", paste(n[!n %in% "actualdelaytime"], collapse = " + ")))
set.seed(786)
m<-10
cv.error.nn<-NULL
variableimportance.nn<-NULL
for(j in 1:m){
index <- sample(1:nrow(data_scaled_with_mean),round(0.75*nrow(data_scaled_with_mean)))
train.cv.nn <- data_scaled_with_mean[index,]
test.cv.nn <- data_scaled_with_mean[-index,]
nn <- neuralnet(f,data=train.cv.nn,hidden=4,linear.output=TRUE)
pr.nn <- compute(nn,test.cv.nn[,1:8])
pr.nn <- pr.nn$net.result*(sd(final_data_for_prediction$actualdelaytime))+mean(final_data_for_prediction$actualdelaytime)
test.cv.r.nn <- (test.cv.nn$actualdelaytime)*(sd(final_data_for_prediction$actualdelaytime))+mean(final_data_for_prediction$actualdelaytime)
variableimportance.nn[j]<-varImp(nn)
cv.error.nn[j] <- (sum((test.cv.r.nn - pr.nn)^2))/nrow(test.cv.nn)
}
mean(cv.error.nn)
df.nn<-data.frame(matrix(unlist(variableimportance.nn),nrow=10,byrow=T),stringsAsFactors=FALSE)
colnames(df.nn)<-names(data_scaled_with_mean[,c(1:7)])
varimp.nn<-apply(df.nn,2,mean)
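## A small self-contained sketch (toy data, hypothetical object names) of the
## back-transformation used in the cross-validation loops above: predictions
## made on a z-scored response are mapped back to the original units with
## pred * sd(y) + mean(y) before the prediction error is computed.
set.seed(1)
toy <- data.frame(x = rnorm(100))
toy$y <- 2 * toy$x + rnorm(100)
toy.scaled <- as.data.frame(scale(toy))                  # z-score every column
fit.toy <- lm(y ~ x, data = toy.scaled)
pred.scaled <- predict(fit.toy, toy.scaled)
pred.original <- pred.scaled * sd(toy$y) + mean(toy$y)   # back to original units
mean((pred.original - toy$y)^2)                          # MSE on the original scale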
| path: /Regression_for_predicting_traindelay.R | license: no_license | repo: aishiitm/Internship-with-Tech-Mahindra | language: R | is_vendor: false | is_generated: false | length_bytes: 5,688 | extension: r |
defineModule(sim, list(
name="disturbanceDriver",
description="generate parameters for the generic percolation model",# spades::spread()",
keywords=c("fire"),
authors=c(person(c("Steve", "G"), "Cumming", email="stevec@sbf.ulaval.ca", role=c("aut", "cre"))),
childModules=character(),
version=numeric_version("0.1.0"),
spatialExtent=raster::extent(rep(NA_real_, 4)),
timeframe=as.POSIXlt(c(NA, NA)),
timeunit="year",
citation=list(),
reqdPkgs=list("stats"),
parameters=rbind(
defineParameter(".plotInitialTime", "numeric", NA, NA, NA, "This describes the simulation time at which the first plot event should occur"),
defineParameter(".saveInitialTime", "numeric", NA, NA, NA, "This describes the simulation time at which the first save event should occur")),
#defineParameter("paramName", "paramClass", value, min, max, "parameter description")),
inputObjects=data.frame(objectName=c("spreadCalibrationData","fireRegimeParameters","fireMapAttr"),
objectClass=c("data.frame","list","list"), other=rep(NA_character_,3), stringsAsFactors=FALSE),
#inputObjects=data.frame(objectName=c("fireRegimeParameters","spreadCalibrationData"), objectClass=c("list","data.frame"), other=rep(NA_character_,2), stringsAsFactors=FALSE),
outputObjects=data.frame(objectName=c("spreadParameters"),
objectClass=c("list"),
other=NA_character_, stringsAsFactors=FALSE)
))
## event types
# - type `init` is required for initialization
doEvent.disturbanceDriver = function(sim, eventTime, eventType, debug=FALSE) {
if (eventType=="init") {
### check for more detailed object dependencies:
### (use `checkObject` or similar)
# do stuff for this event
disturbanceDriverInit(sim)
# schedule future event(s)
scheduleEvent(sim, params(sim)$disturbanceDriver$.plotInitialTime, "disturbanceDriver", "plot")
scheduleEvent(sim, params(sim)$disturbanceDriver$.saveInitialTime, "disturbanceDriver", "save")
} else if (eventType=="templateEvent") {
# ! ----- EDIT BELOW ----- ! #
# do stuff for this event
# e.g., call your custom functions/methods here
# you can define your own methods below this `doEvent` function
# schedule future event(s)
# e.g.,
# scheduleEvent(sim, time(sim) + increment, "disturbanceDriver", "templateEvent")
# ! ----- STOP EDITING ----- ! #
} else {
warning(paste("Undefined event type: '", events(sim)[1, "eventType", with=FALSE],
"' in module '", events(sim)[1, "moduleName", with=FALSE], "'", sep=""))
}
return(invisible(sim))
}
## event functions
# - follow the naming convention `modulenameEventtype()`;
# - `modulenameInit()` function is required for initialization;
# - keep event functions short and clean, modularize by calling subroutines from section below.
### template initialization
# 1 - (1-p0)**N = pEscape
# 1 - pEscape = (1-p0)**N
# (1 - pEscape)**1/N = 1 - p0
# p0 = 1 - (1 - pEscape)**1/N
disturbanceDriverInit = function(sim) {
hatP0<-function(pEscape,n=8){
1 - (1-pEscape)**(1/n)
}
#a real clever boots would minimise the abs log odds ratio.
#be my guest.
escapeProbDelta<-function(p0,w,hatPE){
abs(sum(w*(1-(1-p0)**(0:8)))-hatPE)
}
#this table contains calibration data for several landscape sizes
#and several min fire sizes (1 or 2 cells), organised by column.
#The data were made by Steve Cumming in June 2013 for a whole other purpose.
#I chose the one that seems most appropriate to me
#browser()
y<-log(sim$spreadCalibrationData[,paste("ls",1e3,"fs",2,sep="")])
x<-sim$spreadCalibrationData$pjmp
m.glm<-glm(x~I(log(y)))
mfs<-sim$fireRegimeParameters$xBar/sim$fireMapAttr$cellSize #mean size escaped fires in cells
pJmp<-sum(m.glm$coeff*c(1,log(mfs)))
w<-sim$fireMapAttr$nNbrs
w<-w/sum(w)
hatPE<-sim$fireRegimeParameters$pEscape
foo<-optimise(escapeProbDelta,interval=c(hatP0(hatPE,8),hatP0(hatPE,floor(sum(w*0:8)))),tol=1e6,w=w,hatPE=hatPE)
sim$spreadParameters<-list(pJmp=pJmp,p0=foo$minimum,naiveP0=hatP0(sim$fireRegimeParameters$pEscape,8)) # assign into sim so the declared output object is actually returned
return(invisible(sim))
}
### template for save events
disturbanceDriverSave = function(sim) {
# ! ----- EDIT BELOW ----- ! #
# do stuff for this event
sim <- saveFiles(sim)
# ! ----- STOP EDITING ----- ! #
return(invisible(sim))
}
### template for plot events
disturbanceDriverPlot = function(sim) {
# ! ----- EDIT BELOW ----- ! #
# do stuff for this event
#Plot("object")
# ! ----- STOP EDITING ----- ! #
return(invisible(sim))
}
### template for your event1
disturbanceDriverEvent1 = function(sim) {
# ! ----- EDIT BELOW ----- ! #
# ! ----- STOP EDITING ----- ! #
return(invisible(sim))
}
### add additional events as needed by copy/pasting from above
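## Standalone numeric check (not part of the module) of the escape-probability
## algebra sketched above disturbanceDriverInit(): if each of n neighbouring
## cells ignites independently with probability p0, then
## pEscape = 1 - (1 - p0)^n, which inverts to p0 = 1 - (1 - pEscape)^(1/n).
hatP0.check <- function(pEscape, n = 8) 1 - (1 - pEscape)^(1 / n)
p0 <- hatP0.check(pEscape = 0.5, n = 8)
p0                  # roughly 0.083
1 - (1 - p0)^8      # recovers pEscape = 0.5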
| path: /disturbanceDriver/disturbanceDriver.R | license: no_license | repo: SteveCumming/BEACONs | language: R | is_vendor: false | is_generated: false | length_bytes: 4,872 | extension: r |
#' pribor
#'
#' A function returning data frame of PRague InterBank OffeRed rates (PRIBOR).
#'
#' The function expects date input, and returns data frame of two or more columns - date, and relevant PRIBOR rate (as determined by `maturity` parameter).
#'
#' PRIBOR rates are reported as fractions, i.e. not as percentages (i.e. 1\% is reported as .01, not 1).
#'
#' For dates when no PRIBOR was quoted (e.g. weekends, Bank Holidays, such as December 24th on any year, or August 13th, 2002 when no PRIBOR was quoted due to catastrophic floods) no result will be returned.
#'
#' @name pribor
#'
#' @param date Date of fixing as date, default is yesterday.
#' @param maturity Maturity of loan as string, default is overnight ("1D").
#'
#' @return data frame - first column is date, second is relevant PRIBOR rate.
#' @export
#'
#' @examples pribor(as.Date("2002-08-12"), "1D")
#'
# exported function...
pribor <- function(date = Sys.Date() - 1, maturity = "1D") {
cnb <- as.logical(Sys.getenv("CNB_UP", unset = TRUE)) # dummy variable to allow testing of network
if (!ok_to_proceed("https://www.cnb.cz/en/financial-markets/money-market/pribor/fixing-of-interest-rates-on-interbank-deposits-pribor/year.txt") | !cnb) { # CNB website down
message("Data source broken.")
return(NULL)
}
# a quick reality check:
if(!inherits(date, "Date")) stop("'date' parameter expected as a Date data type!")
if(!all(maturity %in% c("1D", "1W", "2W", "1M", "3M", "6M", "9M", "1Y"))) stop(paste0("'", maturity, "' is not a recognized maturity abbreviation!"))
roky <- format(date, "%Y") %>%
unique()
sazba <- paste0("PRIBOR_", maturity)
res <- lapply(roky, dnl_pribor) %>%
dplyr::bind_rows() %>%
dplyr::filter(date_valid %in% date) %>%
dplyr::select(date_valid, !! sazba) %>%
dplyr::mutate_if(is.numeric, ~ . / 100) %>%
dplyr::arrange(date_valid)
res
} # / exported function
# downloader - a helper function to be l-applied
dnl_pribor <- function(year) {
remote_path <- "https://www.cnb.cz/cs/financni-trhy/penezni-trh/pribor/fixing-urokovych-sazeb-na-mezibankovnim-trhu-depozit-pribor/rok.txt?year=" # remote archive
remote_file <- paste0(remote_path, year) # path to ČNB source data
local_file <- file.path(tempdir(), paste0("pr-", year, ".txt")) # local file - in tempdir
if (!file.exists(local_file)) {
# proceed to download via curl
curl::curl_download(url = remote_file, destfile = local_file, quiet = T)
Sys.sleep(1/500)
} # /if - local file exists
local_df <- readr::read_delim(local_file,
delim = "|", skip = 2,
locale = readr::locale(decimal_mark = ","),
col_names = c(
"date_valid",
"PRIBID_1D", "PRIBOR_1D",
"PRIBID_1W", "PRIBOR_1W",
"PRIBID_2W", "PRIBOR_2W",
"PRIBID_1M", "PRIBOR_1M",
"PRIBID_2M", "PRIBOR_2M",
"PRIBID_3M", "PRIBOR_3M",
"PRIBID_6M", "PRIBOR_6M",
"PRIBID_9M", "PRIBOR_9M",
"PRIBID_1Y", "PRIBOR_1Y"
),
col_types = readr::cols(
date_valid = readr::col_date(format = "%d.%m.%Y"),
PRIBID_1D = readr::col_double(),
PRIBOR_1D = readr::col_double(),
PRIBID_1W = readr::col_double(),
PRIBOR_1W = readr::col_double(),
PRIBID_2W = readr::col_double(),
PRIBOR_2W = readr::col_double(),
PRIBID_1M = readr::col_double(),
PRIBOR_1M = readr::col_double(),
PRIBID_2M = readr::col_double(),
PRIBOR_2M = readr::col_double(),
PRIBID_3M = readr::col_double(),
PRIBOR_3M = readr::col_double(),
PRIBID_6M = readr::col_double(),
PRIBOR_6M = readr::col_double(),
PRIBID_9M = readr::col_double(),
PRIBOR_9M = readr::col_double(),
PRIBID_1Y = readr::col_double(),
PRIBOR_1Y = readr::col_double()
)
)
attr(local_df, 'spec') <- NULL
local_df
} # /function
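## A brief usage sketch, assuming the czechrates package is installed and the
## CNB endpoint is reachable (pribor() itself returns NULL with a message when
## the source is down). Rates come back as fractions, so multiplying by 100
## gives the usual percentage quotation; both example dates are business days.
library(czechrates)
fix <- pribor(date = as.Date(c("2002-08-09", "2002-08-12")), maturity = c("1D", "1W"))
fix
transform(fix, PRIBOR_1D_pct = PRIBOR_1D * 100)  # 1 % is reported as 0.01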
| path: /R/pribor.R | license: permissive | repo: jla-data/czechrates | language: R | is_vendor: false | is_generated: false | length_bytes: 4,816 | extension: r |
#' Finding DMR
#'
#' Finding DMR by Wilcoxon, t-Student or Kolmogorov-Smirnov tests, or by logistic regression, logistic regression with mixed models,
#' or logistic regression with mixed models with a correlation matrix.
#' In Ttest, Wilcoxon and KS the methylation rates of the x and y probes are compared at the same position and chromosome,
#' and the null hypothesis is that the mean (median, distribution, respectively) of the methylation rate is the same in both probes.
#' In these methods the alternative hypothesis is two sided, and regions are sorted by the critical value.
#'
#' In the regression methods, the number of successes is the number of methylated cytosines and the number of failures is the number of unmethylated cytosines.
#' Output from these methods is the beta coefficient of the indicator variable from the regression model and the critical value from the Wald test on the indicator variable.
#' The indicator variable is equal to 1 if observations are from the x probe and 0 otherwise. These methods order regions based on the beta coefficient of
#' the grouping variable or the p.value of the grouping variable.
#' In the mixed models the explanatory variable is only the indicator variable and the positions on the chromosome are random effects.
#' In standard logistic regression the explanatory variables also include the positions on the chromosome.
#' @param data There are two options: 1. dataframe with specific columns: chr, poz, prob, no, meth, unmeth, meth.rate.
#' This dataframe is result of function preprocessing.
#' 2. dataframe with specific columns: chr, poz, prob, no, meth, unmeth, meth.rate, tiles and possible tiles.common columns. This dataframe is result of function create.tiles.min.gap or
#' create.tiles.fixed.length.
#' @param methods vectors with given methods. Possible values are: 'Wilcoxon', 'Ttest', 'KS', 'Reg.Log', 'Reg.Mixed',
#' 'Reg.Corr.Mixed'.
#' 'Wilcoxon' - Wilcoxon signed test;
#' 'Ttest' - t-Student test with unequal variance;
#' 'KS' - Kolmogorov-Smirnov test;
#' 'Reg.Log' - Wald test of grouping variable from logistic regression;
#' 'Reg.Mixed' - Wald test of grouping variable from logistic regression with mixed effects;
#' 'Reg.Corr.Mixed' - Wald test of grouping variable from logistic regression with mixed effect and estimated previous correlation matrix
#' @param p.value.log.reg if not NULL, regions with p.value of the prob variable smaller than p.value.log.reg are returned and decreasingly ordered by the absolute value of the beta coefficient
#' of the prob variable; otherwise regions are increasingly ordered by p.value
#' @param p.value.reg.mixed if not NULL, regions with p.value of the prob variable smaller than p.value.reg.mixed are returned and decreasingly ordered by the absolute value of the beta coefficient
#' of the prob variable; otherwise regions are increasingly ordered by p.value
#' @param p.value.reg.corr.mixed if not NULL, regions with p.value of the prob variable smaller than p.value.reg.corr.mixed are returned and decreasingly ordered by the absolute value of the beta coefficient
#' of the prob variable; otherwise regions are increasingly ordered by p.value
#' @param beta.coef.max only results which have an absolute value of beta.coef less than this parameter are returned from Log.Reg, Reg.Mixed, Reg.Corr.Mixed. This prevents cases where the algorithm
#' did not converge well
#' @return list object. Elements of list are results of given methods. The most interesting regions are on the top
#' @export
#' @examples
#' data('schizophrenia')
#' control <- schizophrenia %>% filter(category == 'control') %>%
#' dplyr::select(-category)
#'
#' disease <- schizophrenia %>% filter(category == 'disease') %>%
#' dplyr::select(-category)
#'
#' data <- preprocessing(control, disease)
#' data.tiles <- create_tiles_max_gap(data, gaps.length = 100)
#' data.tiles.small <- data.tiles %>% filter(tiles < 30)
#'
#' #finding DMR by all methods with sorting on p.values
#' find_DMR(data.tiles.small, c('Wilcoxon', 'Ttest', 'KS', 'Reg.Log', 'Reg.Mixed', 'Reg.Corr.Mixed'))
#'
#' #finding DMR by 'Reg.Log', 'Reg.Mixed', 'Reg.Corr.Mixed' with sorting on beta values
#' find_DMR(data.tiles.small, c('Reg.Log', 'Reg.Mixed', 'Reg.Corr.Mixed'), p.value.log.reg = 0.01, p.value.reg.mixed = 0.02, p.value.reg.corr.mixed=0.03)
#'
#' #finding DMR only by 'Reg.Log' with sorting on beta values and 'Wilcoxon' with sorting on p.values
#' find_DMR(data.tiles.small, c('Wilcoxon', 'Reg.Log'), p.value.log.reg = 0.001)
find_DMR <- function(data, methods, p.value.log.reg = NULL,
p.value.reg.mixed= NULL, p.value.reg.corr.mixed= NULL,
beta.coef.max = 30){
check_data_without_tiles(data[,1:7])
check_tiles_in_data(data)
check_args_find_DMR(methods, p.value.log.reg,
p.value.reg.mixed, p.value.reg.corr.mixed,
beta.coef.max)
data <- group_data(data, prob = F)
find_DMR_given_methods(data, methods, p.value.log.reg,p.value.reg.mixed, p.value.reg.corr.mixed,
beta.coef.max)
}
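## A short continuation of the @examples above (assuming this package and dplyr
## are attached): the return value is a list with one element per requested
## method, each ordered so that the most interesting regions are on top, so
## head() on every element lists the leading candidate regions.
library(dplyr)
data("schizophrenia")
control <- schizophrenia %>% filter(category == "control") %>% dplyr::select(-category)
disease <- schizophrenia %>% filter(category == "disease") %>% dplyr::select(-category)
data <- preprocessing(control, disease)
data.tiles.small <- create_tiles_max_gap(data, gaps.length = 100) %>% filter(tiles < 30)
res <- find_DMR(data.tiles.small, c("Wilcoxon", "Reg.Log"), p.value.log.reg = 0.001)
lapply(res, head, 3)  # top regions from each requested method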
| path: /R/find_DMR.R | license: no_license | repo: geneticsMiNIng/metR | language: R | is_vendor: false | is_generated: false | length_bytes: 4,878 | extension: r |
library(data.table)
library(ggplot2)
library(wpp2017)
## IHME pops/mort
source("/home/j/temp/central_comp/libraries/current/r/get_demographics.R")
source("/home/j/temp/central_comp/libraries/current/r/get_demographics_template.R")
mortality_2016_demographics <- get_demographics(gbd_team="mort", gbd_round_id=4)
pops <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/WPP2017_INT_F03_2_POPULATION_BY_AGE_ANNUAL_MALE.csv', skip=1)
names(pops) <- c('Index','Variant','name','Notes','country_code','year',paste0('age',as.character(c(0:79))), 'age80plus', paste0('age',as.character(c(80:100))))
broad_regions <- c('WORLD','More developed regions','Less developed regions','Least developed countries',
'Less developed regions, excluding least developed countries','Less developed regions, excluding China','High-income countries','Middle-income countries',
'Upper-middle-income countries','Lower-middle-income countries','Low-income countries','Sub-Saharan Africa','AFRICA',
'Eastern Africa','Middle Africa','Northern Africa','Southern Africa','Western Africa','ASIA','Eastern Asia',
'South-Central Asia','Central Asia','South-Eastern Asia','Western Asia','EUROPE','Eastern Europe',
'Northern Europe','Western Europe','Southern Europe','LATIN AMERICA AND THE CARIBBEAN','Caribbean',
'Central America','South America','NORTHERN AMERICA','Canada','United States of America','OCEANIA','Southern Asia')
pops <- pops[!(name %in% broad_regions), ]
cols <- grep("^age", names(pops), value = TRUE)
for(c in cols) {
pops[, (c) := as.numeric(gsub(' ','',get(c)))]
}
pops <- pops[, total_pop := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=grep("^age", names(pops))]
total_pops <- pops[, c('name','year','total_pop')]
total_pops[, total_pop := total_pop * 1000]
for(c in cols) {
pops[, paste0('prop_',c) := get(c) / total_pop]
}
pops <- pops[, size_15_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('age15','age16','age17','age18','age19')]
pops <- pops[, prop_15_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('prop_age15','prop_age16','prop_age17','prop_age18','prop_age19')]
pops <- pops[, size_10_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('age10','age11','age12','age13','age14','age15','age16','age17','age18','age19')]
pops <- pops[, prop_10_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('prop_age10','prop_age11','prop_age12','prop_age13','prop_age14','prop_age15','prop_age16','prop_age17','prop_age18','prop_age19')]
pops <- pops[, size_20_24 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('age20','age21','age22','age23','age24')]
for(y in seq(1950,2015,5)) pops[year>=y, five_year := y]
pops <- pops[, list(prop_15_19=mean(prop_15_19), size_15_19=mean(size_15_19),size_20_24=mean(size_20_24), prop_10_19=mean(prop_10_19), size_10_19=mean(size_10_19)), by=c('country_code','name','five_year')]
setnames(pops, 'five_year', 'year')
pops[, ratio_15_19_20_24 := size_15_19 / size_20_24]
ggplot() + geom_line(data=pops[name=='Liberia',], aes(x=year,y=ratio_15_19_20_24)) + xlim(c(1985,2005))
## NB: net_migration, crni, out_migration and mx are assumed to already exist in the workspace
## (they are not created in this script).
nm <- copy(net_migration)
nm[, year := year - 2.5]
ni <- copy(crni)
ni[, year := year - 2.5]
om <- copy(out_migration[,c('name','year','out_rate','out_migration')])
om[, year := year - 2.5]
mx_subset <- mx[sex=='Male' & age==0, ]
mx_subset[, year := year - 2.5]
cohorts <- merge(om, pops, by=c('name','year'), all.y=TRUE)
cohorts <- merge(cohorts, nm, by=c('name','year','country_code'), all.x=TRUE)
cohorts <- merge(cohorts, mx_subset, by=c('name','year','country_code'), all.x=TRUE)
cohorts <- merge(cohorts, ni, by=c('name','year','country_code'), all.x=TRUE)
cohorts <- cohorts[order(name,year)]
## Create lags and changes
create_changes <- function(n, dt, vars, lag, change=FALSE, year_step=5) {
dt.n <- dt[name==n, ]
if(length(dt.n[, name])==1) return(NULL)
if(length(dt.n[, name])!=1) {
for(r in seq(min(dt.n[, year]),max(dt.n[, year]),year_step)) {
for(v in vars) {
if(r>=min(dt.n[, year])+lag & change==TRUE) {
previous_v <- dt.n[year==r-lag, get(v)]
dt.n[year==r, paste0('relchange_',(v)) := (get(v)-previous_v)/previous_v]
dt.n[year==r, paste0('abschange_',(v)) := get(v)-previous_v]
dt.n[year==r, paste0('r_',(v)) := log(get(v)/previous_v)/lag]
}
if(r>=min(dt.n[, year])+lag) {
lag_v <- dt.n[year==r-lag, get(v)]
dt.n[year==r, paste0('lag',lag,'_',(v)) := lag_v]
}
}
}
return(dt.n)
}
}
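## For each variable v in vars, create_changes() adds lag<lag>_<v> (the value <lag> years earlier)
## and, when change=TRUE, relchange_<v>, abschange_<v> and r_<v> (the annualised log growth rate
## over the lag window); these are the columns referenced downstream (e.g. lag5_r_size_15_19).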
cohort_change <- rbindlist(lapply(unique(cohorts[, name]), create_changes,
dt = cohorts,
vars = c('prop_15_19','size_15_19','mx','out_rate'),
change = TRUE,
lag = 5))
cohort_change <- rbindlist(lapply(unique(cohorts[, name]), create_changes,
dt = cohort_change,
vars = c('prop_10_19','size_10_19'),
change = TRUE,
lag = 10))
## Calculate all possible lags of r for migration correlation.
for(l in c(0,5,10,15,20)) {
cohort_change <- rbindlist(lapply(unique(cohort_change[, name]), create_changes,
dt = cohort_change,
vars = c('r_size_15_19','r_mx','crni','net_migration','mx','r_size_10_19'),
change = FALSE,
lag = l))
}
## Angola, Burundi, Burkina Faso, Congo, Honduras
write.csv(cohort_change, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/all_migration_data.csv', row.names=FALSE)
cohort_change <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/all_migration_data.csv')
cor_countries_mort <- readRDS('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/cor_countries_mort.RDS')
transition_countries <- data.table(country=cor_countries_mort)
# write.csv(transition_countries, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/transition_countries.csv', row.names = FALSE)
# transition_countries <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/transition_countries.csv')
#write.csv(transition_countries, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/model_countries.csv', row.names = FALSE)
# transition_countries <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/transition_countries.csv')
# setnames(transition_countries, 'country', 'name')
model_countries <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/model_countries.csv')
cohort_change <- merge(cohort_change, transition_countries, by='name', all.x = TRUE)
## Merge IHME net migrant data
ihme <- fread('C:/Users/ngraetz/Desktop/net_migrants.csv')
locs <- fread('C:/Users/ngraetz/Desktop/gaul_to_loc_id.csv')
setnames(locs, 'loc_name', 'name')
locs[name=='Tanzania', name := 'United Republic of Tanzania']
locs[name=='Democratic Republic of the Congo', name := 'Democratic Republic of Congo']
locs[name=='Ivory Coast', name := "Cote d'Ivoire"]
locs[name=='Iran', name := 'Iran (Islamic Republic of)']
locs[name=='Vietnam', name := 'Viet Nam']
locs[name=='Syria', name := 'Syrian Arab Republic']
locs[name=='Czech Republic', name := 'Czechia']
locs[name=='Russia', name := 'Russian Federation']
locs[name=='Bolivia', name := 'Bolivia (Plurinational State of)']
locs[name=='Venezuela', name := 'Venezuela (Bolivarian Republic of)']
locs[name=='United States', name := 'United States of America']
locs[name=='The Gambia', name := 'Gambia']
locs[name=='Laos', name := "Lao People's Democratic Republic"]
locs[name=='Cape Verde', name := 'Cabo Verde']
locs[name=='Palestine', name := 'State of Palestine']
setnames(locs, 'ihme_lc_id', 'ihme_loc_id')
setnames(ihme, 'value', 'ihme_net_migrants')
setnames(ihme, 'year_id', 'year')
ihme <- merge(ihme, locs, by='ihme_loc_id')
ihme <- ihme[, c('ihme_net_migrants','year','name')]
ihme <- ihme[, list(ihme_net_migrants=sum(ihme_net_migrants)), by=c('year','name')]
cohort_change <- merge(cohort_change, ihme, by=c('name','year'), all.x=TRUE)
# cohort_change <- rbindlist(lapply(unique(cohort_change[, name]), create_changes,
# dt = cohort_change,
# vars = c('ihme_net_migrants'),
# change = TRUE,
# lag = 5))
cohort_change <- merge(cohort_change, total_pops, by=c('year','name'))
cohort_change[, ihme_rate := (ihme_net_migrants / total_pop)*1000]
## Merge IHME SDI
sdi <- fread('J:/temp/ngraetz/sdi.csv')
setnames(sdi, 'mean_value', 'sdi')
setnames(sdi, 'year_id', 'year')
setnames(sdi, 'location_id', 'loc_id')
sdi <- merge(sdi, locs, by='loc_id')
sdi <- sdi[, c('sdi','year','name')]
cohort_change <- merge(cohort_change, sdi, by=c('name','year'), all.x=TRUE)
## Merge ILO % agriculture
ilo <- fread("C:/Users/ngraetz/Downloads/ILOSTAT_.csv")
ilo <- ilo[sex=='SEX_T' & classif1.label=='Broad sector: Agriculture', ]
setnames(ilo, 'ref_area', 'ihme_loc_id')
ilo <- merge(ilo, locs, by='ihme_loc_id')
setnames(ilo, 'obs_value', 'percent_agriculture')
setnames(ilo, 'time', 'year')
cohort_change <- merge(cohort_change, ilo, by=c('name','year'), all.x=TRUE)
## For each country that is growing because of reductions in infant mortality, find the lag that produces the highest correlation with out-migration rates
find_lag <- function(n, dt, v, target, type) {
dt.n <- dt[name==n, ]
#message(n)
if(length(dt.n[, name])==1) return(NULL)
if(length(dt.n[!is.na(get(target)), name])==0) return(NULL)
if(length(dt.n[, name])!=1) {
for(l in c(0,5,10,15,20)) {
dt.n[, (paste0('cor',l)) := cor(dt.n[, get(paste0('lag',l,'_',v))], dt.n[, get(target)], use='complete.obs')]
}
if(type=='positive') dt.n[, cor := apply(.SD, 1, max, na.rm=TRUE), .SDcols=grep("^cor", names(dt.n))]
if(type=='negative') dt.n[, cor := apply(.SD, 1, min, na.rm=TRUE), .SDcols=grep("^cor", names(dt.n))]
for(l in c(0,5,10,15,20)) dt.n[get(paste0('cor',l))==cor, bestlag := l]
dt.n <- unique(dt.n[, c('name','cor','bestlag')])
return(dt.n)
}
}
this_v <- 'r_size_15_19'
net_cors <- rbindlist(lapply(unique(cohort_change[name %in% cor_countries_mort, name]), find_lag, dt=cohort_change, v=this_v, target='net_migration', type='negative'))
setnames(net_cors, c('cor','bestlag'), c('net_cor','net_lag'))
out_cors <- rbindlist(lapply(unique(cohort_change[name %in% cor_countries_mort, name]), find_lag, dt=cohort_change, v=this_v, target='out_rate', type='positive'))
setnames(out_cors, c('cor','bestlag'), c('out_cor','out_lag'))
ihme_cors <- rbindlist(lapply(unique(cohort_change[name %in% cor_countries_mort, name]), find_lag, dt=cohort_change, v=this_v, target='ihme_rate', type='negative'))
setnames(ihme_cors, c('cor','bestlag'), c('ihme_cor','ihme_lag'))
all_cors <- merge(net_cors, out_cors, by='name')
all_cors <- merge(all_cors, ihme_cors, by='name')
## STAGE 1: Test mortality correlations
get_cor <- function(n, dt, vars) {
dt.n <- dt[name==n & year>=1970, ]
dt.n <- dt.n[!is.na(get(vars[1])) & !is.na(get(vars[2])), ]
c <- cor(dt.n[, get(vars[1])], dt.n[, get(vars[2])])
dt.n[, corr := c]
dt.n <- unique(dt.n[, c('name','corr')])
return(dt.n)
}
mort_cors <- rbindlist(lapply(unique(cohort_change[, name]),get_cor,cohort_change,c('lag15_r_mx','r_size_15_19')))
mort_cors <- mort_cors[order(-corr)] ## 64 countries where correlation <= -0.4
## Try correlating MAGR 1975-1990 with MAGR in the size of the 15-19 cohorts 1990-2005 by country.
global_mort_cors <- cohort_change[year %in% c(1990,1995,2000,2005), list(mx_magr=mean(lag15_mx), size_15_19_magr=mean(r_size_15_19)), by='name']
global_mort_cors <- global_mort_cors[order(-mx_magr)] ## 64 countries where correlation <= -0.4
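## A minimal sketch (assumption: using the global_mort_cors table built just above) of the
## cross-country correlation the comment above aims at, using the two summary columns just computed.
cor(global_mort_cors$mx_magr, global_mort_cors$size_15_19_magr, use = 'complete.obs')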
## NB: gbd is only constructed further below ("Load GBD data"); this plot assumes it already exists.
ggplot() +
  geom_point(data=gbd,
             aes(x=lag15_mx,
                 y=r_size_15_19))
## Try GLMs
model_data <- copy(cohort_change)
model_data[, lag0_r_size_15_19 := lag0_r_size_15_19*100]
model_data[, lag5_r_size_15_19 := lag5_r_size_15_19*100]
model_data[, lag10_r_size_15_19 := lag10_r_size_15_19*100]
model_data[, lag15_r_size_15_19 := lag15_r_size_15_19*100]
model_data[, lag20_r_size_15_19 := lag20_r_size_15_19*100]
model_data[, lag20_r_mx := lag20_r_mx*100]
model_data[, lag15_r_mx := lag15_r_mx*100]
model_data[, lag10_r_size_10_19 := lag10_r_size_10_19*100]
fit_glm <- glm(cbind(round(out_migration), round(total_pop-out_migration)) ~
lag5_r_size_15_19 + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ], family=binomial)
glm_coefs1 <- data.table(model='GLM1',
name=names(fit_glm$coefficients),
coef=fit_glm$coefficients,
se=coef(summary(fit_glm))[,2],
p=coef(summary(fit_glm))[,4])
glm_coefs1[, coef := exp(coef)]
glm_coefs1[1:5, ]
fit_lm <- lm(out_rate ~ lag5_r_size_15_19 + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm1 <- data.table(model='LM1',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
fit_lm <- lm(out_rate ~ lag20_r_mx + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm2 <- data.table(model='LM2',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
fit_lm <- lm(net_migration ~ lag20_crni + lag5_net_migration + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm3 <- data.table(model='LM3',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
fit_lm <- lm(out_rate ~ lag20_crni + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm4 <- data.table(model='LM4',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
all_lms <- rbind(glm_coefs1, lm1, lm2, lm3, lm4)
saveRDS(all_lms, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/lm_fits.RDS')
get_coefs <- function(reg='', global=FALSE) {
if(global==TRUE) fit_lm <- lm(net_migration ~ lag5_r_size_15_19 + percent_agriculture + lag5_net_migration + as.factor(name) + year,
data=model_data[!is.na(transition_region), ])
if(global!=TRUE) fit_lm <- lm(net_migration ~ lag5_r_size_15_19 + percent_agriculture + lag5_net_migration + as.factor(name) + year,
data=model_data[transition_region == reg, ])
coefs <- data.table(model=ifelse(global,'global',reg),
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
return(coefs)
}
get_coefs_glm <- function(reg='', global=FALSE) {
if(global==TRUE) fit_lm <- glm(cbind(round(out_migration), round(total_pop-out_migration)) ~
lag5_r_size_15_19 + percent_agriculture + lag5_out_rate + as.factor(name) + year,
data=model_data[!is.na(transition_region), ], family=binomial)
if(global!=TRUE) fit_lm <- glm(cbind(round(out_migration), round(total_pop-out_migration)) ~
lag5_r_size_15_19 + percent_agriculture + lag5_out_rate + as.factor(name) + year,
data=model_data[transition_region == reg], family=binomial)
coefs <- data.table(model=ifelse(global,'global',reg),
name=names(fit_lm$coefficients),
coef=exp(fit_lm$coefficients),
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
return(coefs)
}
all_lms <- rbind(get_coefs(global=TRUE),
get_coefs(reg='ssa'),
get_coefs(reg='ea'),
get_coefs(reg='ca'),
get_coefs(reg='name'))
saveRDS(all_lms, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/lm_fits_net_cohort.RDS')
all_lms <- rbind(get_coefs_glm(global=TRUE),
get_coefs_glm(reg='ssa'),
get_coefs_glm(reg='ea'),
get_coefs_glm(reg='ca'),
get_coefs_glm(reg='name'))
saveRDS(all_lms, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/glm_fits_out_cohort.RDS')
## Try random slope models
library(lme4)
sdi_groups <- model_data[year==2005 & name %in% model_countries[, name] & !is.na(sdi), c('name','sdi')]
for(q in rev(seq(.1,1,.9/9))) {
sdi_q <- quantile(sdi_groups[, sdi], p=q)
message(round(sdi_q, 3))
sdi_groups[sdi < sdi_q, sdi_group := as.character(q)]
}
for(q in as.character(rev(seq(.1,1,.9/9)))) {
message(paste(unique(sdi_groups[sdi_group==q, name]), collapse=' '))
}
sdi_data <- merge(model_data, sdi_groups[, c('name','sdi_group')], by='name')
sdi_data[, scaled_year := year / 1000]
sdi_data[, scaled_sdi := sdi*100]
sdi_data <- merge(sdi_data, gbd, by=c('name','year'), all.x = TRUE)
#sdi_data[, lag5_r_gbd_size_15_19 := lag5_r_gbd_size_15_19*100]
sdi_data$transition_region <- NULL
sdi_data$transition_region_detailed <- NULL
sdi_data <- merge(sdi_data, model_countries, by='name', all.x=TRUE)
saveRDS(sdi_data, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/sdi_data.rds')
## Load GBD data
gbd_mort <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/gbd_u5m.csv')
setnames(gbd_mort, 'location_name', 'name')
gbd_pop <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/gbd_pops.csv')
setnames(gbd_pop, 'location_name', 'name')
gbd_15_19 <- gbd_pop[age_group_name=="15 to 19", list(gbd_size_15_19=sum(gbd_pops)), by=c('year','name')]
gbd_working_age <- gbd_pop[age_group_name %in% c("15 to 19","20 to 24","25 to 29","30 to 34","35 to 39","40 to 44","45 to 49","50 to 54","55 to 59"), list(gbd_size_15_60=sum(gbd_pops)), by=c('year','name')]
#gbd_0_5 <- gbd_pop[age_group_name %in% c("Early Neonatal","Late Neonatal","Post Neonatal","1 to 4"), list(gbd_size_0_5=sum(gbd_pops)), by=c('year','name')]
gbd_pop <- merge(gbd_15_19, gbd_working_age, by=c('year','name'))
#gbd_pop <- merge(gbd_pop, gbd_0_5, by=c('year','name'))
gbd_pop[, gbd_propwa_15_19 := gbd_size_15_19/gbd_size_15_60]
## Make changes and lags
gbd_pop <- rbindlist(lapply(unique(gbd_pop[, name]), create_changes,
dt = gbd_pop,
vars = c('gbd_size_15_19'),
change = TRUE,
lag = 5,
year_step = 5))
gbd_mort <- rbindlist(lapply(unique(gbd_mort[, name]), create_changes,
dt = gbd_mort,
vars = c('gbd_mx'),
change = TRUE,
lag = 5,
year_step = 5))
gbd <- merge(gbd_mort, gbd_pop, by=c('name','year'), all.y=TRUE)
for(l in c(0,5,10,15,20)) {
gbd <- rbindlist(lapply(unique(gbd[, name]), create_changes,
dt = gbd[!is.na(r_gbd_size_15_19)],
vars = c('r_gbd_mx','r_gbd_size_15_19','gbd_mx'),
change = FALSE,
lag = l,
year_step = 5))
}
#gbd <- gbd[name %in% unique(sdi_data[, name]),]
gbd <- gbd[name!='Georgia']
mort_cors <- rbindlist(lapply(unique(gbd[, name]),get_cor,gbd,c('lag15_r_gbd_mx','r_gbd_size_15_19')))
mort_cors <- mort_cors[order(-corr)]
## Try correlating MAGR 1980-1995 with MAGR in the size of the 15-19 cohorts 1995-2010 by country.
country_change <- copy(gbd[year %in% c(1980,1995), c('year','name','gbd_mx')])
country_change <- dcast(country_change, name ~ year, value.var = "gbd_mx")
country_change[, diff := `1995` - `1980`]
country_change <- country_change[order(-diff)] ## 61 countries have experienced an absolute drop in under-5 mortality greater than 0.01 between 1980-1995.
model_countries <- country_change[diff <= -.01, name]
## Countries where mortality dropped after 2000
new_countries <- copy(gbd[year %in% c(2000,2015), c('year','name','gbd_mx')])
new_countries <- dcast(new_countries, name ~ year, value.var = "gbd_mx")
new_countries[, diff := `2000` - `2015`]
new_countries <- new_countries[order(-diff)] ## countries with an absolute drop in under-5 mortality greater than 0.01 between 2000-2015
new_countries <- new_countries[diff >= .01 & !(name %in% model_countries), name]
## Countries where the size of the 15-19 cohorts has significantly increased 1985-2000
size_countries <- copy(gbd[year %in% c(1985,2000), c('year','name','r_gbd_size_15_19')])
size_countries <- dcast(size_countries, name ~ year, value.var = "r_gbd_size_15_19")
size_countries[, diff := `1985` - `2000`]
size_countries <- size_countries[order(-diff)] ## countries ordered by the change in the 15-19 cohort growth rate between 1985 and 2000
size_countries <- size_countries[diff >= .01 & !(name %in% model_countries), name]
global_mort_cors <- gbd[year %in% 1995:2010 & name %in% model_countries, list(mx_magr=mean(lag15_r_gbd_mx, na.rm=TRUE), size_15_19_magr=mean(r_gbd_size_15_19)), by='name']
global_mort_cors <- global_mort_cors[order(mx_magr)] ## 64 countries where correlation <= -0.4
ggplot() +
geom_point(data=global_mort_cors,
aes(x=mx_magr,
y=size_15_19_magr))
ggplot() +
geom_point(data=gbd[name %in% model_countries & year %in% 1995:2010,],
aes(x=r_gbd_size_15_19,y=lag15_r_gbd_mx))
ggplot() +
geom_line(data=gbd[name=='Uganda'],
aes(x=year,
y=gbd_mx))
## Fit random slope model for given IV by given category.
fit_random_slope <- function(dv, iv, cat, order_cat, zero_line, dt, country_fe=TRUE, guide_title, size_var=NULL, no_res=FALSE) {
if(no_res==TRUE) {
f <- as.formula(paste0(dv, ' ~ ', iv, ' + percent_agriculture + scaled_sdi + lag5_out_rate + as.factor(name) + year'))
mixedmod <- lm(f, data=dt)
return(list(mixedmod))
}
if(no_res==FALSE) {
if(country_fe) f <- as.formula(paste0(dv, ' ~ ', iv, ' + log_lag5_out_rate + as.factor(name) + scaled_year + (1 + ', iv, ' | ', cat, ')'))
if(!country_fe) f <- as.formula(paste0(dv, ' ~ ', iv, ' + log_lag5_out_rate + scaled_year + (1 + ', iv, ' | ', cat, ')'))
mixedmod <- lmer(f, data=dt)
# examine random and fixed effects
gg.data <- data.table(slope=ranef(mixedmod)[[cat]][,iv],
group=rownames(ranef(mixedmod)[[cat]]))
if(is.null(size_var)) size_var <- dv
mean_obs <- dt[, list(outcome=mean(get(size_var), na.rm=TRUE)), by=cat]
setnames(mean_obs, cat, 'group')
gg.data <- merge(gg.data, mean_obs, by='group')
ifelse(order_cat,
gg.data[, f_group := factor(group, levels=gg.data$group[order(gg.data[, slope])])],
gg.data[, f_group := group])
gg <- ggplot(data=gg.data,
aes(x=f_group, y=exp(slope+fixef(mixedmod)[iv]), size=outcome)) +
geom_point() +
geom_hline(yintercept = exp(fixef(mixedmod)[iv]), color = 'red', size=2) +
labs(x='Grouping variable',y='Slope') +
theme_minimal() +
theme(axis.text.x = element_text(angle=60, hjust=1)) +
scale_size_continuous(guide = guide_legend(title = guide_title))
if(zero_line) {
gg <- gg + geom_hline(yintercept = 0, linetype='dashed')
}
return(list(gg, mixedmod))
}
}
mod <- fit_random_slope(dv='out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data, country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_migration', no_res=TRUE)
summary(mod[[1]])
fixef(mod[[2]])['lag5_r_size_15_19']
sdi_data[, lag5_r_gbd_size_15_19 := lag5_r_gbd_size_15_19*100]
sdi_data[, log_out_rate := log(out_rate+0.001)]
sdi_data[, log_lag5_out_rate := log(lag5_out_rate+0.001)]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data, country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Liberia','Cambodia','Tajikistan'))], country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='transition_region_detailed', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Timor-Leste','Tajikistan','El Salvador','Guinea','Sierra Leone'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='sdi_group', order_cat=FALSE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Timor-Leste','Tajikistan','El Salvador','Guinea'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
out_cors <- rbindlist(lapply(unique(sdi_data[, name]), find_lag, dt=sdi_data, v='r_gbd_size_15_19', target='out_rate', type='positive'))
setnames(out_cors, c('cor','bestlag'), c('out_cor','out_lag'))
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/stage3_v2.pdf'), width = 12, height = 6)
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data, country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Sierra Leone','Bhutan','South Sudan','Afghanistan'))], country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='transition_region_detailed', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Sierra Leone','Bhutan','South Sudan','Afghanistan'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='sdi_group', order_cat=FALSE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Sierra Leone','Bhutan','South Sudan','Afghanistan'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
dev.off()
test <- sdi_data[!(name %in% c('Zimbabwe')) & !is.na(percent_agriculture) & year <= 2005 & !is.na(out_rate), ]
test[, lme_pred := predict(mod[[2]])]
test[, manual :=
fixef(mod[[2]])['(Intercept)'] +
fixef(mod[[2]])['lag5_r_size_15_19'] * lag5_r_size_15_19 +
fixef(mod[[2]])['percent_agriculture'] * percent_agriculture +
fixef(mod[[2]])['lag5_out_rate'] * lag5_out_rate +
fixef(mod[[2]])['as.factor(name)Kenya'] +
fixef(mod[[2]])['year'] * year +
ranef(mod[[2]])[['transition_region_detailed']][4, 'lag5_r_size_15_19'] * lag5_r_size_15_19 +
ranef(mod[[2]])[['transition_region_detailed']][4, '(Intercept)']]
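## Quick sanity check (assumption: the coefficient names used in the manual reconstruction above
## match the model stored in mod[[2]]): the manual sum of fixed and random effects should agree
## with predict() up to numerical noise.
test[, max(abs(lme_pred - manual), na.rm = TRUE)]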
test[, inla_pred := inla_model$summary.fitted.values$mean]
test[, manual_inla :=
inla_model$summary.fixed['(Intercept)',]$mean +
inla_model$summary.fixed['lag5_r_size_15_19',]$mean * lag5_r_size_15_19 +
inla_model$summary.fixed['percent_agriculture',]$mean * percent_agriculture +
inla_model$summary.fixed['lag5_out_rate',]$mean * lag5_out_rate +
inla_model$summary.fixed['as.factor(name)Kenya',]$mean +
inla_model$summary.fixed['year',]$mean * year +
inla_model$summary.random$transition_region_detailed[4, ]$mean * lag5_r_size_15_19]
## Test INLA implementation and compare
dv <- 'log_out_rate'
iv <- 'lag5_r_gbd_size_15_19'
cat <- 'name'
inla_f <- as.formula(paste0(dv, ' ~ ', iv, ' + lag5_out_rate + year + f(', cat, ', ', iv, ', model = "iid")'))
inla_f <- out_rate ~ lag5_r_size_15_19 + f(transition_region_detailed, lag5_r_size_15_19, model = "iid")
library(INLA)  ## inla() is provided by the INLA package (installed from r-inla.org, not CRAN)
inla_model =
  inla(formula=inla_f,
       data=sdi_data, family="gaussian", control.compute=list(dic=TRUE))#, control.fixed = list(prec.intercept = 0.0001))
summary(inla_model)
inla_model$summary.random
test <- sdi_data[!is.na(percent_agriculture), ]
## Correlated random intercept + slope template (iid2d); y, x1 and mydata below are placeholders
## from an INLA example, and transition_region_detailed must be an integer index (1..n.block) for this to run.
n.block = max(sdi_data$transition_region_detailed) ## = 4
sdi_data$i.intercept = sdi_data$transition_region_detailed
sdi_data$j.intercept = sdi_data$transition_region_detailed + n.block ## see doc for iid2d
formule_random_correlated_intercept_slope = y ~ x1 +
f(i.intercept, model="iid2d", n=2*n.block) +
f(j.intercept, x1, copy="i.intercept")
model_random_correlated_intercept_slope_INLA = inla.cpo(
formula=formule_random_correlated_intercept_slope,
data=mydata, family="gaussian", control.compute=list(dic=TRUE))
summary(model_random_correlated_intercept_slope_INLA)
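## A minimal sketch (assumptions: sdi_data, log_out_rate and lag5_r_gbd_size_15_19 as built above;
## transition_region_detailed is recoded to an integer index, which iid2d requires) applying the
## correlated intercept/slope template to the out-migration model rather than the placeholders.
inla_dat <- sdi_data[!is.na(log_out_rate) & !is.na(lag5_r_gbd_size_15_19) & !is.na(transition_region_detailed), ]
inla_dat[, region_id := as.integer(factor(transition_region_detailed))]
n.block2 <- max(inla_dat$region_id)
inla_dat[, i.intercept := region_id]
inla_dat[, j.intercept := region_id + n.block2]  ## second index block holds the slopes
f_iid2d <- log_out_rate ~ lag5_r_gbd_size_15_19 +
  f(i.intercept, model = "iid2d", n = 2 * n.block2) +
  f(j.intercept, lag5_r_gbd_size_15_19, copy = "i.intercept")
fit_iid2d <- inla(f_iid2d, data = as.data.frame(inla_dat), family = "gaussian",
                  control.compute = list(dic = TRUE))
summary(fit_iid2d)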
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/updated_heat_maps.pdf'), width = 6, height = 10)
for(cor_var in c('net_cor','ihme_cor','out_cor')) {
heat_data <- unique(all_cors[, c('name','net_cor','out_cor','net_lag','out_lag','ihme_cor','ihme_lag')])
if(cor_var=='out_cor') heat_data[, name_f := factor(name, levels=heat_data$name[order(heat_data[, get(cor_var)])])]
if(cor_var!='out_cor') heat_data[, name_f := factor(name, levels=heat_data$name[order(-heat_data[, get(cor_var)])])]
heat_data <- melt(heat_data, id.vars = c('name_f','out_lag','net_lag','ihme_lag'), measure.vars = c('net_cor','out_cor','ihme_cor'))
heat_data[variable=='net_cor', value_lab := paste0(as.character(round(value,2)), ' (', net_lag, ')')]
heat_data[variable=='out_cor', value_lab := paste0(as.character(round(value,2)), ' (', out_lag, ')')]
heat_data[variable=='ihme_cor', value_lab := paste0(as.character(round(value,2)), ' (', ihme_lag, ')')]
heat_data[variable=='net_cor', variable := 'WPP (net)\n1950-2015']
heat_data[variable=='out_cor', variable := 'Wittgenstein (out)\n1990-2010']
heat_data[variable=='ihme_cor', variable := 'IHME (net)\n1950-2015']
heat_data[, variable := factor(variable, levels=c('WPP (net)\n1950-2015','IHME (net)\n1950-2015','Wittgenstein (out)\n1990-2010'))]
redblue <- c('#b2182b','#d6604d','#f4a582','#fddbc7','#f7f7f7','#d1e5f0','#92c5de','#4393c3','#2166ac')
heat.gg <- ggplot(heat_data, aes(variable, name_f)) +
geom_tile(aes(fill = value), color = "white") +
geom_text(aes(label = value_lab)) +
#scale_fill_gradient(low = "steelblue", high = "white", na.value = "white", limits=c(-1,0)) +
scale_fill_gradientn(colours=rev(redblue), values=c(-1,0,1), na.value = "#000000", rescaler = function(x, ...) x, oob = identity) +
ylab("Country") +
xlab("Comparison") +
theme(legend.title = element_text(size = 10),
legend.text = element_text(size = 12),
plot.title = element_text(size=16),
axis.title=element_text(size=14,face="bold"),
axis.text.x = element_text(angle = 90, hjust = 1)) +
labs(fill = "Correlation") +
theme_minimal()
print(heat.gg)
}
dev.off()
get_cor <- function(n, dt, vars) {
dt.n <- dt[name==n, ]
dt.n <- dt.n[!is.na(get(vars[1])) & !is.na(get(vars[2])), ]
c <- cor(dt.n[, get(vars[1])], dt.n[, get(vars[2])])
dt.n[, corr := c]
dt.n <- unique(dt.n[, c('name','corr')])
return(dt.n)
}
cors <- rbindlist(lapply(unique(cohort_change[, name]),get_cor,cohort_change,c('out_rate','lag_age_prop')))
cors <- cors[order(corr)]
cohort_change <- merge(cohort_change, cors, by='name')
library(grid)
library(gridExtra)
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/all_trends_by_country.pdf'), width = 20, height = 8)
for(this_name in unique(sdi_data[order(name), name])) {
message(this_name)
mort.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=mx),
size = 2) +
labs(x='Year',y='GBD: U5M') +
theme_minimal()
rmort.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=r_mx),
size = 2) +
geom_hline(yintercept = 0, color = 'red') +
labs(x='Year',y='GBD: growth rate in U5M') +
theme_minimal()
age_prop.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=gbd_size_15_19/1000),
size = 2) +
labs(x='Year',y='GBD: pop size 15-19 (in thousands)') +
theme_minimal()
age_change.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=r_gbd_size_15_19),
size = 2) +
geom_hline(yintercept = 0, color = 'red') +
labs(x='Year',y='GBD: growth rate in population size 15-19') +
theme_minimal()
out_migration.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=out_rate),
size = 2) +
labs(x='Year',y='Wittgenstein: out-migration rate (per 1000)') +
theme_minimal()
gLegend<-function(a.plot){
if ("ggplot" %in% class(a.plot)) {
tmp <- ggplot_gtable(ggplot_build(a.plot))
  } else if ("grob" %in% class(a.plot)) {
    tmp <- a.plot
  }
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
#leg <- gLegend(mx.gg)
#leg$vp <- viewport(layout.pos.row = 2:6, layout.pos.col = 13)
grid.newpage()
pushViewport(viewport(layout = grid.layout(6, 15)))
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
print(mort.gg + theme(legend.position="none"), vp = vplayout(2:6, 1:3))
print(rmort.gg + theme(legend.position="none"), vp = vplayout(2:6, 4:6))
print(age_prop.gg + theme(legend.position="none"), vp = vplayout(2:6, 7:9))
print(age_change.gg + theme(legend.position="none"), vp = vplayout(2:6, 10:12))
print(out_migration.gg + theme(legend.position="none"), vp = vplayout(2:6, 13:15))
#grid.draw(leg)
grid.text(this_name, vp = viewport(layout.pos.row = 1, layout.pos.col = 1:15))
}
dev.off()
library(zoo)  ## rollapplyr()/rollmean() come from zoo
cohort_change[, moving_average := rollapplyr(data=net_migration, width=1:.N, FUN=mean, by=1), by = 'name']
cohort_change[, moving_average := lapply(.SD, rollmean, k = 3, na.pad = TRUE, partial = TRUE), by = name, .SDcols = 'net_migration']
cohort_change[name=='Nigeria', c('net_migration','moving_average')]
print('hi')
print('hi')
print('hi')
print('hi')
|
/abs_cohorts.R
|
no_license
|
ngraetz/cfr
|
R
| false | false | 35,063 |
r
|
library(data.table)
library(ggplot2)
library(wpp2017)
## IHME pops/mort
source("/home/j/temp/central_comp/libraries/current/r/get_demographics.R")
source("/home/j/temp/central_comp/libraries/current/r/get_demographics_template.R")
mortality_2016_demographics <- get_demographics(gbd_team="mort", gbd_round_id=4)
pops <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/WPP2017_INT_F03_2_POPULATION_BY_AGE_ANNUAL_MALE.csv', skip=1)
names(pops) <- c('Index','Variant','name','Notes','country_code','year',paste0('age',as.character(c(0:79))), 'age80plus', paste0('age',as.character(c(80:100))))
broad_regions <- c('WORLD','More developed regions','Less developed regions','Least developed countries',
'Less developed regions, excluding least developed countries','Less developed regions, excluding China','High-income countries','Middle-income countries',
'Upper-middle-income countries','Lower-middle-income countries','Low-income countries','Sub-Saharan Africa','AFRICA',
'Eastern Africa','Middle Africa','Northern Africa','Southern Africa','Western Africa','ASIA','Eastern Asia',
'South-Central Asia','Central Asia','South-Eastern Asia','Western Asia','EUROPE','Eastern Europe',
'Northern Europe','Western Europe','Southern Europe','LATIN AMERICA AND THE CARIBBEAN','Caribbean',
'Central America','South America','NORTHERN AMERICA','Canada','United States of America','OCEANIA','Southern Asia')
pops <- pops[!(name %in% broad_regions), ]
cols <- grep("^age", names(pops), value = TRUE)
for(c in cols) {
pops[, (c) := as.numeric(gsub(' ','',get(c)))]
}
pops <- pops[, total_pop := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=grep("^age", names(pops))]
total_pops <- pops[, c('name','year','total_pop')]
total_pops[, total_pop := total_pop * 1000]
for(c in cols) {
pops[, paste0('prop_',c) := get(c) / total_pop]
}
pops <- pops[, size_15_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('age15','age16','age17','age18','age19')]
pops <- pops[, prop_15_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('prop_age15','prop_age16','prop_age17','prop_age18','prop_age19')]
pops <- pops[, size_10_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('age10','age11','age12','age13','age14','age15','age16','age17','age18','age19')]
pops <- pops[, prop_10_19 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('prop_age10','prop_age11','prop_age12','prop_age13','prop_age14','prop_age15','prop_age16','prop_age17','prop_age18','prop_age19')]
pops <- pops[, size_20_24 := apply(.SD, 1, sum, na.rm=TRUE), .SDcols=c('age20','age21','age22','age23','age24')]
for(y in seq(1950,2015,5)) pops[year>=y, five_year := y]
pops <- pops[, list(prop_15_19=mean(prop_15_19), size_15_19=mean(size_15_19),size_20_24=mean(size_20_24), prop_10_19=mean(prop_10_19), size_10_19=mean(size_10_19)), by=c('country_code','name','five_year')]
setnames(pops, 'five_year', 'year')
pops[, ratio_15_19_20_24 := size_15_19 / size_20_24]
ggplot() + geom_line(data=pops[name=='Liberia',], aes(x=year,y=ratio_15_19_20_24)) + xlim(c(1985,2005))
nm <- copy(net_migration)
nm[, year := year - 2.5]
ni <- copy(crni)
ni[, year := year - 2.5]
om <- copy(out_migration[,c('name','year','out_rate','out_migration')])
om[, year := year - 2.5]
mx_subset <- mx[sex=='Male' & age==0, ]
mx_subset[, year := year - 2.5]
cohorts <- merge(om, pops, by=c('name','year'), all.y=TRUE)
cohorts <- merge(cohorts, nm, by=c('name','year','country_code'), all.x=TRUE)
cohorts <- merge(cohorts, mx_subset, by=c('name','year','country_code'), all.x=TRUE)
cohorts <- merge(cohorts, ni, by=c('name','year','country_code'), all.x=TRUE)
cohorts <- cohorts[order(name,year)]
## Create lags and changes
create_changes <- function(n, dt, vars, lag, change=FALSE, year_step=5) {
dt.n <- dt[name==n, ]
if(length(dt.n[, name])==1) return(NULL)
if(length(dt.n[, name])!=1) {
for(r in seq(min(dt.n[, year]),max(dt.n[, year]),year_step)) {
for(v in vars) {
if(r>=min(dt.n[, year])+lag & change==TRUE) {
previous_v <- dt.n[year==r-lag, get(v)]
dt.n[year==r, paste0('relchange_',(v)) := (get(v)-previous_v)/previous_v]
dt.n[year==r, paste0('abschange_',(v)) := get(v)-previous_v]
dt.n[year==r, paste0('r_',(v)) := log(get(v)/previous_v)/lag]
}
if(r>=min(dt.n[, year])+lag) {
lag_v <- dt.n[year==r-lag, get(v)]
dt.n[year==r, paste0('lag',lag,'_',(v)) := lag_v]
}
}
}
return(dt.n)
}
}
cohort_change <- rbindlist(lapply(unique(cohorts[, name]), create_changes,
dt = cohorts,
vars = c('prop_15_19','size_15_19','mx','out_rate'),
change = TRUE,
lag = 5))
cohort_change <- rbindlist(lapply(unique(cohorts[, name]), create_changes,
dt = cohort_change,
vars = c('prop_10_19','size_10_19'),
change = TRUE,
lag = 10))
## Calculate all possible lags of r for migration correlation.
for(l in c(0,5,10,15,20)) {
cohort_change <- rbindlist(lapply(unique(cohort_change[, name]), create_changes,
dt = cohort_change,
vars = c('r_size_15_19','r_mx','crni','net_migration','mx','r_size_10_19'),
change = FALSE,
lag = l))
}
## Angola, Burundi, Burkina Faso, Congo, Honduras
write.csv(cohort_change, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/all_migration_data.csv', row.names=FALSE)
cohort_change <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/all_migration_data.csv')
cor_countries_mort <- readRDS('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/cor_countries_mort.RDS')
transition_countries <- data.table(country=cor_countries_mort)
# write.csv(transition_countries, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/transition_countries.csv', row.names = FALSE)
# transition_countries <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/transition_countries.csv')
#write.csv(transition_countries, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/model_countries.csv', row.names = FALSE)
# transition_countries <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/transition_countries.csv')
# setnames(transition_countries, 'country', 'name')
model_countries <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/model_countries.csv')
cohort_change <- merge(cohort_change, transition_countries, by='name', all.x = TRUE)
## Merge IHME net migrant data
ihme <- fread('C:/Users/ngraetz/Desktop/net_migrants.csv')
locs <- fread('C:/Users/ngraetz/Desktop/gaul_to_loc_id.csv')
setnames(locs, 'loc_name', 'name')
locs[name=='Tanzania', name := 'United Republic of Tanzania']
locs[name=='Democratic Republic of the Congo', name := 'Democratic Republic of Congo']
locs[name=='Ivory Coast', name := "Cote d'Ivoire"]
locs[name=='Iran', name := 'Iran (Islamic Republic of)']
locs[name=='Vietnam', name := 'Viet Nam']
locs[name=='Syria', name := 'Syrian Arab Republic']
locs[name=='Czech Republic', name := 'Czechia']
locs[name=='Russia', name := 'Russian Federation']
locs[name=='Bolivia', name := 'Bolivia (Plurinational State of)']
locs[name=='Venezuela', name := 'Venezuela (Bolivarian Republic of)']
locs[name=='United States', name := 'United States of America']
locs[name=='The Gambia', name := 'Gambia']
locs[name=='Laos', name := "Lao People's Democratic Republic"]
locs[name=='Cape Verde', name := 'Cabo Verde']
locs[name=='Palestine', name := 'State of Palestine']
setnames(locs, 'ihme_lc_id', 'ihme_loc_id')
setnames(ihme, 'value', 'ihme_net_migrants')
setnames(ihme, 'year_id', 'year')
ihme <- merge(ihme, locs, by='ihme_loc_id')
ihme <- ihme[, c('ihme_net_migrants','year','name')]
ihme <- ihme[, list(ihme_net_migrants=sum(ihme_net_migrants)), by=c('year','name')]
cohort_change <- merge(cohort_change, ihme, by=c('name','year'), all.x=TRUE)
# cohort_change <- rbindlist(lapply(unique(cohort_change[, name]), create_changes,
# dt = cohort_change,
# vars = c('ihme_net_migrants'),
# change = TRUE,
# lag = 5))
cohort_change <- merge(cohort_change, total_pops, by=c('year','name'))
cohort_change[, ihme_rate := (ihme_net_migrants / total_pop)*1000]
## Merge IHME SDI
sdi <- fread('J:/temp/ngraetz/sdi.csv')
setnames(sdi, 'mean_value', 'sdi')
setnames(sdi, 'year_id', 'year')
setnames(sdi, 'location_id', 'loc_id')
sdi <- merge(sdi, locs, by='loc_id')
sdi <- sdi[, c('sdi','year','name')]
cohort_change <- merge(cohort_change, sdi, by=c('name','year'), all.x=TRUE)
## Merge ILO % agriculture
ilo <- fread("C:/Users/ngraetz/Downloads/ILOSTAT_.csv")
ilo <- ilo[sex=='SEX_T' & classif1.label=='Broad sector: Agriculture', ]
setnames(ilo, 'ref_area', 'ihme_loc_id')
ilo <- merge(ilo, locs, by='ihme_loc_id')
setnames(ilo, 'obs_value', 'percent_agriculture')
setnames(ilo, 'time', 'year')
cohort_change <- merge(cohort_change, ilo, by=c('name','year'), all.x=TRUE)
## For each country that is growing because of reductions in infant mortality, find the lag that produces the highest correlation with out-migration rates
find_lag <- function(n, dt, v, target, type) {
dt.n <- dt[name==n, ]
#message(n)
if(length(dt.n[, name])==1) return(NULL)
if(length(dt.n[!is.na(get(target)), name])==0) return(NULL)
if(length(dt.n[, name])!=1) {
for(l in c(0,5,10,15,20)) {
dt.n[, (paste0('cor',l)) := cor(dt.n[, get(paste0('lag',l,'_',v))], dt.n[, get(target)], use='complete.obs')]
}
if(type=='positive') dt.n[, cor := apply(.SD, 1, max, na.rm=TRUE), .SDcols=grep("^cor", names(dt.n))]
if(type=='negative') dt.n[, cor := apply(.SD, 1, min, na.rm=TRUE), .SDcols=grep("^cor", names(dt.n))]
for(l in c(0,5,10,15,20)) dt.n[get(paste0('cor',l))==cor, bestlag := l]
dt.n <- unique(dt.n[, c('name','cor','bestlag')])
return(dt.n)
}
}
this_v <- 'r_size_15_19'
net_cors <- rbindlist(lapply(unique(cohort_change[name %in% cor_countries_mort, name]), find_lag, dt=cohort_change, v=this_v, target='net_migration', type='negative'))
setnames(net_cors, c('cor','bestlag'), c('net_cor','net_lag'))
out_cors <- rbindlist(lapply(unique(cohort_change[name %in% cor_countries_mort, name]), find_lag, dt=cohort_change, v=this_v, target='out_rate', type='positive'))
setnames(out_cors, c('cor','bestlag'), c('out_cor','out_lag'))
ihme_cors <- rbindlist(lapply(unique(cohort_change[name %in% cor_countries_mort, name]), find_lag, dt=cohort_change, v=this_v, target='ihme_rate', type='negative'))
setnames(ihme_cors, c('cor','bestlag'), c('ihme_cor','ihme_lag'))
all_cors <- merge(net_cors, out_cors, by='name')
all_cors <- merge(all_cors, ihme_cors, by='name')
## STAGE 1: Test mortality correlations
get_cor <- function(n, dt, vars) {
dt.n <- dt[name==n & year>=1970, ]
dt.n <- dt.n[!is.na(get(vars[1])) & !is.na(get(vars[2])), ]
c <- cor(dt.n[, get(vars[1])], dt.n[, get(vars[2])])
dt.n[, corr := c]
dt.n <- unique(dt.n[, c('name','corr')])
return(dt.n)
}
mort_cors <- rbindlist(lapply(unique(cohort_change[, name]),get_cor,cohort_change,c('lag15_r_mx','r_size_15_19')))
mort_cors <- mort_cors[order(-corr)] ## 64 countries where correlation <= -0.4
## Try correlating MAGR 1975-1990 with MAGR in the size of the 15-19 cohorts 1990-2005 by country.
global_mort_cors <- cohort_change[year %in% c(1990,1995,2000,2005), list(mx_magr=mean(lag15_mx), size_15_19_magr=mean(r_size_15_19)), by='name']
global_mort_cors <- global_mort_cors[order(-mx_magr)] ## 64 countries where correlation <= -0.4
ggplot() +
geom_point(data=gbd,
aes(x=lag15_mx,
y=r_size_15_19))
## Try GLMs
model_data <- copy(cohort_change)
model_data[, lag0_r_size_15_19 := lag0_r_size_15_19*100]
model_data[, lag5_r_size_15_19 := lag5_r_size_15_19*100]
model_data[, lag10_r_size_15_19 := lag10_r_size_15_19*100]
model_data[, lag15_r_size_15_19 := lag15_r_size_15_19*100]
model_data[, lag20_r_size_15_19 := lag20_r_size_15_19*100]
model_data[, lag20_r_mx := lag20_r_mx*100]
model_data[, lag15_r_mx := lag15_r_mx*100]
model_data[, lag10_r_size_10_19 := lag10_r_size_10_19*100]
fit_glm <- glm(cbind(round(out_migration), round(total_pop-out_migration)) ~
lag5_r_size_15_19 + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ], family=binomial)
glm_coefs1 <- data.table(model='GLM1',
name=names(fit_glm$coefficients),
coef=fit_glm$coefficients,
se=coef(summary(fit_glm))[,2],
p=coef(summary(fit_glm))[,4])
glm_coefs1[, coef := exp(coef)]
glm_coefs1[1:5, ]
fit_lm <- lm(out_rate ~ lag5_r_size_15_19 + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm1 <- data.table(model='LM1',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
fit_lm <- lm(out_rate ~ lag20_r_mx + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm2 <- data.table(model='LM2',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
fit_lm <- lm(net_migration ~ lag20_crni + lag5_net_migration + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm3 <- data.table(model='LM3',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
fit_lm <- lm(out_rate ~ lag20_crni + lag5_out_rate + as.factor(name) + year,
data=model_data[name %in% cor_countries_mort & !is.na(out_migration), ])
lm4 <- data.table(model='LM4',
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
all_lms <- rbind(glm_coefs1, lm1, lm2, lm3, lm4)
saveRDS(all_lms, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/lm_fits.RDS')
get_coefs <- function(reg='', global=FALSE) {
if(global==TRUE) fit_lm <- lm(net_migration ~ lag5_r_size_15_19 + percent_agriculture + lag5_net_migration + as.factor(name) + year,
data=model_data[!is.na(transition_region), ])
if(global!=TRUE) fit_lm <- lm(net_migration ~ lag5_r_size_15_19 + percent_agriculture + lag5_net_migration + as.factor(name) + year,
data=model_data[transition_region == reg, ])
coefs <- data.table(model=ifelse(global,'global',reg),
name=names(fit_lm$coefficients),
coef=fit_lm$coefficients,
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
return(coefs)
}
get_coefs_glm <- function(reg='', global=FALSE) {
if(global==TRUE) fit_lm <- glm(cbind(round(out_migration), round(total_pop-out_migration)) ~
lag5_r_size_15_19 + percent_agriculture + lag5_out_rate + as.factor(name) + year,
data=model_data[!is.na(transition_region), ], family=binomial)
if(global!=TRUE) fit_lm <- glm(cbind(round(out_migration), round(total_pop-out_migration)) ~
lag5_r_size_15_19 + percent_agriculture + lag5_out_rate + as.factor(name) + year,
data=model_data[transition_region == reg], family=binomial)
coefs <- data.table(model=ifelse(global,'global',reg),
name=names(fit_lm$coefficients),
coef=exp(fit_lm$coefficients),
se=coef(summary(fit_lm))[,2],
p=coef(summary(fit_lm))[,4])
return(coefs)
}
all_lms <- rbind(get_coefs(global=TRUE),
get_coefs(reg='ssa'),
get_coefs(reg='ea'),
get_coefs(reg='ca'),
get_coefs(reg='name'))
saveRDS(all_lms, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/lm_fits_net_cohort.RDS')
all_lms <- rbind(get_coefs_glm(global=TRUE),
get_coefs_glm(reg='ssa'),
get_coefs_glm(reg='ea'),
get_coefs_glm(reg='ca'),
get_coefs_glm(reg='name'))
saveRDS(all_lms, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/glm_fits_out_cohort.RDS')
## Try random slope models
library(lme4)
sdi_groups <- model_data[year==2005 & name %in% model_countries[, name] & !is.na(sdi), c('name','sdi')]
for(q in rev(seq(.1,1,.9/9))) {
sdi_q <- quantile(sdi_groups[, sdi], p=q)
message(round(sdi_q, 3))
sdi_groups[sdi < sdi_q, sdi_group := as.character(q)]
}
for(q in as.character(rev(seq(.1,1,.9/9)))) {
message(paste(unique(sdi_groups[sdi_group==q, name]), collapse=' '))
}
sdi_data <- merge(model_data, sdi_groups[, c('name','sdi_group')], by='name')
sdi_data[, scaled_year := year / 1000]
sdi_data[, scaled_sdi := sdi*100]
sdi_data <- merge(sdi_data, gbd, by=c('name','year'), all.x = TRUE)
#sdi_data[, lag5_r_gbd_size_15_19 := lag5_r_gbd_size_15_19*100]
sdi_data$transition_region <- NULL
sdi_data$transition_region_detailed <- NULL
sdi_data <- merge(sdi_data, model_countries, by='name', all.x=TRUE)
saveRDS(sdi_data, 'C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/sdi_data.rds')
## Load GBD data
gbd_mort <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/gbd_u5m.csv')
setnames(gbd_mort, 'location_name', 'name')
gbd_pop <- fread('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/gbd_pops.csv')
setnames(gbd_pop, 'location_name', 'name')
gbd_15_19 <- gbd_pop[age_group_name=="15 to 19", list(gbd_size_15_19=sum(gbd_pops)), by=c('year','name')]
gbd_working_age <- gbd_pop[age_group_name %in% c("15 to 19","20 to 24","25 to 29","30 to 34","35 to 39","40 to 44","45 to 49","50 to 54","55 to 59"), list(gbd_size_15_60=sum(gbd_pops)), by=c('year','name')]
#gbd_0_5 <- gbd_pop[age_group_name %in% c("Early Neonatal","Late Neonatal","Post Neonatal","1 to 4"), list(gbd_size_0_5=sum(gbd_pops)), by=c('year','name')]
gbd_pop <- merge(gbd_15_19, gbd_working_age, by=c('year','name'))
#gbd_pop <- merge(gbd_pop, gbd_0_5, by=c('year','name'))
gbd_pop[, gbd_propwa_15_19 := gbd_size_15_19/gbd_size_15_60]
## Make changes and lags
gbd_pop <- rbindlist(lapply(unique(gbd_pop[, name]), create_changes,
dt = gbd_pop,
vars = c('gbd_size_15_19'),
change = TRUE,
lag = 5,
year_step = 5))
gbd_mort <- rbindlist(lapply(unique(gbd_mort[, name]), create_changes,
dt = gbd_mort,
vars = c('gbd_mx'),
change = TRUE,
lag = 5,
year_step = 5))
gbd <- merge(gbd_mort, gbd_pop, by=c('name','year'), all.y=TRUE)
for(l in c(0,5,10,15,20)) {
gbd <- rbindlist(lapply(unique(gbd[, name]), create_changes,
dt = gbd[!is.na(r_gbd_size_15_19)],
vars = c('r_gbd_mx','r_gbd_size_15_19','gbd_mx'),
change = FALSE,
lag = l,
year_step = 5))
}
#gbd <- gbd[name %in% unique(sdi_data[, name]),]
gbd <- gbd[name!='Georgia']
mort_cors <- rbindlist(lapply(unique(gbd[, name]),get_cor,gbd,c('lag15_r_gbd_mx','r_gbd_size_15_19')))
mort_cors <- mort_cors[order(-corr)]
## Try correlating MAGR 1980-1995 with MAGR in the size of the 15-19 cohorts 1995-2010 by country.
country_change <- copy(gbd[year %in% c(1980,1995), c('year','name','gbd_mx')])
country_change <- dcast(country_change, name ~ year, value.var = "gbd_mx")
country_change[, diff := `1995` - `1980`]
country_change <- country_change[order(-diff)] ## 61 countries have experienced an absolute drop in under-5 mortality greater than 0.01 between 1980-1995.
model_countries <- country_change[diff <= -.01, name]
## Countries where mortality dropped after 2000
new_countries <- copy(gbd[year %in% c(2000,2015), c('year','name','gbd_mx')])
new_countries <- dcast(new_countries, name ~ year, value.var = "gbd_mx")
new_countries[, diff := `2000` - `2015`]
new_countries <- new_countries[order(-diff)] ## 61 countries have experienced an absolute drop in under-5 mortality greater than 0.01 between 1980-1995.
new_countries <- new_countries[diff >= .01 & !(name %in% model_countries), name]
## Countries where the size of the 15-19 cohorts has significantly increased 1985-2000
size_countries <- copy(gbd[year %in% c(1985,2000), c('year','name','r_gbd_size_15_19')])
size_countries <- dcast(size_countries, name ~ year, value.var = "r_gbd_size_15_19")
size_countries[, diff := `1985` - `2000`]
size_countries <- size_countries[order(-diff)] ## 61 countries have experienced an absolute drop in under-5 mortality greater than 0.01 between 1980-1995.
size_countries <- size_countries[diff >= .01 & !(name %in% model_countries), name]
global_mort_cors <- gbd[year %in% 1995:2010 & name %in% model_countries, list(mx_magr=mean(lag15_r_gbd_mx, na.rm=TRUE), size_15_19_magr=mean(r_gbd_size_15_19)), by='name']
global_mort_cors <- global_mort_cors[order(mx_magr)] ## 64 countries where correlation <= -0.4
ggplot() +
geom_point(data=global_mort_cors,
aes(x=mx_magr,
y=size_15_19_magr))
ggplot() +
geom_point(data=gbd[name %in% model_countries & year %in% 1995:2010,],
aes(x=r_gbd_size_15_19,y=lag15_r_gbd_mx))
ggplot() +
geom_line(data=gbd[name=='Uganda'],
aes(x=year,
y=gbd_mx))
## Fit random slope model for given IV by given category.
fit_random_slope <- function(dv, iv, cat, order_cat, zero_line, dt, country_fe=TRUE, guide_title, size_var=NULL, no_res=FALSE) {
if(no_res==TRUE) {
f <- as.formula(paste0(dv, ' ~ ', iv, ' + percent_agriculture + scaled_sdi + lag5_out_rate + as.factor(name) + year'))
mixedmod <- lm(f, data=dt)
return(list(mixedmod))
}
if(no_res==FALSE) {
if(country_fe) f <- as.formula(paste0(dv, ' ~ ', iv, ' + log_lag5_out_rate + as.factor(name) + scaled_year + (1 + ', iv, ' | ', cat, ')'))
if(!country_fe) f <- as.formula(paste0(dv, ' ~ ', iv, ' + log_lag5_out_rate + scaled_year + (1 + ', iv, ' | ', cat, ')'))
mixedmod <- lmer(f, data=dt)
# examine random and fixed effects
gg.data <- data.table(slope=ranef(mixedmod)[[cat]][,iv],
group=rownames(ranef(mixedmod)[[cat]]))
if(is.null(size_var)) size_var <- dv
mean_obs <- dt[, list(outcome=mean(get(size_var), na.rm=TRUE)), by=cat]
setnames(mean_obs, cat, 'group')
gg.data <- merge(gg.data, mean_obs, by='group')
ifelse(order_cat,
gg.data[, f_group := factor(group, levels=gg.data$group[order(gg.data[, slope])])],
gg.data[, f_group := group])
gg <- ggplot(data=gg.data,
aes(x=f_group, y=exp(slope+fixef(mixedmod)[iv]), size=outcome)) +
geom_point() +
geom_hline(yintercept = exp(fixef(mixedmod)[iv]), color = 'red', size=2) +
labs(x='Grouping variable',y='Slope') +
theme_minimal() +
theme(axis.text.x = element_text(angle=60, hjust=1)) +
scale_size_continuous(guide = guide_legend(title = guide_title))
if(zero_line) {
gg <- gg + geom_hline(yintercept = 0, linetype='dashed')
}
return(list(gg, mixedmod))
}
}
mod <- fit_random_slope(dv='out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data, country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_migration', no_res=TRUE)
summary(mod[[1]])
fixef(mod[[2]])['lag5_r_size_15_19']
sdi_data[, lag5_r_gbd_size_15_19 := lag5_r_gbd_size_15_19*100]
sdi_data[, log_out_rate := log(out_rate+0.001)]
sdi_data[, log_lag5_out_rate := log(lag5_out_rate+0.001)]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data, country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Liberia','Cambodia','Tajikistan'))], country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='transition_region_detailed', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Timor-Leste','Tajikistan','El Salvador','Guinea','Sierra Leone'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='sdi_group', order_cat=FALSE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Timor-Leste','Tajikistan','El Salvador','Guinea'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
out_cors <- rbindlist(lapply(unique(sdi_data[, name]), find_lag, dt=sdi_data, v='r_gbd_size_15_19', target='out_rate', type='positive'))
setnames(out_cors, c('cor','bestlag'), c('out_cor','out_lag'))
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/stage3_v2.pdf'), width = 12, height = 6)
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data, country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='name', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Sierra Leone','Bhutan','South Sudan','Afghanistan'))], country_fe=FALSE, guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='transition_region_detailed', order_cat=TRUE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Sierra Leone','Bhutan','South Sudan','Afghanistan'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
mod <- fit_random_slope(dv='log_out_rate', iv='lag5_r_gbd_size_15_19', cat='sdi_group', order_cat=FALSE, zero_line=TRUE, dt=sdi_data[!(name %in% c('Sierra Leone','Bhutan','South Sudan','Afghanistan'))], guide_title = 'Out-migration\nper thousand', size_var = 'out_rate')
mod[[1]]
dev.off()
test <- sdi_data[!(name %in% c('Zimbabwe')) & !is.na(percent_agriculture) & year <= 2005 & !is.na(out_rate), ]
test[, lme_pred := predict(mod[[2]])]
test[, manual :=
fixef(mod[[2]])['(Intercept)'] +
fixef(mod[[2]])['lag5_r_size_15_19'] * lag5_r_size_15_19 +
fixef(mod[[2]])['percent_agriculture'] * percent_agriculture +
fixef(mod[[2]])['lag5_out_rate'] * lag5_out_rate +
fixef(mod[[2]])['as.factor(name)Kenya'] +
fixef(mod[[2]])['year'] * year +
ranef(mod[[2]])[['transition_region_detailed']][4, 'lag5_r_size_15_19'] * lag5_r_size_15_19 +
ranef(mod[[2]])[['transition_region_detailed']][4, '(Intercept)']]
test[, inla_pred := inla_model$summary.fitted.values$mean]
test[, manual_inla :=
inla_model$summary.fixed['(Intercept)',]$mean +
inla_model$summary.fixed['lag5_r_size_15_19',]$mean * lag5_r_size_15_19 +
inla_model$summary.fixed['percent_agriculture',]$mean * percent_agriculture +
inla_model$summary.fixed['lag5_out_rate',]$mean * lag5_out_rate +
inla_model$summary.fixed['as.factor(name)Kenya',]$mean +
inla_model$summary.fixed['year',]$mean * year +
inla_model$summary.random$transition_region_detailed[4, ]$mean * lag5_r_size_15_19]
## Test INLA implementation and compare
dv <- 'log_out_rate'
iv <- 'lag5_r_gbd_size_15_19'
cat <- 'name'
inla_f <- as.formula(paste0(dv, ' ~ ', iv, ' + lag5_out_rate + year + f(', cat, ', ', iv, ', model = "iid")'))
inla_f <- out_rate ~ lag5_r_size_15_19 + f(transition_region_detailed, lag5_r_size_15_19, model = "iid")
inla_model =
inla(formula=inla_f,
data=sdi_data, family="gaussian", control.compute=list(dic=TRUE))#, control.fixed = list(prec.intercept = 0.0001))
summary(inla_model)
inla_model$summary.random
test <- sdi_data[!is.na(percent_agriculture), ]
## Template for correlated random intercepts and slopes, adapted from the INLA documentation:
## y, x1, and mydata are placeholders (not columns of sdi_data), and transition_region_detailed
## must be integer-coded for max() to return the number of groups.
n.block = max(sdi_data$transition_region_detailed) ## = 4
sdi_data$i.intercept = sdi_data$transition_region_detailed
sdi_data$j.intercept = sdi_data$transition_region_detailed + n.block ## see doc for iid2d
formule_random_correlated_intercept_slope = y ~ x1 +
  f(i.intercept, model="iid2d", n=2*n.block) +
  f(j.intercept, x1, copy="i.intercept")
model_random_correlated_intercept_slope_INLA = inla.cpo(
  formula=formule_random_correlated_intercept_slope,
  data=mydata, family="gaussian", control.compute=list(dic=TRUE))
summary(model_random_correlated_intercept_slope_INLA)
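## Hedged illustration (toy data, not part of the original analysis): the "iid2d" device above
## needs two index columns over the same grouping factor, offset by n.block, so that intercept
## and slope deviations are modelled jointly. Minimal sketch of that bookkeeping:
toy <- data.frame(region = rep(1:4, each = 3), x1 = rnorm(12))
n.block.toy <- max(toy$region)              # number of groups, assuming integer-coded groups
toy$i.intercept <- toy$region               # indices 1..n.block for the random intercepts
toy$j.intercept <- toy$region + n.block.toy # indices (n.block+1)..(2*n.block) for the random slopes
head(toy)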
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/paper_figs/updated_heat_maps.pdf'), width = 6, height = 10)
for(cor_var in c('net_cor','ihme_cor','out_cor')) {
heat_data <- unique(all_cors[, c('name','net_cor','out_cor','net_lag','out_lag','ihme_cor','ihme_lag')])
if(cor_var=='out_cor') heat_data[, name_f := factor(name, levels=heat_data$name[order(heat_data[, get(cor_var)])])]
if(cor_var!='out_cor') heat_data[, name_f := factor(name, levels=heat_data$name[order(-heat_data[, get(cor_var)])])]
heat_data <- melt(heat_data, id.vars = c('name_f','out_lag','net_lag','ihme_lag'), measure.vars = c('net_cor','out_cor','ihme_cor'))
heat_data[variable=='net_cor', value_lab := paste0(as.character(round(value,2)), ' (', net_lag, ')')]
heat_data[variable=='out_cor', value_lab := paste0(as.character(round(value,2)), ' (', out_lag, ')')]
heat_data[variable=='ihme_cor', value_lab := paste0(as.character(round(value,2)), ' (', ihme_lag, ')')]
heat_data[variable=='net_cor', variable := 'WPP (net)\n1950-2015']
heat_data[variable=='out_cor', variable := 'Wittgenstein (out)\n1990-2010']
heat_data[variable=='ihme_cor', variable := 'IHME (net)\n1950-2015']
heat_data[, variable := factor(variable, levels=c('WPP (net)\n1950-2015','IHME (net)\n1950-2015','Wittgenstein (out)\n1990-2010'))]
redblue <- c('#b2182b','#d6604d','#f4a582','#fddbc7','#f7f7f7','#d1e5f0','#92c5de','#4393c3','#2166ac')
heat.gg <- ggplot(heat_data, aes(variable, name_f)) +
geom_tile(aes(fill = value), color = "white") +
geom_text(aes(label = value_lab)) +
#scale_fill_gradient(low = "steelblue", high = "white", na.value = "white", limits=c(-1,0)) +
scale_fill_gradientn(colours=rev(redblue), values=c(-1,0,1), na.value = "#000000", rescaler = function(x, ...) x, oob = identity) +
ylab("Country") +
xlab("Comparison") +
    theme_minimal() +
    theme(legend.title = element_text(size = 10),
          legend.text = element_text(size = 12),
          plot.title = element_text(size=16),
          axis.title=element_text(size=14,face="bold"),
          axis.text.x = element_text(angle = 90, hjust = 1)) +
    labs(fill = "Correlation") # apply theme_minimal() first so it does not overwrite the theme() tweaks
print(heat.gg)
}
dev.off()
get_cor <- function(n, dt, vars) {
dt.n <- dt[name==n, ]
dt.n <- dt.n[!is.na(get(vars[1])) & !is.na(get(vars[2])), ]
c <- cor(dt.n[, get(vars[1])], dt.n[, get(vars[2])])
dt.n[, corr := c]
dt.n <- unique(dt.n[, c('name','corr')])
return(dt.n)
}
cors <- rbindlist(lapply(unique(cohort_change[, name]),get_cor,cohort_change,c('out_rate','lag_age_prop')))
cors <- cors[order(corr)]
cohort_change <- merge(cohort_change, cors, by='name')
library(grid)
library(gridExtra)
pdf(paste0('C:/Users/ngraetz/Documents/Penn/papers/cfr_migration/all_trends_by_country.pdf'), width = 20, height = 8)
for(this_name in unique(sdi_data[order(name), name])) {
message(this_name)
mort.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=mx),
size = 2) +
labs(x='Year',y='GBD: U5M') +
theme_minimal()
rmort.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=r_mx),
size = 2) +
geom_hline(yintercept = 0, color = 'red') +
labs(x='Year',y='GBD: growth rate in U5M') +
theme_minimal()
age_prop.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=gbd_size_15_19/1000),
size = 2) +
labs(x='Year',y='GBD: pop size 15-19 (in thousands)') +
theme_minimal()
age_change.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=r_gbd_size_15_19),
size = 2) +
geom_hline(yintercept = 0, color = 'red') +
labs(x='Year',y='GBD: growth rate in population size 15-19') +
theme_minimal()
out_migration.gg <- ggplot() +
geom_line(data=sdi_data[name==this_name,],
aes(x=year,
y=out_rate),
size = 2) +
labs(x='Year',y='Wittgenstein: out-migration rate (per 1000)') +
theme_minimal()
gLegend<-function(a.plot){
if ("ggplot" %in% class(a.plot)) {
tmp <- ggplot_gtable(ggplot_build(a.plot))
} else if ("grob" %in% class(a.plot)) {
tmp <- a.plot # was `.gplot`, which is undefined
}
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
#leg <- gLegend(mx.gg)
#leg$vp <- viewport(layout.pos.row = 2:6, layout.pos.col = 13)
grid.newpage()
pushViewport(viewport(layout = grid.layout(6, 15)))
vplayout <- function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
print(mort.gg + theme(legend.position="none"), vp = vplayout(2:6, 1:3))
print(rmort.gg + theme(legend.position="none"), vp = vplayout(2:6, 4:6))
print(age_prop.gg + theme(legend.position="none"), vp = vplayout(2:6, 7:9))
print(age_change.gg + theme(legend.position="none"), vp = vplayout(2:6, 10:12))
print(out_migration.gg + theme(legend.position="none"), vp = vplayout(2:6, 13:15))
#grid.draw(leg)
grid.text(this_name, vp = viewport(layout.pos.row = 1, layout.pos.col = 1:15))
}
dev.off()
cohort_change[, moving_average := rollapplyr(data=net_migration, width=1:.N, FUN=mean, by=1), by = 'name']
cohort_change[, moving_average := lapply(.SD, rollmean, k = 3, na.pad = TRUE, partial = TRUE), by = name, .SDcols = 'net_migration']
cohort_change[name=='Nigeria', c('net_migration','moving_average')]
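## Hedged sketch (toy data, not part of the original analysis): the first call above builds an
## expanding mean (the window grows with each observation), while the second is a centred
## 3-period moving average padded with NA at the ends.
library(data.table)
library(zoo)
toy_mig <- data.table(name = "A", net_migration = c(2, 4, 6, 8, 10))
toy_mig[, expanding_mean := rollapplyr(net_migration, width = 1:.N, FUN = mean)]
toy_mig[, centered_ma3 := rollmean(net_migration, k = 3, fill = NA)]
toy_mig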
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add-link.R
\name{add_link}
\alias{add_link}
\title{Add other association link plot on correlation plot.}
\usage{
add_link(df, mapping = NULL, spec.key = "spec", env.key = "env",
curvature = NULL, spec.label.hspace = NULL, spec.label.vspace = 0,
on.left = FALSE, diag.label = FALSE, extra.params = extra_params(),
...)
}
\arguments{
\item{df}{a data frame object.}
\item{mapping}{NULL (default) or a list of aesthetic mappings to use for plot.}
\item{spec.key}{string (defaults to "spec"), group variable names in \code{df}.}
\item{env.key}{string (defaults to "env"), variable names in \code{df} that are
associated with the correlation coefficient matrix.}
\item{curvature}{a numeric value giving the amount of curvature.}
\item{spec.label.hspace, spec.label.vspace}{a numeric value giving the amount of
horizontal/vertical space between group points and labels.}
\item{on.left}{whether to add the link plot on the left (rather than the right) when the
type of correlation plot is "full".}
\item{diag.label}{logical (defaults to FALSE) indicating whether to add diagonal labels.}
\item{extra.params}{other parameters controlling details; these can be set
using the \code{extra_params} function.}
\item{...}{extra parameters passed to \code{\link[ggplot2]{geom_curve}}.}
}
\description{
This function quickly adds a link plot of other associated information, and can be
used to show the relationship between external variables and the variables in the
correlation coefficient matrix plot.
}
\examples{
\dontrun{
require(vegan, quietly = TRUE)
require(dplyr, quietly = TRUE)
require(ggplot2, quietly = TRUE)
data("varechem")
data("varespec")
mantel <- fortify_mantel(varespec, varechem,
spec.select = list(1:10, 5:14, 7:22, 9:32))
quickcor(varechem, type = "upper") +
geom_square() +
add_link(mantel, diag.label = TRUE) +
geom_diag_label() + remove_axis("x")
mantel01 <- mantel \%>\%
mutate(r = cut(r, breaks = c(-Inf, 0.25, 0.5, Inf),
labels = c("<0.25", "0.25-0.5", ">=0.5"),
right = FALSE),
p.value = cut(p.value, breaks = c(-Inf, 0.001, 0.01, 0.05, Inf),
labels = c("<0.001", "0.001-0.01", "0.01-0.05", ">=0.05"),
right = FALSE))
quickcor(varechem, type = "upper") + geom_square() +
add_link(mantel01, mapping = aes(colour = p.value, size = r),
diag.label = TRUE) +
geom_diag_label() +
scale_size_manual(values = c(0.5, 1.5, 3)) +
remove_axis("x")
}
}
\author{
Houyun Huang, Lei Zhou, Jian Chen, Taiyun Wei
}
|
/man/add_link.Rd
|
no_license
|
xma82/ggcor
|
R
| false | true | 2,624 |
rd
|
data<-read.csv2("Salaries.csv",sep=",",header=TRUE)
data$X<-NULL
salaire<-as.data.frame(data)
summary(data)
library(tree)
tree.Lin<-tree(salary~yrs.service+yrs.since.phd,data=salaire)
#1.2.c
tree.model<-tree(log(salary)~yrs.service+yrs.since.phd,data=salaire)
plot(tree.Lin)
text(tree.Lin,cex=.75)
#1.3
salar.deciles<-quantile(salaire$salary,0:10/10)
cut.prices<-cut(salaire$salary,salar.deciles,include.lowest=TRUE)
plot(salaire$yrs.service,salaire$yrs.since.phd,col=grey(10:2/11)[cut.prices],pch=20,xlab="yrs.service",ylab="yrs.since.phd")
####Partition
partition.tree(tree.model,ordvars=c("yrs.service","yrs.since.phd"),add=TRUE)
#Figure5
plot(salaire$yrs.since.phd,salaire$salary,pch=19,col=as.numeric(salaire$rank))
partition.tree(tree.model,label="Species",add=TRUE)
legend("topright",legend=unique(salaire$rank),col=unique(as.numeric(salaire$rank)),pch=19)
summary(tree.model)
### New model
tree.model2<-tree(log(salary)~yrs.service+yrs.since.phd,
data=salaire,mindev=0.001)
plot(tree.model2)
text(tree.model2,cex=.75)
summary(tree.model2)
# Pruning
pruned.tree<-prune.tree(tree.model,best=4)
plot(pruned.tree)
text(pruned.tree)
# load the data - mind the options
data<-read.csv2("C:/Users/claey/Documents/cour/MyTD/TD5/titanic.csv",sep=",",header=TRUE)
titanic<-as.data.frame(data)
###Summary
summary(titanic)
View(data)
### Install and load
library(rpart)
library(party)
library(rpart.plot)
str(titanic)
myFormula<-survived~class+age#
##CART##
fit<-rpart(myFormula,method="class",data=titanic)
printcp(fit) # display the results
plotcp(fit) # visualize cross-validation results (Figure 8)
summary(fit) # detailed summary of splits
# plot tree: Figure 9
plot(fit,uniform=TRUE)
text(fit,use.n=TRUE,all=TRUE,cex=.7)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
# Improve the plot, which is not very readable: Figure 10
fancyRpartPlot(fit)
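# Hedged addition (not part of the original exercise): confusion matrix for the CART fit,
# mirroring the checks done below for ctree and randomForest.
cart_predictions<-predict(fit,titanic,type="class")
table(titanic$survived,cart_predictions)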
##CTREE##
titanic_tree<-ctree(myFormula,data=titanic)
print(titanic_tree)
##Figure11
plot(titanic_tree)
predictions<-predict(titanic_tree,titanic)
table(titanic$survived,predictions)
# NOTE: titanic_tree2 is not defined above; this assumes a second ctree model fit elsewhere
predictions<-predict(titanic_tree2,titanic)
table(titanic$survived,predictions)
library(randomForest)
r<-randomForest(survived~.,data=titanic,importance=TRUE,do.trace=100,ntree=100)
plot(r)
predictions<-predict(r,titanic)
table(titanic$survived,predictions)
print(r)
|
/Cours 5A/Big Data/Module 3 Modélisation/TD2/Pour les pd/td2.R
|
no_license
|
hejoseph/workspace
|
R
| false | false | 2,333 |
r
|
#---------------
# Packages needed below (not loaded in the original script): dplyr for data_frame,
# tidytext for unnest_tokens/get_sentiments, gutenbergr for gutenberg_download,
# stringr for str_detect, ggplot2 for plotting.
library(dplyr)
library(stringr)
library(ggplot2)
library(tidytext)
library(gutenbergr)
hurst_scan<-scan('mercyhurst.txt',what=character(),sep='\n')
hurst_lines<-data_frame(line=1:24066,text=hurst_scan)
hurst_lines$group<-hurst_lines$line %/% 80
hurst_words<-unnest_tokens(hurst_lines,word,text)
afinn<-get_sentiments('afinn')
hurst_words<-inner_join(hurst_words,afinn)
hurst_groups<-hurst_words%>%
group_by(group)%>%
summarize(sentiment=sum(score))
ggplot()+
geom_col(data=hurst_groups,aes(x=group,y=sentiment))
#------------------------
gutenberg_works(str_detect(title,'Frankenstein'))
frankenstein<-gutenberg_download(84)
frankenstein_words<-unnest_tokens(frankenstein,word,text)
frankenstein_words$word_number<-1:75175
frankenstein_words$gutenberg_id<-NULL
afinn<-get_sentiments('afinn')
frankenstein_words<-inner_join(frankenstein_words,afinn)
frankenstein_words$acc_sent<-cumsum(frankenstein_words$score)
ggplot()+
geom_line(data=frankenstein_words,aes(x=word_number,y=acc_sent))
#-------------------
#\documentclass{article}
#\usepackage{natbib}
#\begin{document}
#\title{My Article}
#\author{Charles Redmond}
#\maketitle
#\section{My First Section}
#Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis ornare, libero quis sodales bibendum, dolor augue placerat arcu, sed pellentesque ante urna vel ex. Pellentesque eget purus eget velit ornare sodales et nec augue. In nec diam nibh. Nulla facilisi. Praesent tristique scelerisque lacus, at sollicitudin magna ultrices non. Mauris imperdiet eleifend sapien at mollis. Nullam id justo sollicitudin, euismod purus at, molestie nunc. Quisque et tellus sed risus sollicitudin lacinia viverra sit amet felis. Morbi hendrerit ultrices mauris, non iaculis justo cursus in. Pellentesque eget velit eget neque lobortis scelerisque quis et felis. Mauris commodo magna leo, eu tempor odio varius sed. Etiam erat ipsum, porttitor pellentesque dolor vitae, porta rhoncus ligula.
#Ut mi magna, congue vitae sem quis, sodales porttitor urna. Nunc ornare faucibus arcu id tempor. Etiam congue vulputate mattis. Aliquam sollicitudin accumsan turpis vitae malesuada. Quisque aliquet lorem sit amet odio ullamcorper porta. Pellentesque lobortis augue nec dolor sagittis, ut rutrum sem placerat. Duis eu viverra nulla.
#Nullam sit amet tempor mi. Curabitur sed odio vitae erat sodales consectetur vitae convallis nulla. Curabitur feugiat mollis consequat. Duis fermentum aliquet sem sit amet tincidunt. Etiam vitae orci dolor. Fusce ac sodales libero. Sed sodales non lorem a auctor. Nunc in tellus ornare, gravida elit a, venenatis justo. Vivamus ipsum augue, dapibus vitae nisi sit amet, faucibus vehicula quam. Ut quis elit accumsan, rhoncus massa tempor, blandit sapien. Quisque eleifend turpis vel nisi porttitor efficitur.
#Nulla pretium in ipsum at ultricies. Integer maximus orci ut orci sagittis ornare. Praesent ac pellentesque nibh. Curabitur lacinia, nunc ut lobortis vulputate, augue ipsum consectetur enim, eu eleifend eros magna eget leo. Nulla sit amet laoreet nulla. Duis leo elit, accumsan eu ultrices id, rhoncus eget nibh. Duis eleifend accumsan tortor a bibendum \citep{Lakshmanan}.
#Aliquam erat volutpat. Etiam convallis ultrices tortor, vel faucibus sapien finibus in. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nullam nulla dui, imperdiet et elementum eget, vehicula non risus. Duis varius orci id ex egestas vulputate. Donec eleifend tempor sem ac commodo. Vivamus ut nunc nisi. Sed quis lorem erat. Aliquam imperdiet tempus justo, ut laoreet lorem finibus ac.
#\bibliographystyle{apa}
#\bibliography{article}
#\nocite{*}
#\end{document}
#----------
#@book{Lakshmanan,
#author={Valliappa Lakshmanan},
#title={Data Science on the Google Cloud Platform: Implementing End-to-End Real-time Data Pipelines: From Ingest to Machine Learning},
#publisher={O'Reilly Inc.},
#year={2018}
#}
#pdflatex article
#bibtex article
#pdflatex article
#\citep
|
/Test Example Problems/test_examples.R
|
no_license
|
justinminsk/Communication-and-Data
|
R
| false | false | 3,914 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/collection_migrate.R
\name{collection_migrate}
\alias{collection_migrate}
\title{Migrate documents to another collection}
\usage{
collection_migrate(conn, name, target.collection, split.key,
forward.timeout = NULL, async = NULL, raw = FALSE, callopts = list())
}
\arguments{
\item{conn}{A solrium connection object, see \link{SolrClient}}
\item{name}{(character) Required. The name of the source collection from which
documents will be migrated}
\item{target.collection}{(character) Required. The name of the target collection
to which documents will be migrated}
\item{split.key}{(character) Required. The routing key prefix. For example, if
uniqueKey is a!123, then you would use split.key=a!}
\item{forward.timeout}{(integer) The timeout (seconds), until which write requests
made to the source collection for the given \code{split.key} will be forwarded to the
target shard. Default: 60}
\item{async}{(character) Request ID to track this action which will be processed
asynchronously}
\item{raw}{(logical) If \code{TRUE}, returns raw data}
\item{callopts}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
}
\description{
Migrate documents to another collection
}
\examples{
\dontrun{
(conn <- SolrClient$new())
# create collection
if (!conn$collection_exists("migrate_from")) {
conn$collection_create(name = "migrate_from")
# OR: bin/solr create -c migrate_from
}
# create another collection
if (!conn$collection_exists("migrate_to")) {
conn$collection_create(name = "migrate_to")
# OR bin/solr create -c migrate_to
}
# add some documents
file <- system.file("examples", "books.csv", package = "solrium")
x <- read.csv(file, stringsAsFactors = FALSE)
conn$add(x, "migrate_from")
# migrate some documents from one collection to the other
## FIXME - not sure if this is actually working....
# conn$collection_migrate("migrate_from", "migrate_to", split.key = "05535")
}
}
|
/man/collection_migrate.Rd
|
permissive
|
melsiddieg/solrium
|
R
| false | true | 1,955 |
rd
|
library(RWeka)
library(ggplot2) # used by draw() and drawModel() below
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
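# Hedged usage example (not in the original script): arranging two ggplots side by side
# with the multiplot() helper defined above.
p1 <- ggplot(mtcars, aes(wt, mpg)) + geom_point()
p2 <- ggplot(mtcars, aes(factor(cyl))) + geom_bar()
multiplot(p1, p2, cols = 2)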
# Refresh caches
WPM("refresh-cache")
# Load data
load_data <- function() {
fbppr.true <- read.csv("/data/bshi/dataset/dblp_citation/link_prediction/fbppr.true.csv", header=FALSE, sep=" ",
col.names=c("query_id","id", "fppr_score", "fppr_rank", "bppr_score", "bppr_rank"))
fbppr.false <- read.csv("/data/bshi/dataset/dblp_citation/link_prediction/fbppr.false.csv", header=FALSE, sep=" ",
col.names=c("query_id","id", "fppr_score", "fppr_rank", "bppr_score", "bppr_rank"))
fbppr.true$label <- "T"
fbppr.false$label <- "F"
fbppr <- rbind(fbppr.true, fbppr.false)
sim.true <- read.csv("/data/bshi/dataset/dblp_citation/link_prediction/simrank.true.csv", header=FALSE, sep=" ",
col.names=c("query_id", "id", "sim_score"))
sim.false <- read.csv("/data/bshi/dataset/dblp_citation/link_prediction/simrank.false.csv", header=FALSE, sep=" ",
col.names=c("query_id", "id", "sim_score"))
sim.true$label <- "T"
sim.false$label <- "F"
sim <- rbind(sim.true, sim.false)
salsa.true <- read.csv("/data/bshi/dataset/dblp_citation/link_prediction/salsa.true.csv", header=FALSE, sep=" ",
col.names=c("query_id", "id", "query_auth_score", "query_auth_rank","query_hub_score",
"query_hub_rank","id_auth_score", "id_auth_rank","id_hub_score","id_hub_rank"))
salsa.false <- read.csv("/data/bshi/dataset/dblp_citation/link_prediction/salsa.false.csv", header=FALSE, sep=" ",
col.names=c("query_id", "id", "query_auth_score", "query_auth_rank","query_hub_score",
"query_hub_rank","id_auth_score", "id_auth_rank","id_hub_score","id_hub_rank"))
salsa.true$label <- "T"
salsa.false$label <- "F"
salsa <- rbind(salsa.true, salsa.false)
# Combine them together
dat <- merge(fbppr, sim, by=c("query_id","id","label"))
dat <- merge(dat, salsa, by=c("query_id","id","label"))
return(dat)
}
# Do logistic regression on all different combinations of features
do_logistic <- function(dat) {
# Convert label to factor
dat$label <- as.factor(dat$label)
# Generate all combinations
dat.features <- colnames(dat)[! colnames(dat) %in% c("query_id", "id", "label")]
dat.res <- NULL
for(i in 1:length(dat.features)) {
features <- combn(dat.features, i)
for(j in 1:dim(features)[2]) {
feature <- paste(unlist(features[,j]), collapse = "+")
f <- paste("label",feature,sep = "~")
model <- Logistic(f, data = dat)
res <- evaluate_Weka_classifier(model, numFolds = 5, complexity = FALSE,
seed = 1, class = TRUE)
dat.res <- rbind(dat.res, data.frame(name=feature, choose=i, t(colMeans(res$detailsClass))))
}
}
return(dat.res)
}
extractRes <- function(df, measurevar, topk) {
tmpres <- NULL
for(i in unique(df$choose)) {
tmpdf <- df[which(df$choose == i),]
tmpdf <- tmpdf[order(tmpdf[,measurevar], decreasing = TRUE),]
len <- ifelse(dim(tmpdf)[1] > topk, topk, dim(tmpdf)[1])
tmpres <- rbind(tmpres, tmpdf[1:len,])
}
return(tmpres)
}
abbr <- function(df) {
df$name <- sub("fppr_score", "FS", df$name)
df$name <- sub("fppr_rank", "FR", df$name)
df$name <- sub("bppr_score", "BS", df$name)
df$name <- sub("bppr_rank", "BR", df$name)
df$name <- sub("sim_score", "SR", df$name)
df$name <- sub("query_auth_score", "SAS", df$name)
df$name <- sub("query_auth_rank", "SAR", df$name)
df$name <- sub("query_hub_score", "SHS", df$name)
df$name <- sub("query_hub_rank", "SHR", df$name)
df$name <- sub("id_auth_score", "DAS", df$name)
df$name <- sub("id_auth_rank", "DAR", df$name)
df$name <- sub("id_hub_score", "DHS", df$name)
df$name <- sub("id_hub_rank", "DHR", df$name)
return(df)
}
draw <- function(df, measurevar = "areaUnderROC") {
# Add ranking
df$ranking <- rank(-df[,measurevar])
maxN <- max(df$choose)
df$choose <- as.factor(df$choose)
df$x <- 1:dim(df)[1]
g <- ggplot(df, aes_string(x = "x", y = measurevar, fill="choose", label="ranking")) +
geom_bar(stat="identity") +
geom_text(vjust=0.5) +
geom_hline(yintercept =max(df[, measurevar]), colour = "red", size=.1) +
# coord_cartesian(ylim=c(0.98 * min(df[,measurevar]),1)) +
coord_cartesian(ylim=c(0.5, 1)) +
scale_x_discrete(breaks=df$x, labels=df$name) +
scale_colour_brewer(palette="Set1") +
theme_classic() +
theme(axis.text.x = element_text(angle = 30, hjust = 1),
axis.title.x = element_blank())
g
return(g)
}
drawModel <- function(df, measurevar = "areaUnderROC") {
df$x <- 1:dim(df)[1]
df$choose <- as.factor(df$choose)
ggplot(df, aes_string(x = "x", y = measurevar, label="Models", fill="choose")) +
geom_bar(stat="identity") +
geom_hline(yintercept =max(df[, measurevar]), colour = "red", size=.1) +
coord_cartesian(ylim=c(0.6, 1)) +
scale_colour_brewer(palette="Set1") +
scale_x_discrete(breaks=df$x, labels=df$Models) +
scale_fill_discrete(name="Number of\nModels") +
theme_classic() +
theme(axis.text.x = element_text(angle = 15, hjust = 1),
axis.title.x = element_blank())
}
compareAllModels <- function(df, measurevar = "areaUnderROC") {
# There are 4 models FPPR/FBPPR/P-SALSA/SIMRANK
res <- NULL
# Number of models
for(i in 1:4) {
models <- combn(c("FPPR", "FBPPR", "P-SALSA", "SIMRANK"), i)
# Select different model combinations
templist <- NULL
for( j in 1:dim(models)[2]) { #dim(models)[2]
# Get results from this combination
tmpdf <- NULL
tmpidx <- rep(TRUE, dim(df)[1])
cmodels=setdiff(c("FPPR", "FBPPR", "P-SALSA", "SIMRANK"),models[,j])
for( m in cmodels) {
if (m == "FPPR") {
if (length(which(cmodels=="FBPPR"))>0)
tmpidx <- tmpidx * (! 1:dim(df)[1] %in% grep("fppr_", df$name))
}
if (m == "FBPPR") {
tmpidx <- tmpidx * (! 1:dim(df)[1] %in% grep("bppr_", df$name))
}
if (m == "P-SALSA") {
tmpidx <- tmpidx * (! 1:dim(df)[1] %in% grep("query_", df$name))
tmpidx <- tmpidx * (! 1:dim(df)[1] %in% grep("id_", df$name))
}
if (m == "SIMRANK") {
tmpidx <- tmpidx * (! 1:dim(df)[1] %in% grep("sim", df$name))
}
}
tmpdf <- df[which(tmpidx>0),]
templist <- rbind(templist,data.frame(Models=paste(unlist(models[,j]),collapse = "+"),
noname=max(tmpdf[,measurevar]),
choose = i))
}
colnames(templist)[2] <- measurevar
templist <- templist[order(templist[, measurevar],decreasing=T),]
res <- rbind(res,templist)
}
return(res)
}
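# Hedged usage sketch (assumed; the original script never shows this step): the model
# comparisons below rely on `dat.res`, presumably built from the helpers defined above.
# Note that do_logistic() fits a model for every feature subset, which is slow.
dat <- load_data()
dat.res <- do_logistic(dat)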
# Compare different models
drawModel(compareAllModels(dat.res, "recall"), "recall")
drawModel(compareAllModels(dat.res, "precision"), "precision")
drawModel(compareAllModels(dat.res, "fMeasure"), "fMeasure")
drawModel(compareAllModels(dat.res, "areaUnderROC"), "areaUnderROC")
#--------------------
dat.selected.res <- dat.res[! 1:dim(dat.res)[1] %in% union(grep("hub", dat.res$name), grep("auth_rank", dat.res$name)),]
dat.nohub.res <- dat.res[! 1:dim(dat.res)[1] %in% grep("hub", dat.res$name),]
dat.nobppr.res <- dat.res[! 1:dim(dat.res)[1] %in% grep("bppr_", dat.res$name),]
dat.selected.resRest <- extractRes(dat.selected.res, "areaUnderROC", 5)
draw(abbr(dat.selected.resRest), "areaUnderROC")
dat.nobppr.bestRes <- extractRes(dat.nobppr.res, "areaUnderROC", 1)
draw(abbr(dat.nobppr.bestRes), "areaUnderROC")
dat.nohub.bestRes <- extractRes(dat.nohub.res, "areaUnderROC", 1)
draw(abbr(dat.nohub.bestRes), "areaUnderROC")
dat.bestRes <- extractRes(dat.res, "areaUnderROC", 1)
draw(abbr(dat.bestRes),"areaUnderROC")
|
/scripts/weka.R
|
no_license
|
nddsg/forward-backward-ppr
|
R
| false | false | 8,964 |
r
|
# This script creates a SQL table of CFPB comment metadata
# subset to Dodd-Frank dockets or RINs as identified by Davis Polk
library(DBI)
library(RSQLite)
library(tidyverse)
library(magrittr) # for the %<>% assignment pipe used throughout
# API version
v4 = FALSE
## now pulling from new search of API v4
if(v4){
load(here::here("data", "CFPBcomments.Rdata"))
comments_all <- CFPBcomments
} else{
## originally pulled from Devin's master data
load(here::here("data", "comment_metadata.Rdata"))
}
nrow(comments_all)
head(comments_all)
names(comments_all)
# Rename to fit https://docs.google.com/spreadsheets/d/1i8t_ZMAhjddg7cQz06Z057BqnNQEsC4Gur6p17Uz0i4/edit#gid=1357829693
names(comments_all) <- names(comments_all) %>%
str_replace_all("([A-Z])", "_\\1") %>%
str_to_lower()
names(comments_all)
comments_all %<>% mutate(source = "regulations.gov")
if(v4){
comments_all %<>% mutate(comment_id = id)
comments_all %<>% rename(agency_acronym = agency_id)
} else {
# v3
comments_all %<>% mutate(comment_id = document_id)
}
comments_all %<>% mutate(comment_url = str_c("https://www.regulations.gov/comment/", comment_id))
#FIXME v4 does not return comment due date
count(comments_all, is.na(posted_date))
count(comments_all, is.na(comment_due_date))
comments_all %<>% mutate(late_comment = as.Date(posted_date) > as.Date(comment_due_date))
count(comments_all, is.na(late_comment))
# V4 no longer returns organization or docket title
comments_all %<>%
mutate(docket_id = str_remove(comment_id, "-[0-9]*$"))
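# Hedged illustration with a hypothetical comment ID (not taken from the data): the regex
# above drops the trailing comment number, leaving the docket ID.
str_remove("CFPB-2013-0033-0012", "-[0-9]*$") # returns "CFPB-2013-0033"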
vars_to_keep <- c("fr_document_id", # need this from rules table joined in by document_id - comment_id
"docket_title", # this may clash with docket title from attachments table
"docket_type",
"rin",
"attachment_count",
"posted_date",
"submitter_name",
"organization",
"late_comment",
"allow_late_comment")
# FIXME trim down to minimial variables
comments_all %<>% select(source,
#fr_document_id, # need this from rules table joined in by document_id - comment_id
agency_acronym,
docket_id,
comment_id = document_id,
comment_title = title, # rename
comment_url,
any_of(vars_to_keep) )
# filter to only CFPB comments
comments_CFPB <- comments_all %>% filter(agency_acronym == "CFPB")
nrow(comments_CFPB)
head(comments_CFPB)
# save Rdata
save(comments_CFPB, file = here::here("data", "comment_metadata_CFPB.Rdata"))
# convert dates to character for SQL
# (is.numeric.Date() always returns FALSE, so it would select no columns; test the class instead)
comments_CFPB %<>% mutate(across(where(~ inherits(.x, "Date")), as.character))
# Create RSQLite database for all CFPB comment metadata
library(DBI)
# install.packages("RSQLite")
library(RSQLite)
con <- dbConnect(RSQLite::SQLite(), here::here("data", "comment_metadata_CFPB.sqlite"))
# check
list.files("data")
dbListTables(con)
dbWriteTable(con, "comments", comments_CFPB, overwrite = T)
dbListTables(con)
dbListFields(con, "comments")
# dbReadTable(con, "comments_CFPB") # oops
# fetch results:
res <- dbSendQuery(con, "SELECT * FROM comments WHERE agency_acronym = 'CFPB'")
dbFetch(res) %>% head()
dbClearResult(res)
dbDisconnect(con)
###################################################
# Subset to Davis Polk Dodd-Frank rules
# load(here::here("data", "comment_metadata_CFPB.Rdata"))
names(comments_CFPB)
# Dodd-Frank rules from Davis Polk Data
df <- read_csv(here::here("data", "dockets_to_scrape.csv"))
names(df)
head(df)
df %<>% filter(str_detect(agency, "CFPB"))
# Subset to Dodd-Frank rules (by docket or RIN)
df_rins <- df$RIN %>% na.omit() %>% unique()
df_dockets <- df$identifier %>% na.omit() %>% unique()
comments_CFPB_df <- comments_CFPB %>%
filter(docket_id %in% df_dockets | rin %in% df_rins)
# rins not in dockets to scrape
comments_CFPB_df %>%
filter(!rin %in% df_rins) %>%
select(docket_id, rin) %>%
distinct()
# dockets not in dockets to scrape
comments_CFPB_df %>%
filter(!docket_id %in% df_dockets) %>%
select(docket_id, rin) %>%
distinct() %>% knitr::kable()
comments_CFPB_df$docket_id %>% unique()
comments_CFPB_df$rin %>% unique()
# look back to see how many we matched
matched <- df %>% filter(RIN %in% na.omit(comments_CFPB_df$rin) | identifier %in% na.omit(comments_CFPB_df$docket_id))
unmatched <- df %>% anti_join(matched)
unmatched %>%
select(RIN, identifier) %>%
distinct()
# # 0 comments
# RIN identifier
# <chr> <chr>
# 1 NA CFPB-2013-0038
# 2 3170-AA30 CFPB-2012-0040
# 3 NA CFPB-2014-0030
# 4 3170-AA36 CFPB-2013-0006
# 5 NA CFPB-2012-0042
# 6 NA CFPB-2013-0034
# 7 NA CFPB-2017-0026
# 8 NA CFPB-2012-0043
# 9 NA CFPB-2013-0035
# 10 NA CFPB-2017-0027
# NOTE see checks against fed reg doc numbers in functions/sql_actions_metadata_CFPB.R
# save Rdata
save(comments_CFPB_df, file = here::here("data", "comment_metadata_CFPB_df.Rdata"))
# Create RSQLite database
con <- dbConnect(RSQLite::SQLite(), here::here("data", "comment_metadata_CFPB_df.sqlite"))
# check
list.files("data")
dbListTables(con)
dbWriteTable(con, "comments", comments_CFPB_df, overwrite = T)
dbListTables(con)
dbListFields(con, "comments")
# fetch results:
res <- dbSendQuery(con, "SELECT * FROM comments WHERE agency_acronym = 'CFPB'")
result <- dbFetch(res)
result |> head()
count(result, posted_date, sort = T)
count(result, is.na(posted_date), sort = T)
dbClearResult(res)
dbDisconnect(con)
|
/functions/sql_comment_metadata_CFPB.R
|
no_license
|
zoeang/rulemaking
|
R
| false | false | 5,635 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_define_param.R
\name{define.param}
\alias{define.param}
\title{Define climateR configuration}
\usage{
define.param(param, service = NULL)
}
\arguments{
\item{param}{the parameter(s) of interest}
\item{service}{the dataset for which a configuration is needed}
}
\value{
a vector of N GCMs
}
\description{
**INTERNAL** Define the parameter configuration to call with climateR. Ensures that the specified parameters
are available and structured in the variable space of the data source.
}
\author{
Mike Johnson
}
\keyword{internal}
|
/man/define.param.Rd
|
permissive
|
mbjoseph/climateR
|
R
| false | true | 616 |
rd
|
context("logging x and y values")
# Helper to check if a logging result has the correct structure
expect_logging_result_structure = function(x) {
expect_true(is.list(x))
expect_true(is.data.frame(x$pars))
expect_true(is.numeric(x$obj.vals))
}
test_that("logging for functions with matrix input works well", {
fn = makeBBOBFunction(fid = 1L, iid = 1L, dimension = 10L)
fn = addLoggingWrapper(fn, logg.x = TRUE)
fn(matrix(runif(10L * 10L), ncol = 10L))
res = getLoggedValues(fn, compact = TRUE)
expect_true(is.data.frame(res))
expect_equal(nrow(res), 10L)
expect_equal(ncol(res), 10L + 1L) # dim plus y
})
test_that("logging for simple functions works well", {
# generate Sphere function
for (dimension in c(1L, 2L, 5L, 10L)) {
fn = makeSphereFunction(dimension = dimension)
par.ids = getParamIds(getParamSet(fn), with.nr = TRUE, repeated = TRUE)
# add logger for both x and y values
fn = addLoggingWrapper(fn, logg.x = TRUE)
# now apply some evaluations
fn(runif(dimension))
# check for logged vals
res = getLoggedValues(fn)
expect_logging_result_structure(res)
expect_equal(nrow(res$pars), 1L)
expect_equal(length(res$obj.vals), 1L)
for (i in seq(10L)) {
fn(runif(dimension))
}
res = getLoggedValues(fn)
expect_logging_result_structure(res)
expect_equal(nrow(res$pars), 11L)
expect_equal(length(res$obj.vals), 11L)
# check "compact" logging result
res = getLoggedValues(fn, compact = TRUE)
expect_true(is.data.frame(res))
expect_equal(nrow(res), 11L)
expect_equal(ncol(res), dimension + 1L) # dim plus the single objective value
}
})
test_that("logging for mixed function works well", {
# define a mixed function with three parameters
fn = makeSingleObjectiveFunction(
name = "Test",
fn = function(x) {
if (x$disc == "a") {
return(x$x1 + x$x2)
}
return(x$x1 + x$x2 + 1L)
},
par.set = makeParamSet(
makeDiscreteParam("disc", values = letters[1:2]),
makeNumericParam("x1"),
makeNumericParam("x2", lower = 0, upper = 10)
),
has.simple.signature = FALSE
)
# add logger
fn = addLoggingWrapper(fn, logg.x = TRUE)
test.df = data.frame(
disc = c("a", "a", "b"),
x1 = c(0, 0, 1),
x2 = c(0, 0, 1)
)
obj.vals = c(0, 0, 3)
for (i in 1:nrow(test.df)) {
fn(dfRowToList(test.df, i, par.set = ParamHelpers::getParamSet(fn)))
}
res = getLoggedValues(fn)
expect_logging_result_structure(res)
expect_equal(test.df, res$pars)
expect_true(all(obj.vals == res$obj.vals))
})
test_that("nesting of wrappers works well", {
fn = makeSphereFunction(2L)
fn = addCountingWrapper(fn)
fn = addLoggingWrapper(fn, logg.x = TRUE)
# evaluate 10 times
n.evals = 10L
for (i in seq(n.evals)) {
fn(runif(2))
}
# should be a wrapped function now
expect_true(isWrappedSmoofFunction(fn))
expect_equal(getNumberOfEvaluations(fn), n.evals)
fn2 = fn
resetEvaluationCounter(fn2)
expect_equal(getNumberOfEvaluations(fn2), 0L)
expect_logging_result_structure(getLoggedValues(fn))
# now unwrap step by step
fn2 = getWrappedFunction(fn)
expect_true(isWrappedSmoofFunction(fn2))
expect_equal(getNumberOfEvaluations(fn), 0L)
fn2 = getWrappedFunction(fn2)
expect_false(isWrappedSmoofFunction(fn2))
expect_true(isSmoofFunction(fn2))
# now unwrap completely
fn2 = getWrappedFunction(fn, deepest = TRUE)
expect_false(isWrappedSmoofFunction(fn2))
expect_true(isSmoofFunction(fn2))
})
test_that("getters work for wrapped smoof functions", {
fn = makeSphereFunction(2L)
fn = addCountingWrapper(fn)
fn = addLoggingWrapper(fn, logg.x = TRUE)
# getter functions work on wrappers
expect_true(hasTags(fn, "unimodal"))
expect_true(doesCountEvaluations(fn))
expect_true(all(is.character(getTags(fn))))
expect_true(is.character(getName(fn)))
expect_false(isMultiobjective(fn))
expect_true(isSingleobjective(fn))
expect_false(isVectorized(fn))
expect_false(isNoisy(fn))
expect_true(shouldBeMinimized(fn))
expect_is(getParamSet(fn), "ParamSet")
expect_true(all(is.numeric(getLowerBoxConstraints(fn))))
expect_true(all(is.numeric(getUpperBoxConstraints(fn))))
expect_true(hasGlobalOptimum(fn))
expect_true(hasConstraints(fn))
expect_false(hasOtherConstraints(fn))
expect_true(hasBoxConstraints(fn))
expect_true(is.list(getGlobalOptimum(fn)))
})
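# Illustrative sketch (not one of the original tests): the basic workflow the
# tests above exercise is wrap -> evaluate -> retrieve. All function names are
# the smoof ones used above; the evaluation point is made up.
demo.fn = makeSphereFunction(dimension = 2L)
demo.fn = addLoggingWrapper(demo.fn, logg.x = TRUE)
demo.fn(c(0.5, -0.5))
getLoggedValues(demo.fn, compact = TRUE) # one row: the two inputs plus the objective value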
|
/tests/testthat/test_logging.R
|
permissive
|
DrRoad/smoof
|
R
| false | false | 4,426 |
r
|
context("logging x and y values")
# Helper to check if a logging result has the correct structure
expect_logging_result_structure = function(x) {
expect_true(is.list(x))
expect_true(is.data.frame(x$pars))
expect_true(is.numeric(x$obj.vals))
}
test_that("logging for functions with matrix input works well", {
fn = makeBBOBFunction(fid = 1L, iid = 1L, dimension = 10L)
fn = addLoggingWrapper(fn, logg.x = TRUE)
fn(matrix(runif(10L * 10L), ncol = 10L))
res = getLoggedValues(fn, compact = TRUE)
expect_true(is.data.frame(res))
expect_equal(nrow(res), 10L)
expect_equal(ncol(res), 10L + 1L) # dim plus y
})
test_that("logging for simple functions works well", {
# generate Sphere function
for (dimension in c(1L, 2L, 5L, 10L)) {
fn = makeSphereFunction(dimension = dimension)
par.ids = getParamIds(getParamSet(fn), with.nr = TRUE, repeated = TRUE)
# add logger for both x and y values
fn = addLoggingWrapper(fn, logg.x = TRUE)
# now apply some evaluations
fn(runif(dimension))
# check for logged vals
res = getLoggedValues(fn)
expect_logging_result_structure(res)
expect_equal(nrow(res$pars), 1L)
expect_equal(length(res$obj.vals), 1L)
for (i in seq(10L)) {
fn(runif(dimension))
}
res = getLoggedValues(fn)
expect_logging_result_structure(res)
expect_equal(nrow(res$pars), 11L)
expect_equal(length(res$obj.vals), 11L)
# check "compact" logging result
res = getLoggedValues(fn, compact = TRUE)
expect_true(is.data.frame(res))
expect_equal(nrow(res), 11L)
expect_equal(ncol(res), dimension + 1L) # dim plus the single objective value
}
})
test_that("logging for mixed function works well", {
# define a mixed function with three parameters
fn = makeSingleObjectiveFunction(
name = "Test",
fn = function(x) {
if (x$disc == "a") {
return(x$x1 + x$x2)
}
return(x$x1 + x$x2 + 1L)
},
par.set = makeParamSet(
makeDiscreteParam("disc", values = letters[1:2]),
makeNumericParam("x1"),
makeNumericParam("x2", lower = 0, upper = 10)
),
has.simple.signature = FALSE
)
# add logger
fn = addLoggingWrapper(fn, logg.x = TRUE)
test.df = data.frame(
disc = c("a", "a", "b"),
x1 = c(0, 0, 1),
x2 = c(0, 0, 1)
)
obj.vals = c(0, 0, 3)
for (i in 1:nrow(test.df)) {
fn(dfRowToList(test.df, i, par.set = ParamHelpers::getParamSet(fn)))
}
res = getLoggedValues(fn)
expect_logging_result_structure(res)
expect_equal(test.df, res$pars)
expect_true(all(obj.vals == res$obj.vals))
})
test_that("nesting of wrappers works well", {
fn = makeSphereFunction(2L)
fn = addCountingWrapper(fn)
fn = addLoggingWrapper(fn, logg.x = TRUE)
# evaluate 10 times
n.evals = 10L
for (i in seq(n.evals)) {
fn(runif(2))
}
# should be a wrapped function now
expect_true(isWrappedSmoofFunction(fn))
expect_equal(getNumberOfEvaluations(fn), n.evals)
fn2 = fn
resetEvaluationCounter(fn2)
expect_equal(getNumberOfEvaluations(fn2), 0L)
expect_logging_result_structure(getLoggedValues(fn))
# now unwrap step by step
fn2 = getWrappedFunction(fn)
expect_true(isWrappedSmoofFunction(fn2))
expect_equal(getNumberOfEvaluations(fn), 0L)
fn2 = getWrappedFunction(fn2)
expect_false(isWrappedSmoofFunction(fn2))
expect_true(isSmoofFunction(fn2))
# now unwrap completely
fn2 = getWrappedFunction(fn, deepest = TRUE)
expect_false(isWrappedSmoofFunction(fn2))
expect_true(isSmoofFunction(fn2))
})
test_that("getters work for wrapped smoof functions", {
fn = makeSphereFunction(2L)
fn = addCountingWrapper(fn)
fn = addLoggingWrapper(fn, logg.x = TRUE)
# getter functions work on wrappers
expect_true(hasTags(fn, "unimodal"))
expect_true(doesCountEvaluations(fn))
expect_true(all(is.character(getTags(fn))))
expect_true(is.character(getName(fn)))
expect_false(isMultiobjective(fn))
expect_true(isSingleobjective(fn))
expect_false(isVectorized(fn))
expect_false(isNoisy(fn))
expect_true(shouldBeMinimized(fn))
expect_is(getParamSet(fn), "ParamSet")
expect_true(all(is.numeric(getLowerBoxConstraints(fn))))
expect_true(all(is.numeric(getUpperBoxConstraints(fn))))
expect_true(hasGlobalOptimum(fn))
expect_true(hasConstraints(fn))
expect_false(hasOtherConstraints(fn))
expect_true(hasBoxConstraints(fn))
expect_true(is.list(getGlobalOptimum(fn)))
})
|
############################################################################################################
# ANÁLISE EMPÍRICA - ESTIMAÇÃO RDD PARAMÉTRICA (LAST YEAR)
############################################################################################################
rm(list = ls())
# Pacotes:
library(dplyr)
library(ggplot2)
library(tidyr)
library(readr)
library(stringr)
library(htmltools)
library(htmlwidgets)
library(zeallot)
library(readxl)
library(gdata)
library(stringi)
library(Hmisc)
library(gmodels)
library(broom)
# Pacotes para estatísticas descritivas:
library(psych)
library(summarytools)
# Pacotes Específicos para se trabalhar com RDD:
library(rdd) # Pacote mais básico
library(rddtools) # Pacote com muitas opções de testes de placebo e sensibilidade das estimações e estimação paramétrica
library(rdrobust) # Pacote mais abrangente
library(rddapp) # Pacote com interface em Shiny, reduz a necessidade de se saber programar.
library(rddensity)
# Pacotes para se visualizar resultados de modelos:
library(stargazer)
library(jtools)
library(huxtable)
library(texreg)
# Pacote para regressões de Dados em Painel:
library(plm)
# Opções:
options(scipen = 999) # Desabilita notação scientífica. Para voltar ao padrão -> options(scipen = 1)
# Definindo o diretório onde se encontram as bases de dados prontas para análise empírica:
setwd("C:/Users/joseg_000.PC-JE/Documents/Dissertação - Dados/Bases de Dados Prontas")
# ----------------------------------------------------------------------------------------------------------------------------------
# Baixando as bases de dados (ESCOLHER A QUE FOR USAR)
# load("bases_prontas_ECD")
load("bases_prontas_ED")
# Definindo o diretório para o R salvar as tabelas geradas:
# setwd("C:/Users/joseg_000.PC-JE/Google Drive/FGV-EPGE/Dissertação/Tabelas Geradas no R/Análise com partidos de esquerda, centro e direita")
setwd("C:/Users/joseg_000.PC-JE/Google Drive/FGV-EPGE/Dissertação/Tabelas Geradas no R/Análise com partidos de esquerda e direita")
# ----------------------------------------------------------------------------------------------------------------------------------
# Analisando quais partidos estão sendo considerados:
table(base_pronta_last_year$ideologia_partido_eleito)
summarytools::freq(base_pronta_last_year$ideologia_partido_eleito)
summarytools::descr(base_pronta_last_year$margem_vitoria_esquerda, transpose = T, stats = "common", round.digits = 3)
# Lista de variáveis outcome de interesse:
# outcomes <- c("despesa_geral_pc", "despesa_geral_pib",
# "total_receitas_pc", "total_receitas_pib",
# "receita_tributaria_pc", "receita_tributaria_pib",
# "educacao_prop", "saude_prop", "assistencia_social_prop",
# "urbanismo_prop", "transporte_prop", "desporto_e_lazer_prop",
# "seguranca_publica_prop", "gestao_ambiental_prop",
# "servidores_comissionados_mil")
# Lista de variáveis de controle de interesse:
# covariates <- c("regiao", "pop", "fracao_pop_masculina", "fracao_pop_urbana",
# "proporcao_idoso", "proporcao_jovem", "proporcao_brancos", "pib_pc")
# -----------------------------------------------------------------------------------------------------------------------
# Estimação RDD Paramétrica usando o pacote rddtools
# Usando a função rddtools::rdd_data() para construir os RDD objects de interesse:
# Primeiramente, devemos armazenar os modelos RDD como rdd objects: (SEM COVARIATES)
# O prefixo p_ significa "PARAMÉTRICO":
p_rdd_object_unconditional_1 <- rdd_data(y = log(despesa_geral_pc), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_2 <- rdd_data(y = log(despesa_geral_pib), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_3 <- rdd_data(y = log(total_receitas_pc), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_4 <- rdd_data(y = log(total_receitas_pib), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_5 <- rdd_data(y = log(receita_tributaria_pc + 0.0001), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_6 <- rdd_data(y = log(receita_tributaria_pib + 0.0001), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_7 <- rdd_data(y = log(servidores_comissionados_mil + 0.0001), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_8 <- rdd_data(y = educacao_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_9 <- rdd_data(y = saude_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_10 <- rdd_data(y = assistencia_social_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_11 <- rdd_data(y = urbanismo_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_12 <- rdd_data(y = transporte_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_13 <- rdd_data(y = desporto_e_lazer_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_14 <- rdd_data(y = seguranca_publica_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_15 <- rdd_data(y = gestao_ambiental_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
# ------------------------------------------------------------------------------------------------------------
# REGRESSÕES RDD PARAMÉTRICAS SEM COVARIATES:
# Regressões sem covariates, polinômio de ordem 3:
p_rdd_unconditional_1 <- rdd_reg_lm(p_rdd_object_unconditional_1, order = 3, slope = "separate")
p_rdd_unconditional_2 <- rdd_reg_lm(p_rdd_object_unconditional_2, order = 3, slope = "separate")
p_rdd_unconditional_3 <- rdd_reg_lm(p_rdd_object_unconditional_3, order = 3, slope = "separate")
p_rdd_unconditional_4 <- rdd_reg_lm(p_rdd_object_unconditional_4, order = 3, slope = "separate")
p_rdd_unconditional_5 <- rdd_reg_lm(p_rdd_object_unconditional_5, order = 3, slope = "separate")
p_rdd_unconditional_6 <- rdd_reg_lm(p_rdd_object_unconditional_6, order = 3, slope = "separate")
p_rdd_unconditional_7 <- rdd_reg_lm(p_rdd_object_unconditional_7, order = 3, slope = "separate")
p_rdd_unconditional_8 <- rdd_reg_lm(p_rdd_object_unconditional_8, order = 3, slope = "separate")
p_rdd_unconditional_9 <- rdd_reg_lm(p_rdd_object_unconditional_9, order = 3, slope = "separate")
p_rdd_unconditional_10 <- rdd_reg_lm(p_rdd_object_unconditional_10, order = 3, slope = "separate")
p_rdd_unconditional_11 <- rdd_reg_lm(p_rdd_object_unconditional_11, order = 3, slope = "separate")
p_rdd_unconditional_12 <- rdd_reg_lm(p_rdd_object_unconditional_12, order = 3, slope = "separate")
p_rdd_unconditional_13 <- rdd_reg_lm(p_rdd_object_unconditional_13, order = 3, slope = "separate")
p_rdd_unconditional_14 <- rdd_reg_lm(p_rdd_object_unconditional_14, order = 3, slope = "separate")
p_rdd_unconditional_15 <- rdd_reg_lm(p_rdd_object_unconditional_15, order = 3, slope = "separate")
stargazer(p_rdd_unconditional_1, p_rdd_unconditional_2,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total expenditures (per capita)", "Total Expeditures (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_2, type = "HC0", cluster = "group")))))
stargazer(p_rdd_unconditional_3, p_rdd_unconditional_4, p_rdd_unconditional_5, p_rdd_unconditional_6,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total Revenues (per capita)", "Total Revenues (share of income)",
"Tax Revenues (per capita)", "Tax Revenues (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_6, type = "HC0", cluster = "group")))))
stargazer(p_rdd_unconditional_7, p_rdd_unconditional_8, p_rdd_unconditional_9, p_rdd_unconditional_10,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Comission Employees (per 1000 residents)", "Spending on Education (share of total)",
"Spending on Health (share of total)", "Spending on Social Assistance (share of total)"),
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_10, type = "HC0", cluster = "group")))))
column_labels_vector <- c("Total expenditures (per capita)", "Total Expeditures (share of income)",
"Total Revenues (per capita)", "Total Revenues (share of income)",
"Tax Revenues (per capita)", "Tax Revenues (share of income)",
"Comission Employees (per 1000 residents)", "Spending on Education (share of total)",
"Spending on Health (share of total)", "Spending on Social Assistance (share of total)",
"Spending on Urbanism (Share of Total)", "Spending on Transportation (Share of Total)",
"Spending on Sports and Leisure (Share of Total)",
"Spending on Public Safety (Share of Total)",
"Spending on Environmental management (Share of Total)")
# Todas as regressões em uma só tabela, exportada para excel xls, e erros padrão com cluster:
# (Abrir os arquivos em html, copiar e colar (transpondo) no excel e arrumar os dados na tabela)
stargazer(p_rdd_unconditional_1, p_rdd_unconditional_2, p_rdd_unconditional_3, p_rdd_unconditional_4,
p_rdd_unconditional_5, p_rdd_unconditional_6, p_rdd_unconditional_7, p_rdd_unconditional_8,
p_rdd_unconditional_9, p_rdd_unconditional_10, p_rdd_unconditional_11, p_rdd_unconditional_12,
p_rdd_unconditional_13, p_rdd_unconditional_14, p_rdd_unconditional_15,
digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD. No covariates. Polymonial of order 3",
column.labels = column_labels_vector,
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_2, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_6, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_10, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_11, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_12, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_13, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_14, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_15, type = "HC0", cluster = "group")))),
type = "html", out = "p_rdd_unconditional.html")
plot(p_rdd_unconditional_1)
plot(p_rdd_unconditional_2)
plot(p_rdd_unconditional_3)
plot(p_rdd_unconditional_4)
plot(p_rdd_unconditional_5)
plot(p_rdd_unconditional_6)
plot(p_rdd_unconditional_7)
plot(p_rdd_unconditional_8)
plot(p_rdd_unconditional_9)
plot(p_rdd_unconditional_10)
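# ------------------------------------------------------------------------------------------------------------
# Illustrative robustness sketch (not in the original script): the rdrobust
# package loaded above gives a local-polynomial RD estimate with an MSE-optimal
# bandwidth; shown here for the first outcome only.
rd_check_1 <- rdrobust(y = log(base_pronta_last_year$despesa_geral_pc),
                       x = base_pronta_last_year$margem_vitoria_esquerda, c = 0)
summary(rd_check_1)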
# ------------------------------------------------------------------------------------------------------------
# REGRESSÕES RDD PARAMÉTRICAS COM COVARIATES:
# Primeiramente, devemos armazenar os modelos RDD como rdd objects: (COM COVARIATES)
# O prefixo p_ significa "PARAMÉTRICO":
covariates_df <- base_pronta_last_year %>% select(pop, fracao_pop_masculina, fracao_pop_urbana, pib_pc)
p_rdd_object_conditional_1 <- rdd_data(y = log(despesa_geral_pc), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_2 <- rdd_data(y = log(despesa_geral_pib), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_3 <- rdd_data(y = log(total_receitas_pc), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_4 <- rdd_data(y = log(total_receitas_pib), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_5 <- rdd_data(y = log(receita_tributaria_pc + 0.0001), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_6 <- rdd_data(y = log(receita_tributaria_pib + 0.0001), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_7 <- rdd_data(y = log(servidores_comissionados_mil + 0.0001), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_8 <- rdd_data(y = educacao_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_9 <- rdd_data(y = saude_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_10 <- rdd_data(y = assistencia_social_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_11 <- rdd_data(y = urbanismo_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_12 <- rdd_data(y = transporte_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_13 <- rdd_data(y = desporto_e_lazer_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_14 <- rdd_data(y = seguranca_publica_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_15 <- rdd_data(y = gestao_ambiental_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
# Regressões com covariates, polinômio de ordem 3:
attach(base_pronta_last_year)
p_rdd_conditional_1 <- rdd_reg_lm(p_rdd_object_conditional_1, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_2 <- rdd_reg_lm(p_rdd_object_conditional_2, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_3 <- rdd_reg_lm(p_rdd_object_conditional_3, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_4 <- rdd_reg_lm(p_rdd_object_conditional_4, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_5 <- rdd_reg_lm(p_rdd_object_conditional_5, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_6 <- rdd_reg_lm(p_rdd_object_conditional_6, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_7 <- rdd_reg_lm(p_rdd_object_conditional_7, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_8 <- rdd_reg_lm(p_rdd_object_conditional_8, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_9 <- rdd_reg_lm(p_rdd_object_conditional_9, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_10 <- rdd_reg_lm(p_rdd_object_conditional_10, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_11 <- rdd_reg_lm(p_rdd_object_conditional_11, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_12 <- rdd_reg_lm(p_rdd_object_conditional_12, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_13 <- rdd_reg_lm(p_rdd_object_conditional_13, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_14 <- rdd_reg_lm(p_rdd_object_conditional_14, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_15 <- rdd_reg_lm(p_rdd_object_conditional_15, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
stargazer(p_rdd_conditional_1, p_rdd_conditional_2,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total expenditures (per capita)", "Total Expeditures (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_conditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_2, type = "HC0", cluster = "group")))))
stargazer(p_rdd_conditional_3, p_rdd_conditional_4, p_rdd_conditional_5, p_rdd_conditional_6,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total Revenues (per capita)", "Total Revenues (share of income)",
"Tax Revenues (per capita)", "Tax Revenues (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_conditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_6, type = "HC0", cluster = "group")))))
stargazer(p_rdd_conditional_7, p_rdd_conditional_8, p_rdd_conditional_9, p_rdd_conditional_10,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Comission Employees (per 1000 residents)", "Spending on Education (share of total)",
"Spending on Health (share of total)", "Spending on Social Assistance (share of total)"),
se = list(sqrt(diag(vcovHC(p_rdd_conditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_10, type = "HC0", cluster = "group")))))
# Todas as regressões em uma só tabela, exportada para excel xls, e erros padrão com cluster:
# (Abrir os arquivos em html, copiar e colar (transpondo) no excel e arrumar os dados na tabela)
stargazer(p_rdd_conditional_1, p_rdd_conditional_2, p_rdd_conditional_3, p_rdd_conditional_4,
p_rdd_conditional_5, p_rdd_conditional_6, p_rdd_conditional_7, p_rdd_conditional_8,
p_rdd_conditional_9, p_rdd_conditional_10, p_rdd_conditional_11, p_rdd_conditional_12,
p_rdd_conditional_13, p_rdd_conditional_14, p_rdd_conditional_15,
digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD. Four covariates. Polymonial of order 3",
column.labels = column_labels_vector,
se = list(sqrt(diag(vcovHC(p_rdd_conditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_2, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_6, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_10, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_11, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_12, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_13, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_14, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_15, type = "HC0", cluster = "group")))),
type = "html", out = "p_rdd_conditional.html")
# plot(p_rdd_conditional_1)
# plot(p_rdd_conditional_2)
# plot(p_rdd_conditional_3)
# plot(p_rdd_conditional_4)
# plot(p_rdd_conditional_5)
# plot(p_rdd_conditional_6)
# plot(p_rdd_conditional_7)
# plot(p_rdd_conditional_8)
# plot(p_rdd_conditional_9)
# plot(p_rdd_conditional_10)
# plot(p_rdd_conditional_11)
# plot(p_rdd_conditional_12)
detach(base_pronta_last_year)
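# Illustrative manipulation check (not in the original script): the rddensity
# package loaded above tests for sorting of the running variable at the cutoff.
dens_check <- rddensity(base_pronta_last_year$margem_vitoria_esquerda, c = 0)
summary(dens_check)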
|
/4.5 - Estimação RDD Paramétrico (last year).R
|
no_license
|
joseeduardo-gs/do-political-parties-matter
|
R
| false | false | 23,941 |
r
|
############################################################################################################
# ANÁLISE EMPÍRICA - ESTIMAÇÃO RDD PARAMÉTRICA (LAST YEAR)
############################################################################################################
rm(list = ls())
# Pacotes:
library(dplyr)
library(ggplot2)
library(tidyr)
library(readr)
library(stringr)
library(htmltools)
library(htmlwidgets)
library(zeallot)
library(readxl)
library(gdata)
library(stringi)
library(Hmisc)
library(gmodels)
library(broom)
# Pacotes para estatísticas descritivas:
library(psych)
library(summarytools)
# Pacotes Específicos para se trabalhar com RDD:
library(rdd) # Pacote mais básico
library(rddtools) # Pacote com muitas opções de testes de placebo e sensibilidade das estimações e estimação paramétrica
library(rdrobust) # Pacote mais abrangente
library(rddapp) # Pacote com interface em Shiny, reduz a necessidade de se saber programar.
library(rddensity)
# Pacotes para se visualizar resultados de modelos:
library(stargazer)
library(jtools)
library(huxtable)
library(texreg)
# Pacote para regressões de Dados em Painel:
library(plm)
# Opções:
options(scipen = 999) # Desabilita notação scientífica. Para voltar ao padrão -> options(scipen = 1)
# Definindo o diretório onde se encontram as bases de dados prontas para análise empírica:
setwd("C:/Users/joseg_000.PC-JE/Documents/Dissertação - Dados/Bases de Dados Prontas")
# ----------------------------------------------------------------------------------------------------------------------------------
# Baixando as bases de dados (ESCOLHER A QUE FOR USAR)
# load("bases_prontas_ECD")
load("bases_prontas_ED")
# Definindo o diretório para o R salvar as tabelas geradas:
# setwd("C:/Users/joseg_000.PC-JE/Google Drive/FGV-EPGE/Dissertação/Tabelas Geradas no R/Análise com partidos de esquerda, centro e direita")
setwd("C:/Users/joseg_000.PC-JE/Google Drive/FGV-EPGE/Dissertação/Tabelas Geradas no R/Análise com partidos de esquerda e direita")
# ----------------------------------------------------------------------------------------------------------------------------------
# Analisando quais partidos estão sendo considerados:
table(base_pronta_last_year$ideologia_partido_eleito)
summarytools::freq(base_pronta_last_year$ideologia_partido_eleito)
summarytools::descr(base_pronta_last_year$margem_vitoria_esquerda, transpose = T, stats = "common", round.digits = 3)
# Lista de variáveis outcome de interesse:
# outcomes <- c("despesa_geral_pc", "despesa_geral_pib",
# "total_receitas_pc", "total_receitas_pib",
# "receita_tributaria_pc", "receita_tributaria_pib",
# "educacao_prop", "saude_prop", "assistencia_social_prop",
# "urbanismo_prop", "transporte_prop", "desporto_e_lazer_prop",
# "seguranca_publica_prop", "gestao_ambiental_prop",
# "servidores_comissionados_mil")
# Lista de variáveis de controle de interesse:
# covariates <- c("regiao", "pop", "fracao_pop_masculina", "fracao_pop_urbana",
# "proporcao_idoso", "proporcao_jovem", "proporcao_brancos", "pib_pc")
# -----------------------------------------------------------------------------------------------------------------------
# Estimação RDD Paramétrica usando o pacote rddtools
# Usando a função rddtools::rdd_data() para construir os RDD objects de interesse:
# Primeiramente, devemos armazenar os modelos RDD como rdd objects: (SEM COVARIATES)
# O prefixo p_ significa "PARAMÉTRICO":
p_rdd_object_unconditional_1 <- rdd_data(y = log(despesa_geral_pc), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_2 <- rdd_data(y = log(despesa_geral_pib), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_3 <- rdd_data(y = log(total_receitas_pc), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_4 <- rdd_data(y = log(total_receitas_pib), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_5 <- rdd_data(y = log(receita_tributaria_pc + 0.0001), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_6 <- rdd_data(y = log(receita_tributaria_pib + 0.0001), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_7 <- rdd_data(y = log(servidores_comissionados_mil + 0.0001), x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_8 <- rdd_data(y = educacao_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_9 <- rdd_data(y = saude_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_10 <- rdd_data(y = assistencia_social_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_11 <- rdd_data(y = urbanismo_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_12 <- rdd_data(y = transporte_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_13 <- rdd_data(y = desporto_e_lazer_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_14 <- rdd_data(y = seguranca_publica_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_unconditional_15 <- rdd_data(y = gestao_ambiental_prop, x = margem_vitoria_esquerda, cutpoint = 0, data = base_pronta_last_year)
# ------------------------------------------------------------------------------------------------------------
# REGRESSÕES RDD PARAMÉTRICAS SEM COVARIATES:
# Regressões sem covariates, polinômio de ordem 3:
p_rdd_unconditional_1 <- rdd_reg_lm(p_rdd_object_unconditional_1, order = 3, slope = "separate")
p_rdd_unconditional_2 <- rdd_reg_lm(p_rdd_object_unconditional_2, order = 3, slope = "separate")
p_rdd_unconditional_3 <- rdd_reg_lm(p_rdd_object_unconditional_3, order = 3, slope = "separate")
p_rdd_unconditional_4 <- rdd_reg_lm(p_rdd_object_unconditional_4, order = 3, slope = "separate")
p_rdd_unconditional_5 <- rdd_reg_lm(p_rdd_object_unconditional_5, order = 3, slope = "separate")
p_rdd_unconditional_6 <- rdd_reg_lm(p_rdd_object_unconditional_6, order = 3, slope = "separate")
p_rdd_unconditional_7 <- rdd_reg_lm(p_rdd_object_unconditional_7, order = 3, slope = "separate")
p_rdd_unconditional_8 <- rdd_reg_lm(p_rdd_object_unconditional_8, order = 3, slope = "separate")
p_rdd_unconditional_9 <- rdd_reg_lm(p_rdd_object_unconditional_9, order = 3, slope = "separate")
p_rdd_unconditional_10 <- rdd_reg_lm(p_rdd_object_unconditional_10, order = 3, slope = "separate")
p_rdd_unconditional_11 <- rdd_reg_lm(p_rdd_object_unconditional_11, order = 3, slope = "separate")
p_rdd_unconditional_12 <- rdd_reg_lm(p_rdd_object_unconditional_12, order = 3, slope = "separate")
p_rdd_unconditional_13 <- rdd_reg_lm(p_rdd_object_unconditional_13, order = 3, slope = "separate")
p_rdd_unconditional_14 <- rdd_reg_lm(p_rdd_object_unconditional_14, order = 3, slope = "separate")
p_rdd_unconditional_15 <- rdd_reg_lm(p_rdd_object_unconditional_15, order = 3, slope = "separate")
stargazer(p_rdd_unconditional_1, p_rdd_unconditional_2,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total expenditures (per capita)", "Total Expeditures (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_2, type = "HC0", cluster = "group")))))
stargazer(p_rdd_unconditional_3, p_rdd_unconditional_4, p_rdd_unconditional_5, p_rdd_unconditional_6,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total Revenues (per capita)", "Total Revenues (share of income)",
"Tax Revenues (per capita)", "Tax Revenues (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_6, type = "HC0", cluster = "group")))))
stargazer(p_rdd_unconditional_7, p_rdd_unconditional_8, p_rdd_unconditional_9, p_rdd_unconditional_10,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Comission Employees (per 1000 residents)", "Spending on Education (share of total)",
"Spending on Health (share of total)", "Spending on Social Assistance (share of total)"),
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_10, type = "HC0", cluster = "group")))))
column_labels_vector <- c("Total expenditures (per capita)", "Total Expeditures (share of income)",
"Total Revenues (per capita)", "Total Revenues (share of income)",
"Tax Revenues (per capita)", "Tax Revenues (share of income)",
"Comission Employees (per 1000 residents)", "Spending on Education (share of total)",
"Spending on Health (share of total)", "Spending on Social Assistance (share of total)",
"Spending on Urbanism (Share of Total)", "Spending on Transportation (Share of Total)",
"Spending on Sports and Leisure (Share of Total)",
"Spending on Public Safety (Share of Total)",
"Spending on Environmental management (Share of Total)")
# Todas as regressões em uma só tabela, exportada para excel xls, e erros padrão com cluster:
# (Abrir os arquivos em html, copiar e colar (transpondo) no excel e arrumar os dados na tabela)
stargazer(p_rdd_unconditional_1, p_rdd_unconditional_2, p_rdd_unconditional_3, p_rdd_unconditional_4,
p_rdd_unconditional_5, p_rdd_unconditional_6, p_rdd_unconditional_7, p_rdd_unconditional_8,
p_rdd_unconditional_9, p_rdd_unconditional_10, p_rdd_unconditional_11, p_rdd_unconditional_12,
p_rdd_unconditional_13, p_rdd_unconditional_14, p_rdd_unconditional_15,
digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD. No covariates. Polymonial of order 3",
column.labels = column_labels_vector,
se = list(sqrt(diag(vcovHC(p_rdd_unconditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_2, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_6, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_10, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_11, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_12, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_13, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_14, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_unconditional_15, type = "HC0", cluster = "group")))),
type = "html", out = "p_rdd_unconditional.html")
plot(p_rdd_unconditional_1)
plot(p_rdd_unconditional_2)
plot(p_rdd_unconditional_3)
plot(p_rdd_unconditional_4)
plot(p_rdd_unconditional_5)
plot(p_rdd_unconditional_6)
plot(p_rdd_unconditional_7)
plot(p_rdd_unconditional_8)
plot(p_rdd_unconditional_9)
plot(p_rdd_unconditional_10)
# ------------------------------------------------------------------------------------------------------------
# REGRESSÕES RDD PARAMÉTRICAS COM COVARIATES:
# Primeiramente, devemos armazenar os modelos RDD como rdd objects: (COM COVARIATES)
# O prefixo p_ significa "PARAMÉTRICO":
covariates_df <- base_pronta_last_year %>% select(pop, fracao_pop_masculina, fracao_pop_urbana, pib_pc)
p_rdd_object_conditional_1 <- rdd_data(y = log(despesa_geral_pc), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_2 <- rdd_data(y = log(despesa_geral_pib), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_3 <- rdd_data(y = log(total_receitas_pc), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_4 <- rdd_data(y = log(total_receitas_pib), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_5 <- rdd_data(y = log(receita_tributaria_pc + 0.0001), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_6 <- rdd_data(y = log(receita_tributaria_pib + 0.0001), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_7 <- rdd_data(y = log(servidores_comissionados_mil + 0.0001), x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_8 <- rdd_data(y = educacao_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_9 <- rdd_data(y = saude_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_10 <- rdd_data(y = assistencia_social_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_11 <- rdd_data(y = urbanismo_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_12 <- rdd_data(y = transporte_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_13 <- rdd_data(y = desporto_e_lazer_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_14 <- rdd_data(y = seguranca_publica_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
p_rdd_object_conditional_15 <- rdd_data(y = gestao_ambiental_prop, x = margem_vitoria_esquerda, covar = covariates_df, cutpoint = 0, data = base_pronta_last_year)
# Regressões com covariates, polinômio de ordem 3:
attach(base_pronta_last_year)
p_rdd_conditional_1 <- rdd_reg_lm(p_rdd_object_conditional_1, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_2 <- rdd_reg_lm(p_rdd_object_conditional_2, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_3 <- rdd_reg_lm(p_rdd_object_conditional_3, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_4 <- rdd_reg_lm(p_rdd_object_conditional_4, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_5 <- rdd_reg_lm(p_rdd_object_conditional_5, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_6 <- rdd_reg_lm(p_rdd_object_conditional_6, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_7 <- rdd_reg_lm(p_rdd_object_conditional_7, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_8 <- rdd_reg_lm(p_rdd_object_conditional_8, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_9 <- rdd_reg_lm(p_rdd_object_conditional_9, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_10 <- rdd_reg_lm(p_rdd_object_conditional_10, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_11 <- rdd_reg_lm(p_rdd_object_conditional_11, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_12 <- rdd_reg_lm(p_rdd_object_conditional_12, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_13 <- rdd_reg_lm(p_rdd_object_conditional_13, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_14 <- rdd_reg_lm(p_rdd_object_conditional_14, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
p_rdd_conditional_15 <- rdd_reg_lm(p_rdd_object_conditional_15, order = 3, slope = "separate", covariates = "pop", covar.opt = list("include"))
stargazer(p_rdd_conditional_1, p_rdd_conditional_2,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total expenditures (per capita)", "Total Expeditures (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_conditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_2, type = "HC0", cluster = "group")))))
stargazer(p_rdd_conditional_3, p_rdd_conditional_4, p_rdd_conditional_5, p_rdd_conditional_6,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Total Revenues (per capita)", "Total Revenues (share of income)",
"Tax Revenues (per capita)", "Tax Revenues (share of income)"),
se = list(sqrt(diag(vcovHC(p_rdd_conditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_6, type = "HC0", cluster = "group")))))
stargazer(p_rdd_conditional_7, p_rdd_conditional_8, p_rdd_conditional_9, p_rdd_conditional_10,
type = "text", digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD",
column.labels = c("Comission Employees (per 1000 residents)", "Spending on Education (share of total)",
"Spending on Health (share of total)", "Spending on Social Assistance (share of total)"),
se = list(sqrt(diag(vcovHC(p_rdd_conditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_10, type = "HC0", cluster = "group")))))
# Todas as regressões em uma só tabela, exportada para excel xls, e erros padrão com cluster:
# (Abrir os arquivos em html, copiar e colar (transpondo) no excel e arrumar os dados na tabela)
stargazer(p_rdd_conditional_1, p_rdd_conditional_2, p_rdd_conditional_3, p_rdd_conditional_4,
p_rdd_conditional_5, p_rdd_conditional_6, p_rdd_conditional_7, p_rdd_conditional_8,
p_rdd_conditional_9, p_rdd_conditional_10, p_rdd_conditional_11, p_rdd_conditional_12,
p_rdd_conditional_13, p_rdd_conditional_14, p_rdd_conditional_15,
digits = 3, digits.extra = 2, digit.separator = "", keep = "D", single.row = F, align = T, table.placement = "h!",
covariate.labels = "Left-Wing Party", title = "Parametric RDD. Four covariates. Polymonial of order 3",
column.labels = column_labels_vector,
se = list(sqrt(diag(vcovHC(p_rdd_conditional_1, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_2, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_3, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_4, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_5, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_6, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_7, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_8, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_9, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_10, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_11, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_12, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_13, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_14, type = "HC0", cluster = "group"))),
sqrt(diag(vcovHC(p_rdd_conditional_15, type = "HC0", cluster = "group")))),
type = "html", out = "p_rdd_conditional.html")
# plot(p_rdd_conditional_1)
# plot(p_rdd_conditional_2)
# plot(p_rdd_conditional_3)
# plot(p_rdd_conditional_4)
# plot(p_rdd_conditional_5)
# plot(p_rdd_conditional_6)
# plot(p_rdd_conditional_7)
# plot(p_rdd_conditional_8)
# plot(p_rdd_conditional_9)
# plot(p_rdd_conditional_10)
# plot(p_rdd_conditional_11)
# plot(p_rdd_conditional_12)
detach(base_pronta_last_year)
|
# Andy Philips
# andrew.philips@colorado.edu
# 06/08/17
# --------------------------------#
shinyUI(fluidPage(
includeCSS("style.css"),
titlePanel("Central Limit Theorem Simulator"),
sidebarLayout(position = "right",
sidebarPanel(
radioButtons("dist", "Choose Distribution",
c("Normal" = "norm",
"Uniform" = "unif",
"Exponential" = "expo")),
sliderInput(inputId = "obs",
label = "Number of Observations Per Sample",
min = 1, max = 500, value = 50),
br(),
sliderInput(inputId = "samples", label = "Number of Samples", min = 1, max = 10000, value = 100),
br()
),
mainPanel(
p("This app shows the Central Limit Theorem in action. The Central Limit Theorem states that, given a large enough number of independent, identically distributed samples, the resulting means of the samples will be distributed approximately normal. In other words, the mean of the means of the samples converge to the population mean, given enough samples."),
p("Below you can try out the CLT for yourself across a number of different distributions, number of observations in each sample, and the total number of samples. Note that the data were sampled with mean equal to one for all distributions."),
p("Author: ",
a("Andrew Q. Philips", href = "http://www.andyphilips.com/")),
plotOutput("plot1"),
br(),
fluidRow(column(6, p("Summary Statistics of Means of Samples"),
verbatimTextOutput("summary")),
column(6, plotOutput("plot2")))
)
)
))
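# Illustrative sketch (not part of this file): ui.R above expects a companion
# server.R that fills output$plot1, output$plot2 and output$summary from
# input$dist, input$obs and input$samples. The real server is not shown here;
# the version below is an assumption, with each distribution parameterized to
# have mean 1 as stated in the text above. By the CLT, each simulated sample
# mean is then approximately Normal(1, sigma^2 / obs). Kept as comments so this
# sketch does not interfere with the ui object returned by this file.
# shinyServer(function(input, output) {
#   sample.means <- reactive({
#     rfun <- switch(input$dist,
#                    norm = function(n) rnorm(n, mean = 1),
#                    unif = function(n) runif(n, min = 0, max = 2),
#                    expo = function(n) rexp(n, rate = 1))
#     replicate(input$samples, mean(rfun(input$obs)))
#   })
#   output$plot1 <- renderPlot(hist(sample.means(), main = "Distribution of sample means"))
#   output$plot2 <- renderPlot({ qqnorm(sample.means()); qqline(sample.means()) })
#   output$summary <- renderPrint(summary(sample.means()))
# })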
|
/CLT-Simulator/ui.R
|
no_license
|
anhnguyendepocen/Shiny-2
|
R
| false | false | 1,656 |
r
|
# Andy Philips
# andrew.philips@colorado.edu
# 06/08/17
# --------------------------------#
shinyUI(fluidPage(
includeCSS("style.css"),
titlePanel("Central Limit Theorem Simulator"),
sidebarLayout(position = "right",
sidebarPanel(
radioButtons("dist", "Choose Distribution",
c("Normal" = "norm",
"Uniform" = "unif",
"Exponential" = "expo")),
sliderInput(inputId = "obs",
label = "Number of Observations Per Sample",
min = 1, max = 500, value = 50),
br(),
sliderInput(inputId = "samples", label = "Number of Samples", min = 1, max = 10000, value = 100),
br()
),
mainPanel(
p("This app shows the Central Limit Theorem in action. The Central Limit Theorem states that, given a large enough number of independent, identically distributed samples, the resulting means of the samples will be distributed approximately normal. In other words, the mean of the means of the samples converge to the population mean, given enough samples."),
p("Below you can try out the CLT for yourself across a number of different distributions, number of observations in each sample, and the total number of samples. Note that the data were sampled with mean equal to one for all distributions."),
p("Author: ",
a("Andrew Q. Philips", href = "http://www.andyphilips.com/")),
plotOutput("plot1"),
br(),
fluidRow(column(6, p("Summary Statistics of Means of Samples"),
verbatimTextOutput("summary")),
column(6, plotOutput("plot2")))
)
)
))
|
setwd("C://Project1")
getwd()
dataset <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
dataset$Date <- as.Date(dataset$Date, format="%d/%m/%Y")
data <- subset(dataset, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(dataset)
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l", ylab="Active Power (kW)", xlab="")
plot(Voltage~Datetime, type="l", ylab="Voltage (V)", xlab="")
plot(Sub_metering_1~Datetime, type="l", ylab="Active Power (kW)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub Meter 1", "Sub Meter 2", "Sub Meter 3"))
plot(Global_reactive_power~Datetime, type="l", ylab="Re-Active Power (kW)",xlab="")
})
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
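# Illustrative check (not in the original script): confirm the exported PNG
# exists and is non-empty.
stopifnot(file.exists("plot4.png"), file.info("plot4.png")$size > 0)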
|
/plot4.R
|
no_license
|
williewilkins/ExData_Plotting1
|
R
| false | false | 1,150 |
r
|
setwd("C://Project1")
getwd()
dataset <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
dataset$Date <- as.Date(dataset$Date, format="%d/%m/%Y")
data <- subset(dataset, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(dataset)
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l", ylab="Active Power (kW)", xlab="")
plot(Voltage~Datetime, type="l", ylab="Voltage (V)", xlab="")
plot(Sub_metering_1~Datetime, type="l", ylab="Active Power (kW)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub Meter 1", "Sub Meter 2", "Sub Meter 3"))
plot(Global_reactive_power~Datetime, type="l", ylab="Re-Active Power (kW)",xlab="")
})
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
#basic class that points to the database and allows easier manipulations
setClass("BuxcoDB", representation(db.name="character", annotation.table="character"), prototype=prototype(db.name=character(0), annotation.table="Additional_labels"))
.run.update.statement <- function(db.con, query){
state <- dbSendStatement(db.con, query)
dbHasCompleted(state)
dbClearResult(state)
}
.insert.data <- function(db.con, query, data){
    #way too slow, stick with deprecated version for now
#state <- dbSendQuery(db.con, query)
#dbBind(state, data)
#dbClearResult(state)
dbBegin(db.con)
suppressWarnings(state <- dbSendPreparedQuery(db.con, query, bind.data = data))
dbClearResult(state)
dbCommit(db.con)
}
makeBuxcoDB <- function(db.name=NULL, annotation.table="Additional_labels")
{
if (missing(db.name) || is.null(db.name))
{
stop("ERROR: Need to supply a valid file name for db.name")
}
else if (! file.exists(db.name))
{
stop(paste("ERROR:", db.name, "does not exist"))
}
if (is.character(annotation.table) == FALSE || length(annotation.table) != 1)
{
stop("ERROR annotation.table needs to be a character vector of length 1")
}
return(new("BuxcoDB", db.name=db.name, annotation.table=annotation.table))
}
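# Illustrative usage sketch (not part of the package source, kept as comments so
# it does not run when the file is sourced): the database name below is
# hypothetical, and retrieveData() is defined elsewhere in the package.
# bux.db <- makeBuxcoDB(db.name = "buxco_parsed.db")
# head(retrieveData(bux.db, variables = "Penh"))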
setGeneric("tsplot", def=function(obj,...) standardGeneric("tsplot"))
setMethod("tsplot", signature("BuxcoDB"), function(obj, ..., exp.factor=NULL, summary.func=function(x) mean(log(x)), legend.name="Factor", xlab="Days", ylab="mean(log(Value))")
{
if (is.function(summary.func) == F)
{
stop("ERROR: summary.func needs to be a function that takes vector and returns a single value")
}
if ((missing(legend.name) || (is.character(legend.name) && length(legend.name) == 1)) == F)
{
stop("ERROR: If legend.name is non-missing, it needs to be a single character value")
}
if ((missing(xlab) || (is.character(xlab) && length(xlab) == 1)) == F)
{
stop("ERROR: If xlab is non-missing, it needs to be a single character value")
}
if ((missing(ylab) || (is.character(ylab) && length(ylab) == 1)) == F)
{
stop("ERROR: If ylab is non-missing, it needs to be a single character value")
}
use.dta <- retrieveData(obj, ...)
if ((missing(exp.factor) || is.null(exp.factor) || (is.character(exp.factor) && length(exp.factor) == 1 && exp.factor %in% names(use.dta))) == F)
{
stop("ERROR: If exp.factor is specified, it needs to correspond to a column from 'retrieveData'")
}
show(qplot(x=Days, y=Value, data=use.dta, group=Sample_Name, stat="summary", fun.y=summary.func, facets=.~Variable_Name, geom="line", xlab=xlab, ylab=ylab) + aes_string(color=exp.factor) + labs(color=legend.name))
})
setGeneric("mvtsplot", def=function(obj,...) standardGeneric("mvtsplot"))
setMethod("mvtsplot", signature("BuxcoDB"), function(obj,..., plot.value="Penh",main=plot.value, summary.func=function(x) data.frame(Value=mean(log(x$Value))), outer.group.name=NULL, inner.group.name=NULL, outer.cols=NULL, colorbrewer.pal="PRGn")
{
if ("Days" %in% annoCols(obj) == F)
{
stop("ERROR: The BuxcoDB object needs to contain a 'Days' column potentially created through the use of 'day.infer.query'")
}
if ((is.character(plot.value) && length(plot.value) == 1 && plot.value %in% variables(obj)) == F)
{
stop("ERROR: plot.value needs to be a single character value corresponding to a variable in 'obj'")
}
bux.dta <- retrieveData(obj, variables=plot.value,...)
mean.dta <- ddply(.data=bux.dta, .variables=c("Days", "Sample_Name", inner.group.name, outer.group.name), .fun=summary.func)
names(mean.dta)[names(mean.dta) == "Value"] <- plot.value
mvtsplot.data.frame(use.dta=mean.dta, plot.value=plot.value, main=main, outer.group.name=outer.group.name, inner.group.name=inner.group.name, outer.cols=outer.cols,colorbrewer.pal=colorbrewer.pal)
})
setGeneric("makeIndexes", def=function(obj,...) standardGeneric("makeIndexes"))
setMethod("makeIndexes", signature("BuxcoDB"), function(obj, annotation.table=annoTable(obj))
{
db.con <- dbConnect(SQLite(), dbName(obj))
make.annotation.indexes(db.con, annotation.table)
invisible(dbDisconnect(db.con))
})
setMethod("show", signature("BuxcoDB"), function(object)
{
db.con <- dbConnect(SQLite(), dbName(object))
#Adapted from AnnotationDbi::show
if ("metadata" %in% dbListTables(db.con))
{
metadata <- dbGetQuery(db.con, "SELECT * FROM metadata")
cat(class(object), "object:\n")
cat(paste("Database:", object@db.name, "\n"))
cat(paste("Annotation Table:", object@annotation.table, "\n"))
for (i in seq_len(nrow(metadata))) {
cat("| ", metadata[i, "name"], ": ", metadata[i, "value"],
"\n", sep = "")
}
}else{
cat("BuxcoDB object\n")
cat(paste("Database:", object@db.name, "\n"))
cat(paste("Annotation Table:", object@annotation.table, "\n"))
cat("No metadata is available\n")
}
})
setGeneric("summaryMeasures", def=function(obj,...) standardGeneric("summaryMeasures"))
setMethod("summaryMeasures", signature("BuxcoDB"), function(obj, summary.type=c("time.to.max.response", "max.response", "auc.response", "mean.response"), sample.summary.func=function(x) data.frame(Value=mean(x$Value)), samples=NULL, variables=NULL, tables=NULL, Break_type_label="EXP", day.summary.column="Days")
{
summaries <- match.arg(summary.type, several.ok=TRUE)
if (is.function(sample.summary.func) == FALSE)
{
stop("ERROR: sample.summary.func needs to be a valid function")
}
ret.dta <- retrieveData(obj, samples=samples, variables=variables, tables=tables, Break_type_label=Break_type_label)
if (day.summary.column %in% names(ret.dta) == FALSE || any(is.na(as.numeric(ret.dta[,day.summary.column]))))
{
stop("ERROR: day.summary.column needs to be a valid name in the database and be coercible to numeric values")
}
if ("Break_type_label" %in% names(ret.dta) == FALSE)
{
stop("ERROR: Break_type_label needs to be part of the returned values for ret.dta")
}
if (any(Break_type_label %in% ret.dta$Break_type_label) == FALSE)
{
stop("ERROR: At least one type element of Break_type_label needs to exist in the current output")
}
sum.days <- ddply(ret.dta, c("Variable_Name", "Sample_Name", day.summary.column), .fun=sample.summary.func)
#a hack because ddply can't find the functions if they are supplied as characters...
ret.dta <- data.frame(Variable_Name=character(0), Sample_Name=character(), stringsAsFactors=FALSE)
for (i in summaries)
{
summary.func <- get(i)
temp.dta <- ddply(sum.days, c("Variable_Name", "Sample_Name"), .fun=summary.func, day.name=day.summary.column)
temp.dta$Variable_Name <- as.character(temp.dta$Variable_Name)
temp.dta$Sample_Name <- as.character(temp.dta$Sample_Name)
ret.dta <- merge(ret.dta, temp.dta, by=c("Variable_Name", "Sample_Name"), all=TRUE, incomparables=NULL, sort=FALSE)
}
return(ret.dta)
})
setGeneric("retrieveMatrix", def=function(obj,...) standardGeneric("retrieveMatrix"))
setMethod("retrieveMatrix", signature("BuxcoDB"), function(obj,...,formula=Sample_Name~Days~Variable_Name, summary.func=function(x) mean(log(x)))
{
if (is.function(summary.func)==F)
{
stop("summary.func needs to be a function taking a vector as an argument and returning a single value")
}
ret.dta <- retrieveData(obj,...)
form.terms <- all.vars(attr(terms(formula), "variables"))
if (class(formula) != "formula" || all(form.terms %in% names(ret.dta))==F)
{
stop("formula needs to refer to a valid formula involving columns as found using 'retrieveData'")
}
temp.mat <- acast(data=ret.dta, formula=formula, fun.aggregate=summary.func, value.var="Value")
temp.mat[is.nan(temp.mat)] <- NA
return(temp.mat)
})
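# Hypothetical usage sketch (not run): collapse the long-format data into a
# samples x days x variables array using the default formula; this assumes a
# 'Days' annotation column exists and 'Penh' is a recorded variable.
#   mat <- retrieveMatrix(bux.db, variables = "Penh")
#   dim(mat)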
setGeneric("annoTable", def=function(obj,...) standardGeneric("annoTable"))
setMethod("annoTable", signature("BuxcoDB"), function(obj)
{
return(obj@annotation.table)
})
setGeneric("annoCols", def=function(obj,...) standardGeneric("annoCols"))
setMethod("annoCols", signature("BuxcoDB"), function(obj)
{
db.con <- dbConnect(SQLite(), dbName(obj))
if (annoTable(obj) %in% dbListTables(db.con) == FALSE)
{
return(character(0))
}
else
{
#modified this 9-03-2013 to deal with the case of columns added by user that had _ID, really only deal with the case of Break_Chunk_ID as the ID col...
test.query <- dbListFields(db.con, annoTable(obj))
dbDisconnect(db.con)
id.col <- test.query[test.query == "Break_Chunk_ID"]
stopifnot(length(id.col) == 1)
lo.cols <- setdiff(test.query, id.col)
return(lo.cols)
}
})
setGeneric("annoLevels", def=function(obj,...) standardGeneric("annoLevels"))
setMethod("annoLevels", signature("BuxcoDB"), function(obj)
{
db.con <- dbConnect(SQLite(), dbName(obj))
use.cols <- annoCols(obj)
if (length(use.cols) == 0)
{
return(character(0))
}
else
{
ret.list <- lapply(use.cols, function(x)
{
dbGetQuery(db.con, paste("SELECT DISTINCT (", x,") FROM", annoTable(obj)))[,1]
})
names(ret.list) <- use.cols
dbDisconnect(db.con)
return(ret.list)
}
})
setGeneric("dbName", def=function(obj,...) standardGeneric("dbName"))
setMethod("dbName", signature("BuxcoDB"), function(obj)
{
return(obj@db.name)
})
setGeneric("samples", def=function(obj,...) standardGeneric("samples"))
setMethod("samples", signature("BuxcoDB"), function(obj)
{
get.simple.single.col.query(db.name=dbName(obj), var.name="Sample", col.suffix="_Name")
})
setGeneric("variables", def=function(obj,...) standardGeneric("variables"))
setMethod("variables", signature("BuxcoDB"), function(obj)
{
get.simple.single.col.query(db.name=dbName(obj), var.name="Variable", col.suffix="_Name")
})
setGeneric("tables", def=function(obj,...) standardGeneric("tables"))
setMethod("tables", signature("BuxcoDB"), function(obj)
{
get.simple.single.col.query(db.name=dbName(obj), var.name="Bux_table", col.suffix="_Name")
})
setGeneric("retrieveData", def=function(obj,...) standardGeneric("retrieveData"))
setMethod("retrieveData", signature("BuxcoDB"), function(obj, samples=NULL, variables=NULL, tables=NULL,phase=NULL,timepoint=NULL, debug=FALSE, ...)
{
supplied.args <- ls()
db.con <- dbConnect(SQLite(), dbName(obj))
#modified this on 1-22-2013, added column="P_Time" and break=list(table="Chunk_Time", column="Break_number") to make sure these make
#it to the results
table.map <- list(data=list(table="Data", column="Value"),
timepoint=list(table="Timepoint", column="P_Time"),
chunk.time=list(table="Chunk_Time", column="Break_sec_start"),
samples=list(table="Sample", column="Sample_Name"),
variables=list(table="Variable", column="Variable_Name"),
tables=list(table="Bux_table", column="Bux_table_Name"),
phase=list(table="Chunk_Time", column="Rec_Exp_date"),
break.num=list(table="Chunk_Time", column="Break_number"))
#if an additional annotation table is present
if (annoTable(obj) %in% dbListTables(db.con))
{
anno.tab.args <- list(...)
if (length(anno.tab.args) > 0 && (is.null(names(anno.tab.args)) == TRUE || all(names(anno.tab.args) %in% annoCols(obj)) == FALSE))
{
stop("ERROR: Need to supply named arguments (arg.name=c(1:10)) corresponding to columns of the annotation table, use annoCols(obj)")
}
for (column in annoCols(obj))
{
table.map[[column]] <- list(table=annoTable(obj), column=column, value=anno.tab.args[[column]])
}
}
supplied.args <- supplied.args[supplied.args %in% c("obj", "debug") == FALSE]
for (i in supplied.args)
{
arg.vals <- get(i)
table.map[[i]]$value <- arg.vals
}
query.res <- execute.query.map(db.con=db.con, query.map=table.map, debug=debug)
dbDisconnect(db.con)
#enforce a rough ordering of the columns, mainly for aesthetics--sample is first, value is last
if (all(c("Value", "Sample_Name") %in% colnames(query.res)))
{
lo.names <- setdiff(colnames(query.res), c("Value", "Sample_Name"))
new.order <- c("Sample_Name", lo.names, "Value")
query.res <- query.res[,new.order]
}
return(query.res)
})
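# Hypothetical usage sketch (not run): the filter values are illustrative, and
# named annotation filters such as 'Days' only work if that column was added
# to the annotation table beforehand.
#   dta <- retrieveData(bux.db, samples = "Mouse_1", variables = "Penh")
#   dta.exp <- retrieveData(bux.db, variables = "Penh", Days = 0:7)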
setGeneric("addAnnotation", def=function(obj,...) standardGeneric("addAnnotation"))
setMethod("addAnnotation", signature("BuxcoDB"), function(obj, query=NULL, index=FALSE, id.col.regex="_ID", debug=FALSE)
{
if (missing(query) || is.null(query) || is.function(query) == FALSE)
{
stop("ERROR: Need to supply a function which takes a BuxcoDB object to the query argument")
}
if (length(index) != 1 || is.logical(index) == FALSE)
{
stop("ERROR: index needs to be a logical value")
}
if (length(id.col.regex) != 1 || is.character(id.col.regex) == FALSE)
{
stop("ERROR: id.col.regex needs to be a character string")
}
if (length(debug) != 1 || is.logical(debug) == FALSE)
{
stop("ERROR: debug needs to be a logical value")
}
db.con <- dbConnect(SQLite(), dbName(obj))
cur.tables <- dbListTables(db.con)
use.query <- query(obj)
if(annoTable(obj) %in% cur.tables)
{
temp.tab.1 <- paste(annoTable(obj), "temp1", sep="_")
temp.tab.2 <- paste(annoTable(obj), "temp2", sep="_")
query.list <- paste("CREATE TEMPORARY TABLE", temp.tab.1,"AS SELECT * FROM", annoTable(obj))
if (length(use.query) > 1)
{
query.list <- c(query.list, use.query[1:(length(use.query)-1)])
}
query.list <- c(query.list, paste("CREATE TEMPORARY TABLE", temp.tab.2, " AS", use.query[length(use.query)]), paste("DROP TABLE", annoTable(obj)),
paste("CREATE TABLE ", annoTable(obj), "AS SELECT * FROM", temp.tab.1, "NATURAL JOIN", temp.tab.2),
paste("DROP TABLE", temp.tab.1), paste("DROP TABLE", temp.tab.2))
}
else
{
#otherwise just create the table directly
if (length(use.query) > 1)
{
query.list <- c(use.query[1:(length(use.query)-1)], paste("CREATE TABLE", annoTable(obj), "AS", use.query[length(use.query)]))
}
else
{
query.list <- paste("CREATE TABLE", annoTable(obj), "AS", use.query[length(use.query)])
}
}
for (i in query.list)
{
if (debug==TRUE)
{
message(i)
}
else
{
.run.update.statement(db.con, i)
}
}
if (index==TRUE)
{
make.annotation.indexes(db.con, annoTable(obj))
}
dbDisconnect(db.con)
})
make.annotation.indexes <- function(db.con, anno.table)
{
test.query <- dbGetQuery(db.con, paste("SELECT * FROM", anno.table, "limit 5"))
id.col <- names(test.query)[grep("_ID", names(test.query))]
if (length(id.col) > 1 && any(grep("_ID", names(test.query)) == 1))
{
id.col <- names(test.query)[1]
}else if (length(id.col) == 0)
{
warning(paste("Warning:", anno.table, "does not appear to have an ID column, skipping indexing..."))
return(invisible(TRUE))
}else if (length(id.col) > 1) {
warning(paste("Warning: There appears to be multiple _ID columns for table:", anno.table, "skipping indexing..."))
invisible(T)
}
lo.cols <- setdiff(names(test.query), id.col)
index.query <- paste("CREATE INDEX IF NOT EXISTS",paste(anno.table,"_", id.col, "_ind", sep=""),"ON",anno.table,"(",id.col,")")
.run.update.statement(db.con, index.query)
if (length(lo.cols) > 1)
{
perms <- expand.grid(rep(list(lo.cols), length(lo.cols)))
use.perms <- apply(perms, 1, function(x) sum(duplicated(x)) == 0)
perms <- perms[use.perms,]
for (i in 1:nrow(perms))
{
paste.rows <- paste(unlist(perms[i,]), collapse=", ")
var.query <- paste("CREATE INDEX IF NOT EXISTS",paste(anno.table,"_ind_",i,sep=""),"ON",anno.table,"(",paste.rows,")")
.run.update.statement(db.con, var.query)
}
}
}
dbImport <- function(bux.db=NULL, bux.dta, db.name="merge_test_1.db", debug=FALSE)
{
if (missing(bux.db) == FALSE && is.null(bux.db) == FALSE && class(bux.db) != "BuxcoDB")
{
stop("ERROR: bux.db needs to be a BuxcoDB object or not specified at all")
}
else if (missing(bux.db) == FALSE && is.null(bux.db) == FALSE && class(bux.db) == "BuxcoDB")
{
file.copy(from=dbName(bux.db), to=db.name)
}
if (is.data.frame(bux.dta) == FALSE || nrow(bux.dta) < 1)
{
stop("ERROR: bux.dta needs to be a dataframe containing at least one row")
}
else if (validate.dta(bux.db, bux.dta) == FALSE)
{
stop("ERROR: bux.dta needs to have the same columns as bux.db, compare bux.dta with retrieveData(bux.db)")
}
if (is.character(db.name) == FALSE || length(db.name) != 1)
{
stop("ERROR: db.name needs to be a character string of the path to a new database")
}
#first create the simple tables, Sample, Bux_table, Variable, Timepoint
#then do chunk_time followed by additional labels and data at the end
#to do this first define a list containing definitions to create database tables:
schema.list <- list(Sample=list(primary.key="Sample_ID", foreign.keys=NULL, record.vars="Sample_Name"),
Bux_table=list(primary.key="Bux_table_ID", foreign.keys=NULL, record.vars="Bux_table_Name"),
Variable=list(primary.key="Variable_ID", foreign.keys=NULL, record.vars="Variable_Name"),
Timepoint=list(primary.key="Time_ID", foreign.keys=NULL, record.vars="P_Time"),
Chunk_Time=list(primary.key="Break_Chunk_ID", foreign.keys=c("Sample_ID", "Time_ID", "Bux_table_ID", "Variable_ID", "Break_number"),
record.vars=c("Break_sec_start", "Rec_Exp_date")),
Data=list(primary.key="Data_ID", foreign.keys=c("Time_ID", "Variable_ID", "Sample_ID", "Bux_table_ID"), record.vars="Value"))
db.con <- dbConnect(SQLite(), db.name)
db.tables <- dbListTables(db.con)
if ("metadata" %in% db.tables)
{
.run.update.statement(db.con, "DROP TABLE metadata;")
db.tables <- dbListTables(db.con)
}
if (length(setdiff(names(schema.list), db.tables)) == length(schema.list))
{
create.tables(db.con)
db.tables <- dbListTables(db.con)
}
for (i in names(schema.list))
{
if (debug == TRUE) message(paste("Starting table", i))
cur.schema <- schema.list[[i]]
if (i %in% db.tables == FALSE)
{
stop(paste("ERROR: table", i, "not found in database"))
}
if (is.null(cur.schema$foreign.keys))
{
rev.query <- db.insert.autoinc(db.con=db.con, table.name=i, col.name=cur.schema$record.vars, values=unique(bux.dta[,cur.schema$record.vars]),
return.query.type="reverse", debug=debug)
bux.dta <- merge(bux.dta, rev.query, all=TRUE, incomparables=NULL, sort=FALSE)
}
else
{
relevant.cols <- c(cur.schema$foreign.keys, cur.schema$record.vars)
temp.dta <- bux.dta[,relevant.cols]
temp.dta <- temp.dta[!duplicated(temp.dta),]
use.sql <- paste("INSERT INTO", i, "(", paste(relevant.cols, collapse=",") ,")","VALUES (", paste(paste("$", relevant.cols, sep=""), collapse=",") ,")")
if (debug==TRUE) message(use.sql)
#find the previous max primary key if applicable
prev.max.primary <- as.numeric(dbGetQuery(db.con, paste("SELECT MAX(",cur.schema$primary.key,") FROM", i))[,1])
if (is.na(prev.max.primary)) prev.max.primary <- 0
.insert.data(db.con, use.sql, temp.dta)
bux.dta <- merge(bux.dta, dbGetQuery(db.con, paste("SELECT * FROM", i, "WHERE", cur.schema$primary.key, ">", prev.max.primary)), all=TRUE, incomparables=NULL, sort=FALSE)
}
}
if (debug==TRUE) message("Starting annotation portion")
#figure out if additional annotation is present
annot.cols <- setdiff(colnames(bux.dta), unique(as.character(unlist(schema.list))))
if (length(annot.cols) > 0)
{
#does the annotation table exist?
annot.tab <- setdiff(db.tables, c(names(schema.list), "sqlite_sequence"))
temp.dta <- bux.dta[,c(schema.list$Chunk_Time$primary.key, annot.cols)]
temp.dta <- temp.dta[!duplicated(temp.dta),]
#if it doesn't exist
if (length(annot.tab) == 0)
{
#just to get the default additional annotation table name specified via the prototype
temp.obj <- new("BuxcoDB")
dbWriteTable(db.con, annoTable(temp.obj), temp.dta, row.names=FALSE)
make.annotation.indexes(db.con, anno.table=annoTable(temp.obj))
cur.annot.table <- annoTable(temp.obj)
}
else
{
cur.annot.tab.cols <- dbListFields(db.con, annot.tab)
if (all(annot.cols %in% cur.annot.tab.cols) == FALSE)
{
stop("ERROR: annotation columns are discordant between bux.dta and bux.db")
}
use.sql <- paste("INSERT INTO", annot.tab, "(", paste(colnames(temp.dta), collapse=",") ,")","VALUES (", paste(paste("$", colnames(temp.dta), sep=""), collapse=",") ,")")
.insert.data(db.con, use.sql, temp.dta)
cur.annot.table <- annot.tab
}
}
else
{
cur.annot.table <- "Additional_labels"
}
#now make a new metadata table
meta.dta <- data.frame(name=c("PARSE_DATE", "DBSCHEMA", "package", "Db type", "DBSCHEMAVERION"),
value=c(as.character(Sys.time()), "Buxco", "plethy", "BuxcoDB", "1.0"), stringsAsFactors=F)
dbWriteTable(db.con, "metadata", meta.dta, row.names=F)
dbDisconnect(db.con)
return(makeBuxcoDB(db.name=db.name, annotation.table=cur.annot.table))
}
#incomplete for now
validate.dta <- function(bux.db, bux.dta)
{
return(TRUE)
}
|
/R/BuxcoDB.R
|
no_license
|
dbottomly/plethy
|
R
| false | false | 26,316 |
r
|
#basic class that points to the database and allows easier manipulations
setClass("BuxcoDB", representation(db.name="character", annotation.table="character"), prototype=prototype(db.name=character(0), annotation.table="Additional_labels"))
.run.update.statement <- function(db.con, query){
state <- dbSendStatement(db.con, query)
dbHasCompleted(state)
dbClearResult(state)
}
.insert.data <- function(db.con, query, data){
#way too slow, stick with deprecated version for now
#state <- dbSendQuery(db.con, query)
#dbBind(state, data)
#dbClearResult(state)
dbBegin(db.con)
suppressWarnings(state <- dbSendPreparedQuery(db.con, query, bind.data = data))
dbClearResult(state)
dbCommit(db.con)
}
makeBuxcoDB <- function(db.name=NULL, annotation.table="Additional_labels")
{
if (missing(db.name) || is.null(db.name))
{
stop("ERROR: Need to supply a valid file name for db.name")
}
else if (! file.exists(db.name))
{
stop(paste("ERROR:", db.name, "does not exist"))
}
if (is.character(annotation.table) == FALSE || length(annotation.table) != 1)
{
stop("ERROR annotation.table needs to be a character vector of length 1")
}
return(new("BuxcoDB", db.name=db.name, annotation.table=annotation.table))
}
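# Hypothetical usage sketch (not run; the database path below is illustrative):
#   bux.db <- makeBuxcoDB(db.name = "buxco_parsed.db")
#   dbName(bux.db)      # path to the underlying SQLite file
#   annoTable(bux.db)   # name of the additional-annotation table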
setGeneric("tsplot", def=function(obj,...) standardGeneric("tsplot"))
setMethod("tsplot", signature("BuxcoDB"), function(obj, ..., exp.factor=NULL, summary.func=function(x) mean(log(x)), legend.name="Factor", xlab="Days", ylab="mean(log(Value))")
{
if (is.function(summary.func) == F)
{
stop("ERROR: summary.func needs to be a function that takes vector and returns a single value")
}
if ((missing(legend.name) || (is.character(legend.name) && length(legend.name) == 1)) == F)
{
stop("ERROR: If legend.name is non-missing, it needs to be a single character value")
}
if ((missing(xlab) || (is.character(xlab) && length(xlab) == 1)) == F)
{
stop("ERROR: If xlab is non-missing, it needs to be a single character value")
}
if ((missing(ylab) || (is.character(ylab) && length(ylab) == 1)) == F)
{
stop("ERROR: If ylab is non-missing, it needs to be a single character value")
}
use.dta <- retrieveData(obj, ...)
if ((missing(exp.factor) || is.null(exp.factor) || (is.character(exp.factor) && length(exp.factor) == 1 && exp.factor %in% names(use.dta))) == F)
{
stop("ERROR: If exp.factor is specified, it needs to correspond to a column from 'retrieveData'")
}
show(qplot(x=Days, y=Value, data=use.dta, group=Sample_Name, stat="summary", fun.y=summary.func, facets=.~Variable_Name, geom="line", xlab=xlab, ylab=ylab) + aes_string(color=exp.factor) + labs(color=legend.name))
})
setGeneric("mvtsplot", def=function(obj,...) standardGeneric("mvtsplot"))
setMethod("mvtsplot", signature("BuxcoDB"), function(obj,..., plot.value="Penh",main=plot.value, summary.func=function(x) data.frame(Value=mean(log(x$Value))), outer.group.name=NULL, inner.group.name=NULL, outer.cols=NULL, colorbrewer.pal="PRGn")
{
if ("Days" %in% annoCols(obj) == F)
{
stop("ERROR: The BuxcoDB object needs to contain a 'Days' column potentially created through the use of 'day.infer.query'")
}
if ((is.character(plot.value) && length(plot.value) == 1 && plot.value %in% variables(obj)) == F)
{
stop("ERROR: plot.value needs to be a single character value corresponding to a variable in 'obj'")
}
bux.dta <- retrieveData(obj, variables=plot.value,...)
mean.dta <- ddply(.data=bux.dta, .variables=c("Days", "Sample_Name", inner.group.name, outer.group.name), .fun=summary.func)
names(mean.dta)[names(mean.dta) == "Value"] <- plot.value
mvtsplot.data.frame(use.dta=mean.dta, plot.value=plot.value, main=main, outer.group.name=outer.group.name, inner.group.name=inner.group.name, outer.cols=outer.cols,colorbrewer.pal=colorbrewer.pal)
})
setGeneric("makeIndexes", def=function(obj,...) standardGeneric("makeIndexes"))
setMethod("makeIndexes", signature("BuxcoDB"), function(obj, annotation.table=annoTable(obj))
{
db.con <- dbConnect(SQLite(), dbName(obj))
make.annotation.indexes(db.con, annotation.table)
invisible(dbDisconnect(db.con))
})
setMethod("show", signature("BuxcoDB"), function(object)
{
db.con <- dbConnect(SQLite(), dbName(object))
#Adapted from AnnotationDbi::show
if ("metadata" %in% dbListTables(db.con))
{
metadata <- dbGetQuery(db.con, "SELECT * FROM metadata")
cat(class(object), "object:\n")
cat(paste("Database:", object@db.name, "\n"))
cat(paste("Annotation Table:", object@annotation.table, "\n"))
for (i in seq_len(nrow(metadata))) {
cat("| ", metadata[i, "name"], ": ", metadata[i, "value"],
"\n", sep = "")
}
}else{
cat("BuxcoDB object\n")
cat(paste("Database:", object@db.name, "\n"))
cat(paste("Annotation Table:", object@annotation.table, "\n"))
cat("No metadata is available\n")
}
})
setGeneric("summaryMeasures", def=function(obj,...) standardGeneric("summaryMeasures"))
setMethod("summaryMeasures", signature("BuxcoDB"), function(obj, summary.type=c("time.to.max.response", "max.response", "auc.response", "mean.response"), sample.summary.func=function(x) data.frame(Value=mean(x$Value)), samples=NULL, variables=NULL, tables=NULL, Break_type_label="EXP", day.summary.column="Days")
{
summaries <- match.arg(summary.type, several.ok=TRUE)
if (is.function(sample.summary.func) == FALSE)
{
stop("ERROR: sample.summary.func needs to be a valid function")
}
ret.dta <- retrieveData(obj, samples=samples, variables=variables, tables=tables, Break_type_label=Break_type_label)
if (day.summary.column %in% names(ret.dta) == FALSE || any(is.na(as.numeric(ret.dta[,day.summary.column]))))
{
stop("ERROR: day.summary.column needs to be a valid name in the database and be coercible to numeric values")
}
if ("Break_type_label" %in% names(ret.dta) == FALSE)
{
stop("ERROR: Break_type_label needs to be part of the returned values for ret.dta")
}
if (any(Break_type_label %in% ret.dta$Break_type_label) == FALSE)
{
stop("ERROR: At least one type element of Break_type_label needs to exist in the current output")
}
sum.days <- ddply(ret.dta, c("Variable_Name", "Sample_Name", day.summary.column), .fun=sample.summary.func)
#a hack because ddply can't find the functions if they are supplied as characters...
ret.dta <- data.frame(Variable_Name=character(0), Sample_Name=character(), stringsAsFactors=FALSE)
for (i in summaries)
{
summary.func <- get(i)
temp.dta <- ddply(sum.days, c("Variable_Name", "Sample_Name"), .fun=summary.func, day.name=day.summary.column)
temp.dta$Variable_Name <- as.character(temp.dta$Variable_Name)
temp.dta$Sample_Name <- as.character(temp.dta$Sample_Name)
ret.dta <- merge(ret.dta, temp.dta, by=c("Variable_Name", "Sample_Name"), all=TRUE, incomparables=NULL, sort=FALSE)
}
return(ret.dta)
})
setGeneric("retrieveMatrix", def=function(obj,...) standardGeneric("retrieveMatrix"))
setMethod("retrieveMatrix", signature("BuxcoDB"), function(obj,...,formula=Sample_Name~Days~Variable_Name, summary.func=function(x) mean(log(x)))
{
if (is.function(summary.func)==F)
{
stop("summary.func needs to be a function taking a vector as an argument and returning a single value")
}
ret.dta <- retrieveData(obj,...)
form.terms <- all.vars(attr(terms(formula), "variables"))
if (class(formula) != "formula" || all(form.terms %in% names(ret.dta))==F)
{
stop("formula needs to refer to a valid formula involving columns as found using 'retrieveData'")
}
temp.mat <- acast(data=ret.dta, formula=formula, fun.aggregate=summary.func, value.var="Value")
temp.mat[is.nan(temp.mat)] <- NA
return(temp.mat)
})
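# Hypothetical usage sketch (not run): collapse the long-format data into a
# samples x days x variables array using the default formula; this assumes a
# 'Days' annotation column exists and 'Penh' is a recorded variable.
#   mat <- retrieveMatrix(bux.db, variables = "Penh")
#   dim(mat)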
setGeneric("annoTable", def=function(obj,...) standardGeneric("annoTable"))
setMethod("annoTable", signature("BuxcoDB"), function(obj)
{
return(obj@annotation.table)
})
setGeneric("annoCols", def=function(obj,...) standardGeneric("annoCols"))
setMethod("annoCols", signature("BuxcoDB"), function(obj)
{
db.con <- dbConnect(SQLite(), dbName(obj))
if (annoTable(obj) %in% dbListTables(db.con) == FALSE)
{
return(character(0))
}
else
{
#modified this 9-03-2013 to deal with the case of columns added by user that had _ID, really only deal with the case of Break_Chunk_ID as the ID col...
test.query <- dbListFields(db.con, annoTable(obj))
dbDisconnect(db.con)
id.col <- test.query[test.query == "Break_Chunk_ID"]
stopifnot(length(id.col) == 1)
lo.cols <- setdiff(test.query, id.col)
return(lo.cols)
}
})
setGeneric("annoLevels", def=function(obj,...) standardGeneric("annoLevels"))
setMethod("annoLevels", signature("BuxcoDB"), function(obj)
{
db.con <- dbConnect(SQLite(), dbName(obj))
use.cols <- annoCols(obj)
if (length(use.cols) == 0)
{
return(character(0))
}
else
{
ret.list <- lapply(use.cols, function(x)
{
dbGetQuery(db.con, paste("SELECT DISTINCT (", x,") FROM", annoTable(obj)))[,1]
})
names(ret.list) <- use.cols
dbDisconnect(db.con)
return(ret.list)
}
})
setGeneric("dbName", def=function(obj,...) standardGeneric("dbName"))
setMethod("dbName", signature("BuxcoDB"), function(obj)
{
return(obj@db.name)
})
setGeneric("samples", def=function(obj,...) standardGeneric("samples"))
setMethod("samples", signature("BuxcoDB"), function(obj)
{
get.simple.single.col.query(db.name=dbName(obj), var.name="Sample", col.suffix="_Name")
})
setGeneric("variables", def=function(obj,...) standardGeneric("variables"))
setMethod("variables", signature("BuxcoDB"), function(obj)
{
get.simple.single.col.query(db.name=dbName(obj), var.name="Variable", col.suffix="_Name")
})
setGeneric("tables", def=function(obj,...) standardGeneric("tables"))
setMethod("tables", signature("BuxcoDB"), function(obj)
{
get.simple.single.col.query(db.name=dbName(obj), var.name="Bux_table", col.suffix="_Name")
})
setGeneric("retrieveData", def=function(obj,...) standardGeneric("retrieveData"))
setMethod("retrieveData", signature("BuxcoDB"), function(obj, samples=NULL, variables=NULL, tables=NULL,phase=NULL,timepoint=NULL, debug=FALSE, ...)
{
supplied.args <- ls()
db.con <- dbConnect(SQLite(), dbName(obj))
#modified this on 1-22-2013, added column="P_Time" and break=list(table="Chunk_Time", column="Break_number") to make sure these make
#it to the results
table.map <- list(data=list(table="Data", column="Value"),
timepoint=list(table="Timepoint", column="P_Time"),
chunk.time=list(table="Chunk_Time", column="Break_sec_start"),
samples=list(table="Sample", column="Sample_Name"),
variables=list(table="Variable", column="Variable_Name"),
tables=list(table="Bux_table", column="Bux_table_Name"),
phase=list(table="Chunk_Time", column="Rec_Exp_date"),
break.num=list(table="Chunk_Time", column="Break_number"))
#if an additional annotation table is present
if (annoTable(obj) %in% dbListTables(db.con))
{
anno.tab.args <- list(...)
if (length(anno.tab.args) > 0 && (is.null(names(anno.tab.args)) == TRUE || all(names(anno.tab.args) %in% annoCols(obj)) == FALSE))
{
stop("ERROR: Need to supply named arguments (arg.name=c(1:10)) corresponding to columns of the annotation table, use annoCols(obj)")
}
for (column in annoCols(obj))
{
table.map[[column]] <- list(table=annoTable(obj), column=column, value=anno.tab.args[[column]])
}
}
supplied.args <- supplied.args[supplied.args %in% c("obj", "debug") == FALSE]
for (i in supplied.args)
{
arg.vals <- get(i)
table.map[[i]]$value <- arg.vals
}
query.res <- execute.query.map(db.con=db.con, query.map=table.map, debug=debug)
dbDisconnect(db.con)
#enforce a rough ordering of the columns, mainly for aesthetics--sample is first, value is last
if (all(c("Value", "Sample_Name") %in% colnames(query.res)))
{
lo.names <- setdiff(colnames(query.res), c("Value", "Sample_Name"))
new.order <- c("Sample_Name", lo.names, "Value")
query.res <- query.res[,new.order]
}
return(query.res)
})
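# Hypothetical usage sketch (not run): the filter values are illustrative, and
# named annotation filters such as 'Days' only work if that column was added
# to the annotation table beforehand.
#   dta <- retrieveData(bux.db, samples = "Mouse_1", variables = "Penh")
#   dta.exp <- retrieveData(bux.db, variables = "Penh", Days = 0:7)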
setGeneric("addAnnotation", def=function(obj,...) standardGeneric("addAnnotation"))
setMethod("addAnnotation", signature("BuxcoDB"), function(obj, query=NULL, index=FALSE, id.col.regex="_ID", debug=FALSE)
{
if (missing(query) || is.null(query) || is.function(query) == FALSE)
{
stop("ERROR: Need to supply a function which takes a BuxcoDB object to the query argument")
}
if (length(index) != 1 || is.logical(index) == FALSE)
{
stop("ERROR: index needs to be a logical value")
}
if (length(id.col.regex) != 1 || is.character(id.col.regex) == FALSE)
{
stop("ERROR: id.col.regex needs to be a character string")
}
if (length(debug) != 1 || is.logical(debug) == FALSE)
{
stop("ERROR: debug needs to be a logical value")
}
db.con <- dbConnect(SQLite(), dbName(obj))
cur.tables <- dbListTables(db.con)
use.query <- query(obj)
if(annoTable(obj) %in% cur.tables)
{
temp.tab.1 <- paste(annoTable(obj), "temp1", sep="_")
temp.tab.2 <- paste(annoTable(obj), "temp2", sep="_")
query.list <- paste("CREATE TEMPORARY TABLE", temp.tab.1,"AS SELECT * FROM", annoTable(obj))
if (length(use.query) > 1)
{
query.list <- c(query.list, use.query[1:(length(use.query)-1)])
}
query.list <- c(query.list, paste("CREATE TEMPORARY TABLE", temp.tab.2, " AS", use.query[length(use.query)]), paste("DROP TABLE", annoTable(obj)),
paste("CREATE TABLE ", annoTable(obj), "AS SELECT * FROM", temp.tab.1, "NATURAL JOIN", temp.tab.2),
paste("DROP TABLE", temp.tab.1), paste("DROP TABLE", temp.tab.2))
}
else
{
#otherwise just create the table directly
if (length(use.query) > 1)
{
query.list <- c(use.query[1:(length(use.query)-1)], paste("CREATE TABLE", annoTable(obj), "AS", use.query[length(use.query)]))
}
else
{
query.list <- paste("CREATE TABLE", annoTable(obj), "AS", use.query[length(use.query)])
}
}
for (i in query.list)
{
if (debug==TRUE)
{
message(i)
}
else
{
.run.update.statement(db.con, i)
}
}
if (index==TRUE)
{
make.annotation.indexes(db.con, annoTable(obj))
}
dbDisconnect(db.con)
})
make.annotation.indexes <- function(db.con, anno.table)
{
test.query <- dbGetQuery(db.con, paste("SELECT * FROM", anno.table, "limit 5"))
id.col <- names(test.query)[grep("_ID", names(test.query))]
if (length(id.col) > 1 && any(grep("_ID", names(test.query)) == 1))
{
id.col <- names(test.query)[1]
}else if (length(id.col) == 0)
{
warning(paste("Warning:", anno.table, "does not appear to have an ID column, skipping indexing..."))
return(invisible(TRUE))
}else if (length(id.col) > 1) {
warning(paste("Warning: There appears to be multiple _ID columns for table:", anno.table, "skipping indexing..."))
invisible(T)
}
lo.cols <- setdiff(names(test.query), id.col)
index.query <- paste("CREATE INDEX IF NOT EXISTS",paste(anno.table,"_", id.col, "_ind", sep=""),"ON",anno.table,"(",id.col,")")
.run.update.statement(db.con, index.query)
if (length(lo.cols) > 1)
{
perms <- expand.grid(rep(list(lo.cols), length(lo.cols)))
use.perms <- apply(perms, 1, function(x) sum(duplicated(x)) == 0)
perms <- perms[use.perms,]
for (i in 1:nrow(perms))
{
paste.rows <- paste(unlist(perms[i,]), collapse=", ")
var.query <- paste("CREATE INDEX IF NOT EXISTS",paste(anno.table,"_ind_",i,sep=""),"ON",anno.table,"(",paste.rows,")")
.run.update.statement(db.con, var.query)
}
}
}
dbImport <- function(bux.db=NULL, bux.dta, db.name="merge_test_1.db", debug=FALSE)
{
if (missing(bux.db) == FALSE && is.null(bux.db) == FALSE && class(bux.db) != "BuxcoDB")
{
stop("ERROR: bux.db needs to be a BuxcoDB object or not specified at all")
}
else if (missing(bux.db) == FALSE && is.null(bux.db) == FALSE && class(bux.db) == "BuxcoDB")
{
file.copy(from=dbName(bux.db), to=db.name)
}
if (is.data.frame(bux.dta) == FALSE || nrow(bux.dta) < 1)
{
stop("ERROR: bux.dta needs to be a dataframe containing at least one row")
}
else if (validate.dta(bux.db, bux.dta) == FALSE)
{
stop("ERROR: bux.dta needs to have the same columns as bux.db, compare bux.dta with retrieveData(bux.db)")
}
if (is.character(db.name) == FALSE || length(db.name) != 1)
{
stop("ERROR: db.name needs to be a character string of the path to a new database")
}
#first create the simple tables, Sample, Bux_table, Variable, Timepoint
#then do chunk_time followed by additional labels and data at the end
#to do this first define a list containing definitions to create database tables:
schema.list <- list(Sample=list(primary.key="Sample_ID", foreign.keys=NULL, record.vars="Sample_Name"),
Bux_table=list(primary.key="Bux_table_ID", foreign.keys=NULL, record.vars="Bux_table_Name"),
Variable=list(primary.key="Variable_ID", foreign.keys=NULL, record.vars="Variable_Name"),
Timepoint=list(primary.key="Time_ID", foreign.keys=NULL, record.vars="P_Time"),
Chunk_Time=list(primary.key="Break_Chunk_ID", foreign.keys=c("Sample_ID", "Time_ID", "Bux_table_ID", "Variable_ID", "Break_number"),
record.vars=c("Break_sec_start", "Rec_Exp_date")),
Data=list(primary.key="Data_ID", foreign.keys=c("Time_ID", "Variable_ID", "Sample_ID", "Bux_table_ID"), record.vars="Value"))
db.con <- dbConnect(SQLite(), db.name)
db.tables <- dbListTables(db.con)
if ("metadata" %in% db.tables)
{
.run.update.statement(db.con, "DROP TABLE metadata;")
db.tables <- dbListTables(db.con)
}
if (length(setdiff(names(schema.list), db.tables)) == length(schema.list))
{
create.tables(db.con)
db.tables <- dbListTables(db.con)
}
for (i in names(schema.list))
{
if (debug == TRUE) message(paste("Starting table", i))
cur.schema <- schema.list[[i]]
if (i %in% db.tables == FALSE)
{
stop(paste("ERROR: table", i, "not found in database"))
}
if (is.null(cur.schema$foreign.keys))
{
rev.query <- db.insert.autoinc(db.con=db.con, table.name=i, col.name=cur.schema$record.vars, values=unique(bux.dta[,cur.schema$record.vars]),
return.query.type="reverse", debug=debug)
bux.dta <- merge(bux.dta, rev.query, all=TRUE, incomparables=NULL, sort=FALSE)
}
else
{
relevant.cols <- c(cur.schema$foreign.keys, cur.schema$record.vars)
temp.dta <- bux.dta[,relevant.cols]
temp.dta <- temp.dta[!duplicated(temp.dta),]
use.sql <- paste("INSERT INTO", i, "(", paste(relevant.cols, collapse=",") ,")","VALUES (", paste(paste("$", relevant.cols, sep=""), collapse=",") ,")")
if (debug==TRUE) message(use.sql)
#find the previous max primary key if applicable
prev.max.primary <- as.numeric(dbGetQuery(db.con, paste("SELECT MAX(",cur.schema$primary.key,") FROM", i))[,1])
if (is.na(prev.max.primary)) prev.max.primary <- 0
.insert.data(db.con, use.sql, temp.dta)
bux.dta <- merge(bux.dta, dbGetQuery(db.con, paste("SELECT * FROM", i, "WHERE", cur.schema$primary.key, ">", prev.max.primary)), all=TRUE, incomparables=NULL, sort=FALSE)
}
}
if (debug==TRUE) message("Starting annotation portion")
#figure out if additional annotation is present
annot.cols <- setdiff(colnames(bux.dta), unique(as.character(unlist(schema.list))))
if (length(annot.cols) > 0)
{
#does the annotation table exist?
annot.tab <- setdiff(db.tables, c(names(schema.list), "sqlite_sequence"))
temp.dta <- bux.dta[,c(schema.list$Chunk_Time$primary.key, annot.cols)]
temp.dta <- temp.dta[!duplicated(temp.dta),]
#if it doesn't exist
if (length(annot.tab) == 0)
{
#just to get the default additional annotation table name specified via the prototype
temp.obj <- new("BuxcoDB")
dbWriteTable(db.con, annoTable(temp.obj), temp.dta, row.names=FALSE)
make.annotation.indexes(db.con, anno.table=annoTable(temp.obj))
cur.annot.table <- annoTable(temp.obj)
}
else
{
cur.annot.tab.cols <- dbListFields(db.con, annot.tab)
if (all(annot.cols %in% cur.annot.tab.cols) == FALSE)
{
stop("ERROR: annotation columns are discordant between bux.dta and bux.db")
}
use.sql <- paste("INSERT INTO", annot.tab, "(", paste(colnames(temp.dta), collapse=",") ,")","VALUES (", paste(paste("$", colnames(temp.dta), sep=""), collapse=",") ,")")
.insert.data(db.con, use.sql, temp.dta)
cur.annot.table <- annot.tab
}
}
else
{
cur.annot.table <- "Additional_labels"
}
#now make a new metadata table
meta.dta <- data.frame(name=c("PARSE_DATE", "DBSCHEMA", "package", "Db type", "DBSCHEMAVERION"),
value=c(as.character(Sys.time()), "Buxco", "plethy", "BuxcoDB", "1.0"), stringsAsFactors=F)
dbWriteTable(db.con, "metadata", meta.dta, row.names=F)
dbDisconnect(db.con)
return(makeBuxcoDB(db.name=db.name, annotation.table=cur.annot.table))
}
#incomplete for now
validate.dta <- function(bux.db, bux.dta)
{
return(TRUE)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dse22e.R
\docType{data}
\name{dse22e}
\alias{dse22e}
\title{Dataset for Exercise E, Chapter 22}
\format{A \code{data.frame} with 9 rows and 7 variables:
\describe{
\item{y}{}
\item{x1}{}
\item{x2}{}
\item{x3}{}
\item{x4}{}
\item{x5}{}
\item{x6}{}
}}
\source{
Draper, N.R., Smith, H., (1998) Applied Regression Analysis, 3rd ed., New York: Wiley
}
\usage{
dse22e
}
\description{
Dataset for Exercise E, Chapter 22
}
\examples{
dse22e
}
\keyword{datasets}
|
/man/dse22e.Rd
|
no_license
|
danielgil1/aprean3
|
R
| false | false | 540 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dse22e.R
\docType{data}
\name{dse22e}
\alias{dse22e}
\title{Dataset for Exercise E, Chapter 22}
\format{A \code{data.frame} with 9 rows and 7 variables:
\describe{
\item{y}{}
\item{x1}{}
\item{x2}{}
\item{x3}{}
\item{x4}{}
\item{x5}{}
\item{x6}{}
}}
\source{
Draper, N.R., Smith, H., (1998) Applied Regression Analysis, 3rd ed., New York: Wiley
}
\usage{
dse22e
}
\description{
Dataset for Exercise E, Chapter 22
}
\examples{
dse22e
}
\keyword{datasets}
|
#' FindBestModel
#'
#' @description Find the best model for a given forecast time
#'
#' @param dfPerf The performance dataframe
#' @param forecast The forecast time
#' @param vecOutcome The vector of outcomes considered
#'
#' @return The label of the best model
#' @export
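#' @examples
#' \dontrun{
#' ## Hypothetical performance table; the column names follow the conventions
#' ## assumed by this function (FORECAST, OUTCOME, MRE, SELECTION, SPAN, Model).
#' dfPerf <- data.frame(FORECAST = 7,
#'                      OUTCOME = "Prevalent",
#'                      MRE = c(0.12, 0.08),
#'                      SELECTION = c("All", "Lasso"),
#'                      SPAN = c(14, 21),
#'                      Model = c("enet", "rf"))
#' FindBestModel(dfPerf, forecast = 7, vecOutcome = "Prevalent")
#' ## expected label: "Prevalent_Lasso_span21_rf"
#' }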
FindBestModel <- function(dfPerf, forecast = 7, vecOutcome = c("Prevalent variation", "HOSP_DERIV", "Prevalent")){
res <- dfPerf %>%
filter(FORECAST == forecast, OUTCOME %in% vecOutcome) %>%
slice_min(MRE, n = 1) %>%
mutate(Label = paste0(OUTCOME, "_", SELECTION, "_span", SPAN, "_", Model)) %>%
pull(Label)
return(res)
}
|
/R/FindBestModel.R
|
no_license
|
thomasferte/PredictCovidOpen
|
R
| false | false | 615 |
r
|
#' FindBestModel
#'
#' @description Find the best model for a given forecast time
#'
#' @param dfPerf The performance dataframe
#' @param forecast The forecast time
#' @param vecOutcome The vector of outcomes considered
#'
#' @return The label of the best model
#' @export
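#' @examples
#' \dontrun{
#' ## Hypothetical performance table; the column names follow the conventions
#' ## assumed by this function (FORECAST, OUTCOME, MRE, SELECTION, SPAN, Model).
#' dfPerf <- data.frame(FORECAST = 7,
#'                      OUTCOME = "Prevalent",
#'                      MRE = c(0.12, 0.08),
#'                      SELECTION = c("All", "Lasso"),
#'                      SPAN = c(14, 21),
#'                      Model = c("enet", "rf"))
#' FindBestModel(dfPerf, forecast = 7, vecOutcome = "Prevalent")
#' ## expected label: "Prevalent_Lasso_span21_rf"
#' }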
FindBestModel <- function(dfPerf, forecast = 7, vecOutcome = c("Prevalent variation", "HOSP_DERIV", "Prevalent")){
res <- dfPerf %>%
filter(FORECAST == forecast, OUTCOME %in% vecOutcome) %>%
slice_min(MRE, n = 1) %>%
mutate(Label = paste0(OUTCOME, "_", SELECTION, "_span", SPAN, "_", Model)) %>%
pull(Label)
return(res)
}
|
#This is an application that predicts the next word based on ngram user input
library(shiny)
# Define UI for the application
shinyUI(fluidPage(
titlePanel("Next Word Prediction"),
sidebarLayout(
sidebarPanel(
# Create the text input box
textInput("inpNgram", label = h5("Please provide the text to be completed"),
value = ""),
hr(),
actionButton("loadNgram", "Load Vocabulary"),
helpText("Please load the vocabulary only once then type sentences")
),
mainPanel(
h3("Best Predicted Next Words - left to right"),
#hr(),
strong(verbatimTextOutput('predWord'))
)
)
)
)
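# Hypothetical companion server.R sketch (not part of this file): the UI above
# expects an input 'inpNgram', a button 'loadNgram' and an output 'predWord'.
# 'predictNextWord' is an assumed helper function, shown only for illustration.
#   shinyServer(function(input, output) {
#     output$predWord <- renderText({
#       req(input$inpNgram)
#       predictNextWord(input$inpNgram)
#     })
#   })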
|
/ui.R
|
no_license
|
cmba50/Final
|
R
| false | false | 691 |
r
|
#This is an application that predicts the next word based on ngram user input
library(shiny)
# Define UI for the application
shinyUI(fluidPage(
titlePanel("Next Word Prediction"),
sidebarLayout(
sidebarPanel(
# Create the text input box
textInput("inpNgram", label = h5("Please provide the text to be completed"),
value = ""),
hr(),
actionButton("loadNgram", "Load Vocabulary"),
helpText("Please load the vocabulary only once then type sentences")
),
mainPanel(
h3("Best Predicted Next Words - left to right"),
#hr(),
strong(verbatimTextOutput('predWord'))
)
)
)
)
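# Hypothetical companion server.R sketch (not part of this file): the UI above
# expects an input 'inpNgram', a button 'loadNgram' and an output 'predWord'.
# 'predictNextWord' is an assumed helper function, shown only for illustration.
#   shinyServer(function(input, output) {
#     output$predWord <- renderText({
#       req(input$inpNgram)
#       predictNextWord(input$inpNgram)
#     })
#   })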
|
# These functions are deprecated
# Only relsurv uses them, and I'm working on that
ratetable <- function(...) {
datecheck <- function(x)
inherits(x, c("Date", "POSIXt", "date", "chron"))
args <- list(...)
nargs <- length(args)
ll <- sapply(args, length)
n <- max(ll) # We assume this is the dimension of the user's data frame
levlist <- vector("list", nargs)
x <- matrix(0,n,nargs)
dimnames(x) <- list(1:n, names(args))
isDate <- sapply(args, datecheck)
for (i in 1:nargs) {
if (ll[i] ==1) args[[i]] <- rep(args[[i]], n)
else if (ll[i] != n)
stop(gettextf("Aguments do not all have the same length (arg %d)", i))
# In Splus cut and tcut produce class 'category'
if (inherits(args[[i]], 'category') || is.character(args[[i]]))
args[[i]] <- as.factor(args[[i]])
if (is.factor(args[[i]])) {
levlist[[i]] <- levels(args[[i]])
x[,i] <- as.numeric(args[[i]]) # the vector of levels
}
else x[,i] <- ratetableDate(args[[i]])
}
attr(x, "isDate") <- isDate
attr(x, "levlist") <- levlist
class(x) <- 'ratetable2'
x
}
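# Hypothetical sketch of the (deprecated) interface this function supported:
# ratetable() terms were written inside a model formula to map data columns
# onto the dimensions of a rate table, roughly
#   survexp(futime ~ ratetable(age = age, sex = sex, year = entry.dt),
#           data = mydata, ratetable = survexp.us)
# where 'mydata' and 'entry.dt' are placeholders; newer code should use the
# 'rmap' argument of survexp()/pyears() instead.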
# The two functions below should only be called internally, when missing
# values cause model.frame to drop some rows
is.na.ratetable2 <- function(x) {
attributes(x) <- list(dim=dim(x))
as.vector((1 * is.na(x)) %*% rep(1, ncol(x)) >0)
}
"[.ratetable2" <- function(x, rows, cols, drop=FALSE) {
if (!missing(cols)) {
stop("This should never be called!")
}
aa <- attributes(x)
attributes(x) <- aa[c("dim", "dimnames")]
y <- x[rows,,drop=FALSE]
attr(y,'isDate') <- aa$isDate
attr(y,'levlist') <- aa$levlist
class(y) <- 'ratetable2'
y
}
|
/Recommended/survival/R/ratetableold.R
|
no_license
|
lukaszdaniel/ivory
|
R
| false | false | 1,791 |
r
|
# These functions are deprecated
# Only relsurv uses them, and I'm working on that
ratetable <- function(...) {
datecheck <- function(x)
inherits(x, c("Date", "POSIXt", "date", "chron"))
args <- list(...)
nargs <- length(args)
ll <- sapply(args, length)
n <- max(ll) # We assume this is the dimension of the user's data frame
levlist <- vector("list", nargs)
x <- matrix(0,n,nargs)
dimnames(x) <- list(1:n, names(args))
isDate <- sapply(args, datecheck)
for (i in 1:nargs) {
if (ll[i] ==1) args[[i]] <- rep(args[[i]], n)
else if (ll[i] != n)
stop(gettextf("Aguments do not all have the same length (arg %d)", i))
# In Splus cut and tcut produce class 'category'
if (inherits(args[[i]], 'category') || is.character(args[[i]]))
args[[i]] <- as.factor(args[[i]])
if (is.factor(args[[i]])) {
levlist[[i]] <- levels(args[[i]])
x[,i] <- as.numeric(args[[i]]) # the vector of levels
}
else x[,i] <- ratetableDate(args[[i]])
}
attr(x, "isDate") <- isDate
attr(x, "levlist") <- levlist
class(x) <- 'ratetable2'
x
}
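# Hypothetical sketch of the (deprecated) interface this function supported:
# ratetable() terms were written inside a model formula to map data columns
# onto the dimensions of a rate table, roughly
#   survexp(futime ~ ratetable(age = age, sex = sex, year = entry.dt),
#           data = mydata, ratetable = survexp.us)
# where 'mydata' and 'entry.dt' are placeholders; newer code should use the
# 'rmap' argument of survexp()/pyears() instead.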
# The two functions below should only be called internally, when missing
# values cause model.frame to drop some rows
is.na.ratetable2 <- function(x) {
attributes(x) <- list(dim=dim(x))
as.vector((1 * is.na(x)) %*% rep(1, ncol(x)) >0)
}
"[.ratetable2" <- function(x, rows, cols, drop=FALSE) {
if (!missing(cols)) {
stop("This should never be called!")
}
aa <- attributes(x)
attributes(x) <- aa[c("dim", "dimnames")]
y <- x[rows,,drop=FALSE]
attr(y,'isDate') <- aa$isDate
attr(y,'levlist') <- aa$levlist
class(y) <- 'ratetable2'
y
}
|
\name{toBiblatex}
\alias{toBiblatex}
\alias{toBibtex}
\alias{toBibtex.BibEntry}
\title{Convert BibEntry objects to BibTeX or BibLaTeX}
\usage{
toBiblatex(object, ...)
\method{toBibtex}{BibEntry}(object, note.replace.field = c("urldate",
"pubsate", "addendum"), extra.fields = NULL, ...)
}
\arguments{
\item{object}{an object of class BibEntry to be
converted}
\item{note.replace.field}{a character vector of BibLaTeX
fields. When converting an entry to BibTeX, the first
field in the entry that matches one specified in this
vector will be added to the note field, \emph{if} the
note field is not already present}
\item{extra.fields}{character vector; fields that are not
supported in standard BibTeX styles are by default
dropped in the result returned by the toBibtex function.
Any fields specified in extra.fields will \emph{not} be
dropped if present in an entry.}
\item{...}{ignored}
}
\value{
an object of class \dQuote{Bibtex} - character vectors
where each element holds one line of a BibTeX or BibLaTeX
file
}
\description{
toBiblatex converts a BibEntry object to character vectors
with BibLaTeX markup. toBibtex will convert a BibEntry
object to character vectors with BibTeX markup, converting
some BibLaTeX fields and all entry types that are not
supported by BibTeX to ones that are supported.
}
\details{
toBiblatex converts the BibEntry object to a vector
containing the corresponding BibLaTeX file, it ensures the
name list fields (e.g. author and editor) are formatted
properly to be read by bibtex and biber and otherwise
prints all fields as is, thus it is similar to
\code{\link{toBibtex}}.
toBibtex will attempt to convert BibLaTeX entries to a
format that can be read by bibtex. Any fields not
supported by bibtex are dropped unless they are specified
in \code{extra.fields}. The fields below, if they are
present, are converted as described and added to a bibtex
supported field, unless that field is already present.
\itemize{ \item date - The \code{date} field, if present
will be truncated to a year and added to the \code{year}
field, if it is not already present. If a month is
specified with the date, it will be added to the
\code{month} field. \item journaltitle - Will be changed to
journal, if it is not already present \item location - Will
be changed to address \item institution - Converted to
\code{school} for thesis entries \item sortkey - Converted
to \code{key} \item maintitle - Converted to \code{series}
\item issuetitle - Converted to \code{booktitle} \item
eventtitle - Converted to \code{booktitle} \item eprinttype
- Converted to \code{archiveprefix} (for arXiv references)
\item eprintclass - Converted to \code{primaryclass} (for
arXiv references) }
If no \code{note} field is present, the note.replace.field
can be used to specify BibLaTeX fields that will be looked
for and added to the note field if they are present.
BibLaTeX entry types that are not supported by bibtex are
converted by toBibtex as follows \itemize{
\item
MvBook,Collection,MvCollection,Reference,MvReference,Proceedings,MvProceedings,Periodical
- to Book \item
BookInBook,SuppBook,InReference,SuppPeriodical - to InBook
\item report,patent - to TechReport \item SuppCollection -
to InCollection \item thesis - to MastersThesis if
\code{type = mathesis}, else to PhdThesis \item \emph{rest}
- to Misc }
}
\examples{
file.name <- system.file("Bib", "biblatexExamples.bib", package="RefManageR")
bib <- suppressMessages(ReadBib(file.name))
toBiblatex(bib[70:72])
toBibtex(bib[70:72])
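## A hypothetical BibLaTeX entry illustrating some of the conversions
## described above (date -> year/month, journaltitle -> journal)
bib2 <- BibEntry(bibtype = "Article", key = "doe2020", author = "Jane Doe",
                 title = "An Example Title", journaltitle = "Journal of Examples",
                 date = "2020-03", pubstate = "inpress")
toBiblatex(bib2)
toBibtex(bib2)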
}
\author{
McLean, M. W. \email{mathew.w.mclean@gmail.com}
}
\seealso{
\code{\link{toBibtex}}, \code{\link{BibEntry}},
\code{\link{print.BibEntry}}
}
\keyword{IO}
\keyword{database}
\keyword{utilities}
|
/man/toBiblatex.Rd
|
no_license
|
aurora-mareviv/RefManageR
|
R
| false | false | 3,918 |
rd
|
\name{toBiblatex}
\alias{toBiblatex}
\alias{toBibtex}
\alias{toBibtex.BibEntry}
\title{Convert BibEntry objects to BibTeX or BibLaTeX}
\usage{
toBiblatex(object, ...)
\method{toBibtex}{BibEntry}(object, note.replace.field = c("urldate",
"pubsate", "addendum"), extra.fields = NULL, ...)
}
\arguments{
\item{object}{an object of class BibEntry to be
converted}
\item{note.replace.field}{a character vector of BibLaTeX
fields. When converting an entry to BibTeX, the first
field in the entry that matches one specified in this
vector will be added to the note field, \emph{if} the
note field is not already present}
\item{extra.fields}{character vector; fields that are not
supported in standard BibTeX styles are by default
dropped in the result returned by the toBibtex function.
Any fields specified in extra.fields will \emph{not} be
dropped if present in an entry.}
\item{...}{ignored}
}
\value{
an object of class \dQuote{Bibtex} - character vectors
where each element holds one line of a BibTeX or BibLaTeX
file
}
\description{
toBiblatex converts a BibEntry object to character vectors
with BibLaTeX markup. toBibtex will convert a BibEntry
object to character vectors with BibTeX markup, converting
some BibLaTeX fields and all entry types that are not
supported by BibTeX to ones that are supported.
}
\details{
toBiblatex converts the BibEntry object to a vector
containing the corresponding BibLaTeX file, it ensures the
name list fields (e.g. author and editor) are formatted
properly to be read by bibtex and biber and otherwise
prints all fields as is, thus it is similar to
\code{\link{toBibtex}}.
toBibtex will attempt to convert BibLaTeX entries to a
format that can be read by bibtex. Any fields not
supported by bibtex are dropped unless they are specified
in \code{extra.fields}. The fields below, if they are
present, are converted as described and added to a bibtex
supported field, unless that field is already present.
\itemize{ \item date - The \code{date} field, if present
will be truncated to a year and added to the \code{year}
field, if it is not already present. If a month is
specified with the date, it will be added to the
\code{month} field. \item journaltitle - Will be changed to
journal, if it is not already present \item location - Will
be changed to address \item institution - Converted to
\code{school} for thesis entries \item sortkey - Converted
to \code{key} \item maintitle - Converted to \code{series}
\item issuetitle - Converted to \code{booktitle} \item
eventtitle - Converted to \code{booktitle} \item eprinttype
- Converted to \code{archiveprefix} (for arXiv references)
\item eprintclass - Converted to \code{primaryclass} (for
arXiv references) }
If no \code{note} field is present, the note.replace.field
can be used to specify BibLaTeX fields that will be looked
for and added to the note field if they are present.
BibLaTeX entry types that are not supported by bibtex are
converted by toBibtex as follows \itemize{
\item
MvBook,Collection,MvCollection,Reference,MvReference,Proceedings,MvProceedings,Periodical
- to Book \item
BookInBook,SuppBook,InReference,SuppPeriodical - to InBook
\item report,patent - to TechReport \item SuppCollection -
to InCollection \item thesis - to MastersThesis if
\code{type = mathesis}, else to PhdThesis \item \emph{rest}
- to Misc }
}
\examples{
file.name <- system.file("Bib", "biblatexExamples.bib", package="RefManageR")
bib <- suppressMessages(ReadBib(file.name))
toBiblatex(bib[70:72])
toBibtex(bib[70:72])
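## A hypothetical BibLaTeX entry illustrating some of the conversions
## described above (date -> year/month, journaltitle -> journal)
bib2 <- BibEntry(bibtype = "Article", key = "doe2020", author = "Jane Doe",
                 title = "An Example Title", journaltitle = "Journal of Examples",
                 date = "2020-03", pubstate = "inpress")
toBiblatex(bib2)
toBibtex(bib2)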
}
\author{
McLean, M. W. \email{mathew.w.mclean@gmail.com}
}
\seealso{
\code{\link{toBibtex}}, \code{\link{BibEntry}},
\code{\link{print.BibEntry}}
}
\keyword{IO}
\keyword{database}
\keyword{utilities}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307429747e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615769617-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 362 |
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307429747e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
library(testthat)
library(cprint)
test_check("cprint")
|
/tests/testthat.R
|
no_license
|
bgreenwell/cprint
|
R
| false | false | 56 |
r
|
library(testthat)
library(cprint)
test_check("cprint")
|
setwd("~/Documents/PushMe/Data")
require(ggplot2)
require(reshape2)
require(splines)
require(signal)
require(entropy)
require(RSEIS)
# Read in .csv file first, so that the starting latitude/longitude for the
# results data frame can be taken from the first 140 samples.
sample = read.csv('rDrive1.csv')
if (exists("results")) rm(results)
results = data.frame("time"=0, "meanX"=0, "meanY"=0, "meanZ"=0, "sdX"=0, "sdY"=0, "sdZ"=0, "sdT"=0, "energyX"=0, "energyY"=0, "energyZ"=0, "energyT"=0, "lat"=mean(sample$Latitude[1:140]), "lon"=mean(sample$Longitude[1:140]), "entropyX"=0, "entropyY"=0, "entropyZ"=0, "entropyT"=0)
# State sample-specific variables
time = sample$Elapsed.Time
# Split sensor output into 3 dataframes
dX=data.frame(x=time, y=sample$X)
dY=data.frame(x=time, y=sample$Y)
dZ=data.frame(x=time, y=sample$Z)
ntot = floor(nrow(sample)/140)
for(u in seq(140, 140*(ntot-1), by = 140)) {
# Define range of samples
rangeS = u:(u+140)
# Location
mlat = mean(sample$Latitude[rangeS])
mlon = mean(sample$Longitude[rangeS])
# Create interpolated spline along raw values for each sensor
interpX = interpSpline(dX$x[rangeS], dX$y[rangeS])
interpY = interpSpline(dY$x[rangeS], dY$y[rangeS])
interpZ = interpSpline(dZ$x[rangeS], dZ$y[rangeS])
# Resample along spline, with a number of segments proportional to the range that the data covers
resampleX = predict(interpX, nseg = 128)
resampleX = data.frame(x=resampleX$x, y=resampleX$y)
resampleY = predict(interpY, nseg = 128)
resampleY = data.frame(x=resampleY$x, y=resampleY$y)
resampleZ = predict(interpZ, nseg = 128)
resampleZ = data.frame(x=resampleZ$x, y=resampleZ$y)
# Calculate means of signal components
meanX = mean(resampleX$y)
meanY = mean(resampleY$y)
meanZ = mean(resampleZ$y)
# Calculate standard deviation of signal components
sdX = sd(resampleX$y)
sdY = sd(resampleY$y)
sdZ = sd(resampleZ$y)
# Calculate energy proxy and information entropy from fourier transform
window = 128
freq = 20
start = 0
fftX = Mod(fft(resampleX$y-mean(resampleX$y)))
fftY = Mod(fft(resampleY$y-mean(resampleY$y)))
fftZ = Mod(fft(resampleZ$y-mean(resampleZ$y)))
energyX = sum(fftX)/window
energyY = sum(fftY)/window
energyZ = sum(fftZ)/window
entropyX = entropy(fftX)
entropyY = entropy(fftY)
entropyZ = entropy(fftZ)
row = nrow(results)+1
results[row,1] = time[u]
results[row,2] = meanX
results[row,3] = meanY
results[row,4] = meanZ
results[row,5] = sdX
results[row,6] = sdY
results[row,7] = sdZ
results[row,8] = sqrt((sdX^2)+(sdY^2)+(sdZ^2))
results[row,9] = energyX
results[row,10] = energyY
results[row,11] = energyZ
results[row,12] = sqrt((energyX^2)+(energyY^2)+(energyZ^2))
results[row,13] = mlat
results[row,14] = mlon
results[row,15] = entropyX
results[row,16] = entropyY
results[row,17] = entropyZ
results[row,18] = sqrt((entropyX^2)+(entropyY^2)+(entropyZ^2))
cat("Pass",nrow(results)-1,"of",ntot-1,"Complete ","[",u,"to",u+140,"]","\n")
}
print("Run Complete")
|
/continuousMethod.R
|
permissive
|
jhrrsn/push_me
|
R
| false | false | 2,928 |
r
|
setwd("~/Documents/PushMe/Data")
require(ggplot2)
require(reshape2)
require(splines)
require(signal)
require(entropy)
require(RSEIS)
rm(results)
# Read in .csv file first, since the results data frame below uses its Latitude/Longitude columns.
sample=read.csv('rDrive1.csv')
results = data.frame("time"=0, "meanX"=0, "meanY"=0, "meanZ"=0, "sdX"=0, "sdY"=0, "sdZ"=0, "sdT"=0, "energyX"=0, "energyY"=0, "energyZ"=0, "energyT"=0, "lat"=mean(sample$Latitude[1:140]), "lon"=mean(sample$Longitude[1:140]), "entropyX"=0, "entropyY"=0, "entropyZ"=0, "entropyT"=0)
# State sample-specific variables
time = sample$Elapsed.Time
# Split sensor output into 3 dataframes
dX=data.frame(x=time, y=sample$X)
dY=data.frame(x=time, y=sample$Y)
dZ=data.frame(x=time, y=sample$Z)
ntot = floor(nrow(sample)/140)
for(u in seq(140, 140*(ntot-1), by = 140)) {
# Define range of samples
rangeS = u:(u+140)
# Location
mlat = mean(sample$Latitude[rangeS])
mlon = mean(sample$Longitude[rangeS])
# Create interpolated spline along raw values for each sensor
interpX = interpSpline(dX$x[rangeS], dX$y[rangeS])
interpY = interpSpline(dY$x[rangeS], dY$y[rangeS])
interpZ = interpSpline(dZ$x[rangeS], dZ$y[rangeS])
# Resample along the spline using a fixed number of segments (nseg = 128) per window
resampleX = predict(interpX, nseg = 128)
resampleX = data.frame(x=resampleX$x, y=resampleX$y)
resampleY = predict(interpY, nseg = 128)
resampleY = data.frame(x=resampleY$x, y=resampleY$y)
resampleZ = predict(interpZ, nseg = 128)
resampleZ = data.frame(x=resampleZ$x, y=resampleZ$y)
# Calculate means of signal components
meanX = mean(resampleX$y)
meanY = mean(resampleY$y)
meanZ = mean(resampleZ$y)
# Calculate standard deviation of signal components
sdX = sd(resampleX$y)
sdY = sd(resampleY$y)
sdZ = sd(resampleZ$y)
# Calculate energy proxy and information entropy from fourier transform
window = 128
freq = 20
start = 0
fftX = Mod(fft(resampleX$y-mean(resampleX$y)))
fftY = Mod(fft(resampleY$y-mean(resampleY$y)))
fftZ = Mod(fft(resampleZ$y-mean(resampleZ$y)))
energyX = sum(fftX)/window
energyY = sum(fftY)/window
energyZ = sum(fftZ)/window
entropyX = entropy(fftX)
entropyY = entropy(fftY)
entropyZ = entropy(fftZ)
row = nrow(results)+1
results[row,1] = time[u]
results[row,2] = meanX
results[row,3] = meanY
results[row,4] = meanZ
results[row,5] = sdX
results[row,6] = sdY
results[row,7] = sdZ
results[row,8] = sqrt((sdX^2)+(sdY^2)+(sdZ^2))
results[row,9] = energyX
results[row,10] = energyY
results[row,11] = energyZ
results[row,12] = sqrt((energyX^2)+(energyY^2)+(energyZ^2))
results[row,13] = mlat
results[row,14] = mlon
results[row,15] = entropyX
results[row,16] = entropyY
results[row,17] = entropyZ
results[row,18] = sqrt((entropyX^2)+(entropyY^2)+(entropyZ^2))
cat("Pass",nrow(results)-1,"of",ntot-1,"Complete ","[",u,"to",u+140,"]","\n")
}
print("Run Complete")
|
######## load packages
library(tidyverse)
library(corrplot)
library(fastDummies)
library(rpart)
library(rpart.plot)
library(caret)
library(glmnet)
library(randomForest)
library(ROCR)
library(pROC)
library(naivebayes)
library(xgboost)
library(e1071)
library(vegan)
library(factoextra)
library(kableExtra)
library(jtools)
library(inspectdf)
library(cowplot)
######### load the data set
bank <- read.csv("data/bank.csv")
##########################################
## Exploratory Data Analysis
##########################################
## Categorical variables overview
x <- inspect_cat(bank, show_plot = T)
show_plot(x, text_labels = T)
## Distributions of deposit in numbers and percentages
## dataset is balanced. No need to do resampling.
deposit_number <- bank %>%
group_by(deposit) %>%
summarize(Count = n()) %>%
ggplot(aes(x = deposit, y = Count)) +
geom_bar(stat = "identity", fill = "#b779ed", color = "grey40") +
theme_bw() +
coord_flip() +
geom_text(aes(x = deposit, y = 0.01, label = Count),
hjust = -0.8, vjust = -1, size = 3,
color = "black", fontface = "bold") +
labs(title = "Deposit", x = "Deposit", y="Amount") +
theme(plot.title=element_text(hjust=0.5))
deposit_percentage <- bank %>% group_by(deposit) %>% summarise(Count=n()) %>%
mutate(pct = round(prop.table(Count), 2) * 100) %>%
ggplot(aes(x=deposit, y=pct)) +
geom_bar(stat = "identity", fill = "#62dce3", color="grey40") +
geom_text(aes(x=deposit, y=0.01, label= sprintf("%.2f%%", pct)),
hjust=0.5, vjust=-3, size=4,
color="black", fontface = "bold") +
theme_bw() +
labs(x = "Deposit", y="Percentage") +
labs(title = "Deposit (%)") + theme(plot.title=element_text(hjust=0.5))
plot_grid(deposit_number, deposit_percentage, align="h", ncol=2)
## Deposit Subscriptions based on Education Level
bank %>%
group_by(education, deposit) %>%
tally() %>%
ggplot(aes(x = education, y = n, fill = deposit)) +
geom_bar(stat = "identity", position = "dodge") +
labs(x = "Education Level", y = "Number of People") +
ggtitle("Deposit Subscriptions Based on Education Level") +
geom_text(aes(label = n), vjust = -0.5, position = position_dodge(0.8))
## Deposit Subscriptions based on Marital Status
bank %>%
group_by(marital, deposit) %>%
tally() %>%
ggplot(aes(x = marital, y = n, fill = deposit)) +
geom_bar(stat = "identity", position = "dodge") +
labs(x = "Marital Status", y = "Number of People") +
ggtitle("Deposit Subscriptions based on Marital Status") +
geom_text(aes(label = n), vjust = -0.5, position = position_dodge(0.8))
## Deposit Subscriptions Based on Last Contact Duration(in seconds)
bank %>%
group_by(duration, deposit) %>%
tally() %>%
ggplot(aes(x = duration, y = n, color = deposit)) +
geom_smooth(se = F) +
labs(x = "Duration(in seconds)", y = "Number of People") +
ggtitle("Deposit Subscriptions Based on Last Contact Duration")
## Deposit Subscriptions based on jobs
bank %>%
group_by(job, deposit) %>%
tally() %>%
ggplot(aes(x = job, y = n, fill = deposit)) +
geom_bar(stat = "identity", position = "dodge") +
labs(x = "Job", y = "Number of People") +
ggtitle("Deposit Subscriptions Based on Jobs") +
geom_text(aes(label = n), vjust = -0.5, position = position_dodge(0.8))
## Changes in Deposit Subscriptions vs Age vs Personal Loans
bank %>%
group_by(age, deposit, loan) %>%
tally() %>%
ggplot(aes(x = age, y = n, color = loan)) +
geom_smooth(se=F) +
labs(title = "Changes in Deposit Subscriptions vs Age vs Personal Loans",
x = "Age",
y = "Number of People")
## Changes in Deposit Subscriptions vs Age vs Contact Methods
bank %>%
group_by(contact, age, deposit) %>%
tally() %>%
ggplot(aes(x = age, y = n, color = contact)) +
geom_smooth(se=F) +
labs(title = "Changes in Deposit Subscriptions vs Age vs Contact Methods",
x = "Age",
y = "Number of People")
##########################################
## Data Cleaning
##########################################
## remove unnecessary columns - month, day and poutcome
## (they overlap with information already carried by other variables)
bank2 <- bank %>%
filter(!(pdays != -1 & poutcome=='unknown')) %>%
filter(!(job == "unknown")) %>%
select(-month, -day, -poutcome)
## convert categorical variables to dummies
## and drop the following unknown dummy variables
bank_clean <- fastDummies::dummy_cols(bank2, remove_first_dummy = T) %>%
select(-contact, -default, -deposit,
-education, -housing, -job,
-loan, -marital, -job_unknown)
colnames(bank_clean) <- sub("-","_", colnames(bank_clean))
## Split into training/test dataset
set.seed(820)
# Randomly assign each row to train (1, ~70%) or test (0, ~30%)
bank_clean$train <- sample(c(0,1), nrow(bank_clean), replace = TRUE, prob = c(0.3, 0.7))
# train dataset - 7731 observations
bank_train <- bank_clean %>% filter(train == 1)
bank_train$train <- NULL
# test dataset - 3359 observations
bank_test <- bank_clean %>% filter(train == 0)
bank_test$train <- NULL
## for future models
bank_clean$train <- NULL
x_train <- model.matrix(deposit_yes ~ ., bank_train)[, -1]
y_train <- bank_train$deposit_yes
x_test <- model.matrix(deposit_yes ~ ., bank_test)[ ,-1]
y_test <- bank_test$deposit_yes
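## Quick sanity check (optional sketch): the train fraction should be close to
## 0.7 and the positive rate of deposit_yes similar in both splits.
nrow(bank_train) / (nrow(bank_train) + nrow(bank_test))
mean(y_train)
mean(y_test)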
########################################################
## Logistic regression
########################################################
## build logistic model
logit <- glm(deposit_yes~., data=bank_train, family="binomial")
yhat_logit <- predict(logit, bank_test,type='response')
## review model summary and important variables
summary(logit)
sort(logit$coefficients)
sort(logit$coefficients, decreasing = TRUE) # decreasing sort; dplyr::desc() would flip the coefficient signs
## Model performance measurement
modelroc_logit <- roc(as.ordered(y_test), yhat_logit)
modelroc_logit
# AUC = 0.867
plot(modelroc_logit, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
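## Optional: logistic coefficients are on the log-odds scale, so exponentiating
## them gives odds ratios that are usually easier to read (a sketch; the name
## odds_ratios is arbitrary).
odds_ratios <- exp(coef(logit))
head(sort(odds_ratios, decreasing = TRUE), 10)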
########################################################
## Lasso
########################################################
## build Lasso model
fit_lasso <- cv.glmnet(x_train, y_train, alpha = 1,
nfolds = 10, family="binomial")
yhat_lasso_test <- predict(fit_lasso, x_test, s = fit_lasso$lambda.min)
## Model performance measurement
modelroc_lasso <- roc(as.ordered(y_test), yhat_lasso_test)
modelroc_lasso
# AUC = 0.867
plot(modelroc_lasso, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
# view variables coefficients
coef(fit_lasso)
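## Note: coef() on a cv.glmnet object defaults to s = "lambda.1se", while the
## predictions above use lambda.min. To inspect the coefficients behind
## yhat_lasso_test, request them at the same lambda (sketch):
coef(fit_lasso, s = fit_lasso$lambda.min)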
########################################################
## Random Forest
########################################################
## build Random Forest model
## number of trees = 500
rf <- randomForest(deposit_yes ~ ., data = bank_train, ntree = 500)
yhat_rf_test <- predict(rf, bank_test[, -27], type='response')
## view variables importance
varImpPlot(rf)
## Model performance measurement
modelroc_rf <- roc(as.ordered(y_test), as.ordered(yhat_rf_test))
modelroc_rf
# AUC = 0.887
plot(modelroc_rf, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
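## Because deposit_yes is numeric 0/1, randomForest() above fits a regression
## forest. A classification forest with class probabilities is one alternative
## (a sketch; the object names are arbitrary):
bank_train_cls <- bank_train
bank_train_cls$deposit_yes <- as.factor(bank_train_cls$deposit_yes)
rf_cls <- randomForest(deposit_yes ~ ., data = bank_train_cls, ntree = 500)
yhat_rf_prob <- predict(rf_cls, bank_test[, -27], type = "prob")[, 2]
roc(as.ordered(y_test), yhat_rf_prob)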
########################################################
## Boosting
########################################################
train_bst <- bank_train %>% dplyr::select(-deposit_yes)
test_bst <- bank_test %>% dplyr::select(-deposit_yes)
test_m <- as.matrix(test_bst)
train_m <- as.matrix(train_bst)
deposit_train <- bank_train$deposit_yes
deposit_test <- bank_test$deposit_yes
## build Boosting model
# here we choose eta = 0.01 instead of 0.001
# to limit overfitting while keeping the number of boosting rounds (nrounds = 800) manageable
# (a cross-validated check of this choice is sketched after this section)
bst <- xgboost(data = train_m, label = deposit_train, eta =0.01,
max_depth = 6, nrounds = 800, objective = "binary:logistic")
pred <- predict(bst,test_m, type='response')
## Model performance measurement
modelroc_boost <- roc(as.ordered(y_test), pred)
modelroc_boost
# AUC = 0.891
plot(modelroc_boost, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
## model summary
summary(bst)
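## One way to check the eta / nrounds trade-off noted above is k-fold
## cross-validation with early stopping (a sketch; stops once the held-out AUC
## has not improved for 50 rounds):
cv <- xgb.cv(data = train_m, label = deposit_train, nfold = 5,
             eta = 0.01, max_depth = 6, nrounds = 800,
             objective = "binary:logistic", metrics = "auc",
             early_stopping_rounds = 50, verbose = 0)
cv$best_iteration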
########################################################
## Support Vector Machines
########################################################
## build SVM model
svm_model <- svm(deposit_yes ~ ., data = bank_train, kernel="linear", scale = T)
yhat_svm_test <- predict(svm_model, bank_test[, -27])
## Model performance measurement
modelroc_svm <- roc(as.ordered(y_test), yhat_svm_test)
modelroc_svm
# AUC = 0.862
plot(modelroc_svm, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
########################################################
## Naive Bayes
########################################################
bank_train_nb <- bank_train
bank_train_nb$deposit_yes <- as.factor(bank_train_nb$deposit_yes)
## build Naive Bayes model
nb <- naive_bayes(deposit_yes ~ ., data = bank_train_nb)
yhat_nb_test <- predict(nb, bank_test[, -27], type = "prob")
## Model performance measurement
modelroc_nb <- roc(as.ordered(y_test), yhat_nb_test[, 2])
modelroc_nb
# AUC = 0.781
plot(modelroc_nb, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
########################################################
## Models Measurement
########################################################
auc_table <- tibble(Model = c("Logistic",
"Lasso",
"Random Forest",
"Boosting",
"SVM",
"Naive Bayes"),
AUC = c(modelroc_logit$auc,
modelroc_lasso$auc,
modelroc_rf$auc,
modelroc_boost$auc,
modelroc_svm$auc,
modelroc_nb$auc))
ggplot(auc_table, aes(x = Model, y = AUC)) +
geom_bar(aes(fill = Model), stat = "identity") +
geom_text(aes(label = round(AUC, 4)), vjust=1.6, color="white", size=3.5) +
coord_cartesian(ylim = c(0.75, 0.9)) +
scale_fill_manual(values = c("#0072B2", "#999999","dark grey", "#D55E00","#56B4E9", "purple")) +
theme_minimal()
########################################################
## Apply best model
########################################################
## so far, our best model is Boosting (AUC:0.892)
## here we apply the threshold = 0.447
bank_test$deposit_predict <- ifelse(pred > 0.447, 1, 0)
## Confusion Matrix
confusionMatrix(table(bank_test$deposit_yes, bank_test$deposit_predict))
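## The 0.447 cut-off above is read off the ROC plot (print.thres). It can also
## be extracted programmatically with pROC's coords(); "best" uses the Youden
## criterion by default (sketch):
coords(modelroc_boost, "best", ret = c("threshold", "sensitivity", "specificity"))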
########################################################
## Clustering
########################################################
library(analogue)
library(sets)
data <- bank_clean
data_n <- data[, 1:6]
data_c <- data[, 7:27]
data_nscale = scale(data_n)
distc <- vegdist(data_c, method = "jaccard")
distn <- vegdist(data_nscale,method = "euclidean")
complete_dis <- fuse(distc,distn, weights = c(0.5,0.5))
dm <- as.matrix(complete_dis)
hward <- hclust(complete_dis, method = "ward.D")
plot(hward)
table(cutree(hward,k = 3))
cutree(hward,k = 3) -> grouping
fviz_cluster(list(data = data, cluster = grouping))
fviz_nbclust(data, FUN = hcut, method = "wss")
fviz_nbclust(data, FUN = hcut, method = "silhouette")
data$cluster = cutree(hward, k = 3)
data$cluster1 = cutree(hward, k = 4)
data$cluster2 = cutree(hward, k = 2)
cluster_s <- data %>%
group_by(cluster) %>%
skimr::skim_to_wide() %>%
select(cluster, variable, mean) %>%
mutate_at(vars(mean), as.numeric)
cluster_dat = cluster_s%>%
pivot_wider(names_from = variable,
values_from=mean)
heatmap(as.matrix(cluster_dat),
scale="column",
Colv = NA,
Rowv = NA)
data %>%
group_by(cluster) %>%
skimr::skim_to_wide()%>%
summary()
data %>%
group_by(cluster) %>%
summarize(cluster_age = mean(age),
cluster_balance = mean(balance),
cluster_campaign = mean(campaign),
cluster_days = mean(pdays),
cluster_duration = mean(duration),
cluster_previous = mean(previous))
# ggplot(data, aes(age, balance, group = cluster, color = factor(cluster))) +
# geom_point(alpha = 0.5,position = "jitter") +
#   scale_color_manual(values = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2")) +
# theme_bw()
ggplot(data, aes(factor(cluster),age,fill = factor(cluster))) +
geom_boxplot() +
scale_fill_manual(values = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2")) +
theme_bw() +
labs(title = "Age Distribution by Cluster")
ggplot(data, aes(balance, fill = factor(cluster))) +
geom_histogram(position = "dodge") +
facet_wrap(~factor(cluster)) +
scale_fill_manual(values = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2")) +
theme_bw() +
labs(title = "Balance Distribution by Cluster")
dend <- as.dendrogram(hward)
dend %>%
set("labels_col", value = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2"), k = 3) %>%
plot(horiz = F, axes=F, main = "Visual Breakdown of Three Clusters")
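## Complementary check (a sketch): average silhouette width computed on the
## same fused distance used for the Ward clustering, rather than on the raw
## data as fviz_nbclust does above.
library(cluster)
sil <- silhouette(grouping, complete_dis)
mean(sil[, "sil_width"])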
|
/Bank_Marketing_Campaign_Analysis.R
|
no_license
|
davidzhang647/Machine_Learning_Projects
|
R
| false | false | 13,767 |
r
|
######## load packages
library(tidyverse)
library(corrplot)
library(fastDummies)
library(rpart)
library(rpart.plot)
library(caret)
library(glmnet)
library(randomForest)
library(ROCR)
library(pROC)
library(naivebayes)
library(xgboost)
library(e1071)
library(vegan)
library(factoextra)
library(kableExtra)
library(jtools)
library(inspectdf)
library(cowplot)
######### load the data set
bank <- read.csv("data/bank.csv")
##########################################
## Exploratory Data Analysis
##########################################
## Categorical variables overview
x <- inspect_cat(bank, show_plot = T)
show_plot(x, text_labels = T)
## Distributions of deposit in numbers and percentages
## dataset is balanced. No need to do resampling.
deposit_number <- bank %>%
group_by(deposit) %>%
summarize(Count = n()) %>%
ggplot(aes(x = deposit, y = Count)) +
geom_bar(stat = "identity", fill = "#b779ed", color = "grey40") +
theme_bw() +
coord_flip() +
geom_text(aes(x = deposit, y = 0.01, label = Count),
hjust = -0.8, vjust = -1, size = 3,
color = "black", fontface = "bold") +
labs(title = "Deposit", x = "Deposit", y="Amount") +
theme(plot.title=element_text(hjust=0.5))
deposit_percentage <- bank %>% group_by(deposit) %>% summarise(Count=n()) %>%
mutate(pct = round(prop.table(Count), 2) * 100) %>%
ggplot(aes(x=deposit, y=pct)) +
geom_bar(stat = "identity", fill = "#62dce3", color="grey40") +
geom_text(aes(x=deposit, y=0.01, label= sprintf("%.2f%%", pct)),
hjust=0.5, vjust=-3, size=4,
color="black", fontface = "bold") +
theme_bw() +
labs(x = "Deposit", y="Percentage") +
labs(title = "Deposit (%)") + theme(plot.title=element_text(hjust=0.5))
plot_grid(deposit_number, deposit_percentage, align="h", ncol=2)
## Deposit Subscriptions based on Education Level
bank %>%
group_by(education, deposit) %>%
tally() %>%
ggplot(aes(x = education, y = n, fill = deposit)) +
geom_bar(stat = "identity", position = "dodge") +
labs(x = "Education Level", y = "Number of People") +
ggtitle("Deposit Subscriptions Based on Education Level") +
geom_text(aes(label = n), vjust = -0.5, position = position_dodge(0.8))
## Deposit Subscriptions based on Marital Status
bank %>%
group_by(marital, deposit) %>%
tally() %>%
ggplot(aes(x = marital, y = n, fill = deposit)) +
geom_bar(stat = "identity", position = "dodge") +
labs(x = "Marital Status", y = "Number of People") +
ggtitle("Deposit Subscriptions based on Marital Status") +
geom_text(aes(label = n), vjust = -0.5, position = position_dodge(0.8))
## Deposit Subscriptions Based on Last Contact Duration(in seconds)
bank %>%
group_by(duration, deposit) %>%
tally() %>%
ggplot(aes(x = duration, y = n, color = deposit)) +
geom_smooth(se = F) +
labs(x = "Duration(in seconds)", y = "Number of People") +
ggtitle("Deposit Subscriptions Based on Last Contact Duration")
## Deposit Subscriptions based on jobs
bank %>%
group_by(job, deposit) %>%
tally() %>%
ggplot(aes(x = job, y = n, fill = deposit)) +
geom_bar(stat = "identity", position = "dodge") +
labs(x = "Job", y = "Number of People") +
ggtitle("Deposit Subscriptions Based on Jobs") +
geom_text(aes(label = n), vjust = -0.5, position = position_dodge(0.8))
## Changes in Deposit Subscriptions vs Age vs Personal Loans
bank %>%
group_by(age, deposit, loan) %>%
tally() %>%
ggplot(aes(x = age, y = n, color = loan)) +
geom_smooth(se=F) +
labs(title = "Changes in Deposit Subscriptions vs Age vs Personal Loans",
x = "Age",
y = "Number of People")
## Changes in Deposit Subscriptions vs Age vs Contact Methods
bank %>%
group_by(contact, age, deposit) %>%
tally() %>%
ggplot(aes(x = age, y = n, color = contact)) +
geom_smooth(se=F) +
labs(title = "Changes in Deposit Subscriptions vs Age vs Contact Methods",
x = "Age",
y = "Number of People")
##########################################
## Data Cleaning
##########################################
## remove unnecessary columns - month, day and poutcome
## (they overlap with information already carried by other variables)
bank2 <- bank %>%
filter(!(pdays != -1 & poutcome=='unknown')) %>%
filter(!(job == "unknown")) %>%
select(-month, -day, -poutcome)
## convert categorical variables to dummies
## and drop the following unknown dummy variables
bank_clean <- fastDummies::dummy_cols(bank2, remove_first_dummy = T) %>%
select(-contact, -default, -deposit,
-education, -housing, -job,
-loan, -marital, -job_unknown)
colnames(bank_clean) <- sub("-","_", colnames(bank_clean))
## Split into training/test dataset
set.seed(820)
# Randomly assign each row to train (1, ~70%) or test (0, ~30%)
bank_clean$train <- sample(c(0,1), nrow(bank_clean), replace = TRUE, prob = c(0.3, 0.7))
# train dataset - 7731 observations
bank_train <- bank_clean %>% filter(train == 1)
bank_train$train <- NULL
# test dataset - 3359 observations
bank_test <- bank_clean %>% filter(train == 0)
bank_test$train <- NULL
## for future models
bank_clean$train <- NULL
x_train <- model.matrix(deposit_yes ~ ., bank_train)[, -1]
y_train <- bank_train$deposit_yes
x_test <- model.matrix(deposit_yes ~ ., bank_test)[ ,-1]
y_test <- bank_test$deposit_yes
########################################################
## Logistic regression
########################################################
## build logistic model
logit <- glm(deposit_yes~., data=bank_train, family="binomial")
yhat_logit <- predict(logit, bank_test,type='response')
## review model summary and important variables
summary(logit)
sort(logit$coefficients)
sort(logit$coefficients, decreasing = TRUE) # decreasing sort; dplyr::desc() would flip the coefficient signs
## Model performance measurement
modelroc_logit <- roc(as.ordered(y_test), yhat_logit)
modelroc_logit
# AUC = 0.867
plot(modelroc_logit, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
########################################################
## Lasso
########################################################
## build Lasso model
fit_lasso <- cv.glmnet(x_train, y_train, alpha = 1,
nfolds = 10, family="binomial")
yhat_lasso_test <- predict(fit_lasso, x_test, s = fit_lasso$lambda.min)
## Model performance measurement
modelroc_lasso <- roc(as.ordered(y_test), yhat_lasso_test)
modelroc_lasso
# AUC = 0.867
plot(modelroc_lasso, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
# view variables coefficients
coef(fit_lasso)
########################################################
## Random Forest
########################################################
## build Random Forest model
## number of trees = 500
rf <- randomForest(deposit_yes ~ ., data = bank_train, ntree = 500)
yhat_rf_test <- predict(rf, bank_test[, -27], type='response')
## view variables importance
varImpPlot(rf)
## Model performance measurement
modelroc_rf <- roc(as.ordered(y_test), as.ordered(yhat_rf_test))
modelroc_rf
# AUC = 0.887
plot(modelroc_rf, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
########################################################
## Boosting
########################################################
train_bst <- bank_train %>% dplyr::select(-deposit_yes)
test_bst <- bank_test %>% dplyr::select(-deposit_yes)
test_m <- as.matrix(test_bst)
train_m <- as.matrix(train_bst)
deposit_train <- bank_train$deposit_yes
deposit_test <- bank_test$deposit_yes
## build Boosting model
# here we choose eta = 0.01 instead of 0.001
# to limit overfitting while keeping the number of boosting rounds (nrounds = 800) manageable
bst <- xgboost(data = train_m, label = deposit_train, eta =0.01,
max_depth = 6, nrounds = 800, objective = "binary:logistic")
pred <- predict(bst,test_m, type='response')
## Model performance measurement
modelroc_boost <- roc(as.ordered(y_test), pred)
modelroc_boost
# AUC = 0.891
plot(modelroc_boost, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
## model summary
summary(bst)
########################################################
## Support Vector Machines
########################################################
## build SVM model
svm_model <- svm(deposit_yes ~ ., data = bank_train, kernel="linear", scale = T)
yhat_svm_test <- predict(svm_model, bank_test[, -27])
## Model performance measurement
modelroc_svm <- roc(as.ordered(y_test), yhat_svm_test)
modelroc_svm
# AUC = 0.862
plot(modelroc_svm, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
########################################################
## Naive Bayes
########################################################
bank_train_nb <- bank_train
bank_train_nb$deposit_yes <- as.factor(bank_train_nb$deposit_yes)
## build Naive Bayes model
nb <- naive_bayes(deposit_yes ~ ., data = bank_train_nb)
yhat_nb_test <- predict(nb, bank_test[, -27], type = "prob")
## Model performance measurement
modelroc_nb <- roc(as.ordered(y_test), yhat_nb_test[, 2])
modelroc_nb
# AUC = 0.781
plot(modelroc_nb, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
########################################################
## Models Measurement
########################################################
auc_table <- tibble(Model = c("Logistic",
"Lasso",
"Random Forest",
"Boosting",
"SVM",
"Naive Bayes"),
AUC = c(modelroc_logit$auc,
modelroc_lasso$auc,
modelroc_rf$auc,
modelroc_boost$auc,
modelroc_svm$auc,
modelroc_nb$auc))
ggplot(auc_table, aes(x = Model, y = AUC)) +
geom_bar(aes(fill = Model), stat = "identity") +
geom_text(aes(label = round(AUC, 4)), vjust=1.6, color="white", size=3.5) +
coord_cartesian(ylim = c(0.75, 0.9)) +
scale_fill_manual(values = c("#0072B2", "#999999","dark grey", "#D55E00","#56B4E9", "purple")) +
theme_minimal()
########################################################
## Apply best model
########################################################
## so far, our best model is Boosting (AUC:0.892)
## here we apply the threshold = 0.447
bank_test$deposit_predict <- ifelse(pred > 0.447, 1, 0)
## Confusion Matrix
confusionMatrix(table(bank_test$deposit_yes, bank_test$deposit_predict))
########################################################
## Clustering
########################################################
library(analogue)
library(sets)
data <- bank_clean
data_n <- data[, 1:6]
data_c <- data[, 7:27]
data_nscale = scale(data_n)
distc <- vegdist(data_c, method = "jaccard")
distn <- vegdist(data_nscale,method = "euclidean")
complete_dis <- fuse(distc,distn, weights = c(0.5,0.5))
dm <- as.matrix(complete_dis)
hward <- hclust(complete_dis, method = "ward.D")
plot(hward)
table(cutree(hward,k = 3))
cutree(hward,k = 3) -> grouping
fviz_cluster(list(data = data, cluster = grouping))
fviz_nbclust(data, FUN = hcut, method = "wss")
fviz_nbclust(data, FUN = hcut, method = "silhouette")
data$cluster = cutree(hward, k = 3)
data$cluster1 = cutree(hward, k = 4)
data$cluster2 = cutree(hward, k = 2)
cluster_s <- data %>%
group_by(cluster) %>%
skimr::skim_to_wide() %>%
select(cluster, variable, mean) %>%
mutate_at(vars(mean), as.numeric)
cluster_dat = cluster_s%>%
pivot_wider(names_from = variable,
values_from=mean)
heatmap(as.matrix(cluster_dat),
scale="column",
Colv = NA,
Rowv = NA)
data %>%
group_by(cluster) %>%
skimr::skim_to_wide()%>%
summary()
data %>%
group_by(cluster) %>%
summarize(cluster_age = mean(age),
cluster_balance = mean(balance),
cluster_campaign = mean(campaign),
cluster_days = mean(pdays),
cluster_duration = mean(duration),
cluster_previous = mean(previous))
# ggplot(data, aes(age, balance, group = cluster, color = factor(cluster))) +
# geom_point(alpha = 0.5,position = "jitter") +
#   scale_color_manual(values = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2")) +
# theme_bw()
ggplot(data, aes(factor(cluster),age,fill = factor(cluster))) +
geom_boxplot() +
scale_fill_manual(values = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2")) +
theme_bw() +
labs(title = "Age Distribution by Cluster")
ggplot(data, aes(balance, fill = factor(cluster))) +
geom_histogram(position = "dodge") +
facet_wrap(~factor(cluster)) +
scale_fill_manual(values = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2")) +
theme_bw() +
labs(title = "Balance Distribution by Cluster")
dend <- as.dendrogram(hward)
dend %>%
set("labels_col", value = c("#c41414","#ffb92e","#6ca5f5","#1b9400","#006bc2"), k = 3) %>%
plot(horiz = F, axes=F, main = "Visual Breakdown of Three Clusters")
|
# Hierarchical Clustering
dataset<-read.csv("Mall_Customers.csv")
X=dataset[4:5]
#Using the dendrogram to find the optimal number of clusters
dendrogram=hclust(dist(X,method="euclidean"),method="ward.D")
plot(dendrogram,
main=paste("Dendrogram"),
xlab="Customers",
ylab="Euclidean Distances")
#Fitting hierarchical clustering to the mall dataset
hc=hclust(dist(X,method="euclidean"),method="ward.D")
y_hc=cutree(hc,5)
# Visualising the clusters
library(cluster)
clusplot(X,
y_hc,
lines=0,
shade=TRUE,
color=TRUE,
labels=2,
plotchar=FALSE,
span=TRUE,
main=paste("Clusters of clients"),
xlab="Annual Income",
ylab="Spending Score")
|
/03_Clustering/Hierarchical_Clustering/Hierarchical_Clustering_R_AM.R
|
no_license
|
AMDonati/ML_Algorithms_inR
|
R
| false | false | 736 |
r
|
# Hierarchical Clustering
dataset<-read.csv("Mall_Customers.csv")
X=dataset[4:5]
#Using the dendrogram to find the optimal number of clusters
dendrogram=hclust(dist(X,method="euclidean"),method="ward.D")
plot(dendrogram,
main=paste("Dendrogram"),
xlab="Customers",
ylab="Euclidean Distances")
#Fitting hierarchical clustering to the mall dataset
hc=hclust(dist(X,method="euclidean"),method="ward.D")
y_hc=cutree(hc,5)
# Visualising the clusters
library(cluster)
clusplot(X,
y_hc,
lines=0,
shade=TRUE,
color=TRUE,
labels=2,
plotchar=FALSE,
span=TRUE,
main=paste("Clusters of clients"),
xlab="Annual Income",
ylab="Spending Score")
|
#' @title Creatinine Normalisation
#' @description Creatinine Normalisation (CN) is a useful method much like region of interest normalisation that can normalise spectra based on the total area of the creatinine signal at the chemical shift 3.05ppm.
#' @details `creNorm()` works by dividing each element in a row with the sum of the values from its Creatinine signal.
#' @family {Attribute-Based}
#' @param X The spectra intended to be normalised. Can either be a single spectrum in the form of a numerical array or multiple spectra in a numerical matrix with the rows being the spectra/samples and the columns being the ppm variables
#' @param ppm A numerical array holding the chemical shift values of the X matrix. Only necessary when X is an array, not when X is a matrix
#' @param cre3 A concatenated numerical value of the lower and upper ppm values where the creatinine peak at 3.05 starts and ends.
#' @param cre4 A concatenated numerical value of the lower and upper ppm values where the creatinine peak at 4.05 starts and ends.
#' @param err The level of error allowed when checking the creatinine peak ratios, interpreted as a percentage (i.e., 5 = 5%)
#' @return This function assigns the normalised X argument (as X_cre) and the calculated dilution factors (as dilf_cre) to the global environment.
#' @author \email{kylebario1@@gmail.com}
#' @seealso More on the methodology of CN and the issues with using it can be found here: \url{https://doi.org/10.1021/ac051632c}
#' @examples
#' # When X contains multiple spectra, ppm is not required
#' data(X, ppm)
#' creNorm(X)
#' cat(dilf_cre)
#'
#' # When X has only one spectrum, ppm is required
#' data(X, ppm)
#' creNorm(X[1,], ppm)
#' cat(dilf_cre)
#'
#' @export
creNorm <- function(X, ppm = NULL, cre3 = c(3, 3.1), cre4 = c(4, 4.1), err = 5){
if (length(cre3)!=2 | length(cre4)!=2){
stop("Please provide only two values for each of the args cre3 and cre4. The first for each should be the lower bounds of the creatinine regions and the second should be the upper bounds.")
}
if (is.null(err)){
err = 5
}
if (is.null(dim(X))){
if (is.null(length(X))){
stop("Please provide a valid X variable. X is neither a matrix or an array")
}
if (is.null(ppm)){
stop("Please provide a X-matched ppm. None was provided and ppm cannot be determined from a single spectrum")
} else if (length(ppm)!=length(X)){
stop('Please make sure that the length of X and ppm match')
}
cat('\033[0;34mCalculating Dilfs... \033[0m')
i3 <- shift_pickr(X, ppm, cre3, 0.005)
i4 <- shift_pickr(X, ppm, cre4, 0.005)
a3 <- sum(X[i3])
a4 <- sum(X[i4])
r <- a4/a3
cat('\033[1;32mDone.\n\033[0m')
cat('\033[0;34mChecking creatinine peak ratio... \033[0m')
er <- ((2/3)/100)*err
lo <- (2/3)-er
up <- (2/3)+er
if(r<=up & r>=lo){
cat('\033[1;32mRatio is within limit.\n\033[0m')
} else{
cat('\033[1;31mThe provided spectrum is outside of the error limit.\n\033[1;33mRefer to Df_cre for more information.\n\033[0m')
}
e <- as.array(r<=up & r>=lo)
df <- data.frame(a3, r, e)
colnames(df) <- c('dilf', 'ratio', paste('ratio within a', err, '% error margin'))
cat('\033[0;34mNormalising X... \033[0m')
Xn <- X/a3
} else if (!is.null(dim(X))){
cat('\033[0;34mCalculating Dilfs... \033[0m')
if (is.null(ppm)){
p <- as.numeric(colnames(X))
} else {
if (length(ppm)!=ncol(X)){
stop('Please provide a column-matched ppm and X variable')
} else {
p <- ppm
}
}
i3 <- shift_pickr(X, p, cre3, 0.005)
i4 <- shift_pickr(X, p, cre4, 0.005)
a3 <- vapply(seq_len(nrow(X)),function(i){
j <- i3[i,]
a <- sum(X[i,j])
return(a)
}, FUN.VALUE = 1.1)
a4 <- vapply(seq_len(nrow(X)),function(i){
j <- i4[i,]
a <- sum(X[i,j])
return(a)
}, FUN.VALUE = 1.1)
r <- a4/a3 # ratio of the 4.05 ppm to 3.05 ppm peak areas, matching the single-spectrum branch and the 2/3 check below
cat('\033[1;32mDone.\n\033[0m')
cat('\033[0;34mChecking creatinine peak ratios... \033[0m')
er <- ((2/3)/100)*err
lo <- (2/3)-er
up <- (2/3)+er
if(all(r<=up & r>=lo)){
cat('\033[1;32mAll within limits.\n\033[0m')
} else{
cat('\033[1;31mspec', which(r>=up | r<=lo), 'are outside error limits.\n\033[1;33mRefer to Df_cre for more information.\n\033[0m')
}
e <- as.array(r<=up & r>=lo)
df <- data.frame(a3, r, e)
colnames(df) <- c('dilf', 'ratio', paste('ratio within a', err, '% error margin'))
cat('\033[0;34mNormalising X... \033[0m')
Xn <- t(vapply(seq_len(nrow(X)), function(i){
X[i,]/a3[i]
}, FUN.VALUE = X[1,]))
rownames(Xn) <- rownames(X)
} else {
stop("X cannot be normalised")
}
cat('\033[1;32mDone.\n\033[0m')
assign("X_cre", Xn, envir = .GlobalEnv)
assign("dilf_cre", a3, envir = .GlobalEnv)
assign("Df_cre", df, envir = .GlobalEnv)
}
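# A quick verification sketch (illustrative, not part of the package API):
# after creNorm(X) on a matrix, each normalised row should equal the raw row
# divided by its creatinine dilution factor, e.g.
#   creNorm(X)
#   all.equal(X[1, ] / dilf_cre[1], X_cre[1, ])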
|
/R/creNorm.R
|
permissive
|
kbario/concentr8r
|
R
| false | false | 5,031 |
r
|
#' @title Creatinine Normalisation
#' @description Creatinine Normalisation (CN) is a useful method much like region of interest normalisation that can normalise spectra based on the total area of the creatinine signal at the chemical shift 3.05ppm.
#' @details `creNorm()` works by dividing each element in a row with the sum of the values from its Creatinine signal.
#' @family {Attribute-Based}
#' @param X The spectra intended to be normalised. Can either be a single spectrum in the form of a numerical array or multiple spectra in a numerical matrix with the rows being the spectra/samples and the columns being the ppm variables
#' @param ppm A numerical array holding the chemical shift values of the X matrix. Only necessary when X is an array, not when X is a matrix
#' @param cre3 A concatenated numerical value of the lower and upper ppm values where the creatinine peak at 3.05 starts and ends.
#' @param cre4 A concatenated numerical value of the lower and upper ppm values where the creatinine peak at 4.05 starts and ends.
#' @param err The level of error allowed when checking the creatinine peak ratios, interpreted as a percentage (i.e., 5 = 5%)
#' @return This function assigns the normalised X argument (as X_cre) and the calculated dilution factors (as dilf_cre) to the global environment.
#' @author \email{kylebario1@@gmail.com}
#' @seealso More on the methodology of CN and the issues with using it can be found here: \url{https://doi.org/10.1021/ac051632c}
#' @examples
#' # When X contains multiple spectra, ppm is not required
#' data(X, ppm)
#' creNorm(X)
#' cat(dilf_cre)
#'
#' # When X has only one spectrum, ppm is required
#' data(X, ppm)
#' creNorm(X[1,], ppm)
#' cat(dilf_cre)
#'
#' @export
creNorm <- function(X, ppm = NULL, cre3 = c(3, 3.1), cre4 = c(4, 4.1), err = 5){
if (length(cre3)!=2 | length(cre4)!=2){
stop("Please provide only two values for each of the args cre3 and cre4. The first for each should be the lower bounds of the creatinine regions and the second should be the upper bounds.")
}
if (is.null(err)){
err = 5
}
if (is.null(dim(X))){
if (is.null(length(X))){
stop("Please provide a valid X variable. X is neither a matrix or an array")
}
if (is.null(ppm)){
stop("Please provide a X-matched ppm. None was provided and ppm cannot be determined from a single spectrum")
} else if (length(ppm)!=length(X)){
stop('Please make sure that the length of X and ppm match')
}
cat('\033[0;34mCalculating Dilfs... \033[0m')
i3 <- shift_pickr(X, ppm, cre3, 0.005)
i4 <- shift_pickr(X, ppm, cre4, 0.005)
a3 <- sum(X[i3])
a4 <- sum(X[i4])
r <- a4/a3
cat('\033[1;32mDone.\n\033[0m')
cat('\033[0;34mChecking creatinine peak ratio... \033[0m')
er <- ((2/3)/100)*err
lo <- (2/3)-er
up <- (2/3)+er
if(r<=up & r>=lo){
cat('\033[1;32mRatio is within limit.\n\033[0m')
} else{
cat('\033[1;31mThe provided spectrum is outside of the error limit.\n\033[1;33mRefer to Df_cre for more information.\n\033[0m')
}
e <- as.array(r<=up & r>=lo)
df <- data.frame(a3, r, e)
colnames(df) <- c('dilf', 'ratio', paste('ratio within a', err, '% error margin'))
cat('\033[0;34mNormalising X... \033[0m')
Xn <- X/a3
} else if (!is.null(dim(X))){
cat('\033[0;34mCalculating Dilfs... \033[0m')
if (is.null(ppm)){
p <- as.numeric(colnames(X))
} else {
if (length(ppm)!=ncol(X)){
stop('Please provide a column-matched ppm and X variable')
} else {
p <- ppm
}
}
i3 <- shift_pickr(X, p, cre3, 0.005)
i4 <- shift_pickr(X, p, cre4, 0.005)
a3 <- vapply(seq_len(nrow(X)),function(i){
j <- i3[i,]
a <- sum(X[i,j])
return(a)
}, FUN.VALUE = 1.1)
a4 <- vapply(seq_len(nrow(X)),function(i){
j <- i4[i,]
a <- sum(X[i,j])
return(a)
}, FUN.VALUE = 1.1)
r <- a4/a3 # ratio of the 4.05 ppm to 3.05 ppm peak areas, matching the single-spectrum branch and the 2/3 check below
cat('\033[1;32mDone.\n\033[0m')
cat('\033[0;34mChecking creatinine peak ratios... \033[0m')
er <- ((2/3)/100)*err
lo <- (2/3)-er
up <- (2/3)+er
if(all(r<=up & r>=lo)){
cat('\033[1;32mAll within limits.\n\033[0m')
} else{
cat('\033[1;31mspec', which(r>=up | r<=lo), 'are outside error limits.\n\033[1;33mRefer to Df_cre for more information.\n\033[0m')
}
e <- as.array(r<=up & r>=lo)
df <- data.frame(a3, r, e)
colnames(df) <- c('dilf', 'ratio', paste('ratio within a', err, '% error margin'))
cat('\033[0;34mNormalising X... \033[0m')
Xn <- t(vapply(seq_len(nrow(X)), function(i){
X[i,]/a3[i]
}, FUN.VALUE = X[1,]))
rownames(Xn) <- rownames(X)
} else {
stop("X cannot be normalised")
}
cat('\033[1;32mDone.\n\033[0m')
assign("X_cre", Xn, envir = .GlobalEnv)
assign("dilf_cre", a3, envir = .GlobalEnv)
assign("Df_cre", df, envir = .GlobalEnv)
}
|
screePlotAPA <- function(data, rep=1000, cent=.05) {
library(nFactors)
library(ggplot2)
ev <- eigen(cor(data)) # get eigenvalues
eig <- ev$values # eigenvalues
ap <- parallel(subject = nrow(data), var = ncol(data), rep = rep, cent = cent)
eig_pa <- ap$eigen$qevpea # centile of the simulated eigenvalues (set by cent)
nS <- nScree(x=ev$values, aparallel=eig_pa, model = "components")
xlab = "Components"
ylab = "Eigenvalues"
main = "Parallel Scree Test"
df <- data.frame(eig, eig_pa)
k <- 1:length(eig)
# Only plot factors up to 25% of the number of eigenvalues (variables)
n <- round(length(eig)/4)
#APA theme
apatheme = theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
text = element_text(family='Arial'),
legend.title = element_blank(),
legend.position = c(.7, .8),
axis.line.x = element_line(color = 'black'),
axis.line.y = element_line(color = 'black'))
# Plot
par(col = 1, pch = 1)
par(mfrow = c(1, 1))
#Use data from eigendat. Map number of factors to x-axis, eigenvalue to y-axis, and give different data point shapes depending on whether eigenvalue is observed or simulated
p <- ggplot(df[1:n,], aes(x=k[1:n])) +
#Add lines connecting data points
geom_line(mapping = aes(y=eig[1:n]), size=1) +
geom_point(mapping = aes(y=eig[1:n]), size=3, shape=16) +
geom_line(mapping = aes(y=eig_pa[1:n]), size=1, linetype = "dotdash") +
geom_point(mapping = aes(y=eig_pa[1:n]), size=3, shape=1) +
#Label the y-axis 'Eigenvalue'
ylab('Eigenvalue') +
#Label the x-axis 'Factor Number', and ensure that it ranges from 1-max # of factors, increasing by one with each 'tick' mark.
xlab('Factor/Component Number') +
#Add vertical line indicating parallel analysis suggested max # of factors to retain
geom_vline(xintercept = nS$Components$nparallel, linetype = 'dashed') +
#Add X limit to 25% of number of factors
xlim(1, n) +
#Apply our apa-formatting theme
apatheme
# How many Factors?
cat("Parallel Analysis (n = ", nS$Components$nparallel, ")", sep = "")
return(p)
}
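# Example usage (a sketch; assumes a complete numeric data frame such as mtcars):
#   p <- screePlotAPA(mtcars, rep = 500)
#   print(p)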
|
/Factor Analysis/screePlotAPA.R
|
no_license
|
storopoli/R_Scripts
|
R
| false | false | 2,205 |
r
|
screePlotAPA <- function(data, rep=1000, cent=.05) {
library(nFactors)
library(ggplot2)
ev <- eigen(cor(data)) # get eigenvalues
eig <- ev$values # eigenvalues
ap <- parallel(subject = nrow(data), var = ncol(data), rep = rep, cent = cent)
eig_pa <- ap$eigen$qevpea # centile of the simulated eigenvalues (set by cent)
nS <- nScree(x=ev$values, aparallel=eig_pa, model = "components")
xlab = "Components"
ylab = "Eigenvalues"
main = "Parallel Scree Test"
df <- data.frame(eig, eig_pa)
k <- 1:length(eig)
# Only plot factors up to 25% of the number of eigenvalues (variables)
n <- round(length(eig)/4)
#APA theme
apatheme = theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
text = element_text(family='Arial'),
legend.title = element_blank(),
legend.position = c(.7, .8),
axis.line.x = element_line(color = 'black'),
axis.line.y = element_line(color = 'black'))
# Plot
par(col = 1, pch = 1)
par(mfrow = c(1, 1))
#Use data from eigendat. Map number of factors to x-axis, eigenvalue to y-axis, and give different data point shapes depending on whether eigenvalue is observed or simulated
p <- ggplot(df[1:n,], aes(x=k[1:n])) +
#Add lines connecting data points
geom_line(mapping = aes(y=eig[1:n]), size=1) +
geom_point(mapping = aes(y=eig[1:n]), size=3, shape=16) +
geom_line(mapping = aes(y=eig_pa[1:n]), size=1, linetype = "dotdash") +
geom_point(mapping = aes(y=eig_pa[1:n]), size=3, shape=1) +
#Label the y-axis 'Eigenvalue'
ylab('Eigenvalue') +
#Label the x-axis 'Factor Number', and ensure that it ranges from 1-max # of factors, increasing by one with each 'tick' mark.
xlab('Factor/Component Number') +
#Add vertical line indicating parallel analysis suggested max # of factors to retain
geom_vline(xintercept = nS$Components$nparallel, linetype = 'dashed') +
#Add X limit to 25% of number of factors
xlim(1, n) +
#Apply our apa-formatting theme
apatheme
# How many Factors?
cat("Parallel Analysis (n = ", nS$Components$nparallel, ")", sep = "")
return(p)
}
|
library(nimble)
### Name: Wishart
### Title: The Wishart Distribution
### Aliases: Wishart dwish_chol rwish_chol wishart
### ** Examples
df <- 40
ch <- chol(matrix(c(1, .7, .7, 1), 2))
x <- rwish_chol(1, ch, df = df)
dwish_chol(x, ch, df = df)
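## Optional check (a sketch; assumes the default scale parameterisation of
## rwish_chol): the Cholesky factor reconstructs the scale matrix, and the
## Monte Carlo mean of many draws should be close to df * S.
S <- crossprod(ch)    # t(ch) %*% ch recovers matrix(c(1, .7, .7, 1), 2)
draws <- replicate(500, rwish_chol(1, ch, df = df))
apply(draws, c(1, 2), mean)
df * S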
|
/data/genthat_extracted_code/nimble/examples/Wishart.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 252 |
r
|
library(nimble)
### Name: Wishart
### Title: The Wishart Distribution
### Aliases: Wishart dwish_chol rwish_chol wishart
### ** Examples
df <- 40
ch <- chol(matrix(c(1, .7, .7, 1), 2))
x <- rwish_chol(1, ch, df = df)
dwish_chol(x, ch, df = df)
|
library(dplyr)
library(ggplot2)
library(lubridate)
library(stringr)
# Input -------------------------------------------------------------------
data = read.csv('gun-violence-data_01-2013_03-2018.csv')
# Understand data ---------------------------------------------------------
glimpse(data)
count_unique = data.frame()
for (i in 1:ncol(data)) {
count_unique[i,1] = sum(!is.na(data[,i]))
count_unique[i,2] = round(sum(!is.na(data[,i])) / nrow(data) * 100,3)
count_unique[i,3] = length(unique(data[,i]))
}
rownames(count_unique) = colnames(data)
colnames(count_unique) = c("Total records", "% populated", "Unique values")
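# Equivalent overview with sapply (a sketch; avoids growing the data frame
# inside the loop above):
count_unique2 = data.frame("Total records" = sapply(data, function(col) sum(!is.na(col))),
                           "% populated" = sapply(data, function(col) round(mean(!is.na(col)) * 100, 3)),
                           "Unique values" = sapply(data, function(col) length(unique(col))),
                           check.names = FALSE)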
?month
# Date --------------------------------------------------------------------
data$date = as.Date(data$date)
dqrDate = data %>%
group_by(date=round_date(date,'month')) %>%
summarize(count = n()) %>%
arrange(desc(date))
head(dqrDate)
tail(dqrDate)
ggplot(dqrDate,aes(date,count)) +
geom_line(group=1)
# State -------------------------------------------------------------------
dqrState = data %>%
group_by(state) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrState,20)
tail(dqrState,20)
# City/County -------------------------------------------------------------
dqrCity = data %>%
group_by(city_or_county) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrCity,20)
tail(dqrCity,20)
# Address -----------------------------------------------------------------
dqrAddress = data %>%
group_by(address) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrAddress,20)
tail(dqrAddress,20)
data %>%
filter(address == '2375 International Pkwy') %>%
arrange(desc(date)) %>%
select(date)
# number of killed --------------------------------------------------------
data %>%
ggplot(aes(x=1,y=n_killed)) +
geom_boxplot()
# number of injured -------------------------------------------------------
# gun stolen --------------------------------------------------------------
dqrGunStolen = data %>%
group_by(gun_stolen) %>% # gun_stolen assumed to be the relevant column in this export
summarize(count = n()) %>%
arrange(desc(count))
head(dqrGunStolen,20)
tail(dqrGunStolen,20)
# gun type ----------------------------------------------------------------
dqrGunType = data %>%
group_by(gun_type) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrGunType,20)
tail(dqrGunType,20)
# incident characteristics ------------------------------------------------
# location description ----------------------------------------------------
# number of guns involved -------------------------------------------------
# participant age ---------------------------------------------------------
# participant age group ---------------------------------------------------
# participant gender ------------------------------------------------------
# participant name --------------------------------------------------------
# participant relationship ------------------------------------------------
# participant status ------------------------------------------------------
# participant type -------------------------------------------------------
# Strategy:
# 1. Explore + understand the data
# 2. Experiment + create raw charts
# 3. Theme-tify all charts
# 4. Structure + add wordings
|
/Gun Violence in US/EDA.R
|
no_license
|
kennhan/DataVis
|
R
| false | false | 3,389 |
r
|
library(dplyr)
library(ggplot2)
library(lubridate)
library(stringr)
# Input -------------------------------------------------------------------
data = read.csv('gun-violence-data_01-2013_03-2018.csv')
# Understand data ---------------------------------------------------------
glimpse(data)
count_unique = data.frame()
for (i in 1:ncol(data)) {
count_unique[i,1] = sum(!is.na(data[,i]))
count_unique[i,2] = round(sum(!is.na(data[,i])) / nrow(data) * 100,3)
count_unique[i,3] = length(unique(data[,i]))
}
rownames(count_unique) = colnames(data)
colnames(count_unique) = c("Total records", "% populated", "Unique values")
?month
# Date --------------------------------------------------------------------
data$date = as.Date(data$date)
dqrDate = data %>%
group_by(date=round_date(date,'month')) %>%
summarize(count = n()) %>%
arrange(desc(date))
head(dqrDate)
tail(dqrDate)
ggplot(dqrDate,aes(date,count)) +
geom_line(group=1)
# State -------------------------------------------------------------------
dqrState = data %>%
group_by(state) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrState,20)
tail(dqrState,20)
# City/County -------------------------------------------------------------
dqrCity = data %>%
group_by(city_or_county) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrCity,20)
tail(dqrCity,20)
# Address -----------------------------------------------------------------
dqrAddress = data %>%
group_by(address) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrAddress,20)
tail(dqrAddress,20)
data %>%
filter(address == '2375 International Pkwy') %>%
arrange(desc(date)) %>%
select(date)
# number of killed --------------------------------------------------------
data %>%
ggplot(aes(x=1,y=n_killed)) +
geom_boxplot()
# number of injured -------------------------------------------------------
# gun stolen --------------------------------------------------------------
dqrGunStolen = data %>%
group_by(gun_stolen) %>% # gun_stolen assumed to be the relevant column in this export
summarize(count = n()) %>%
arrange(desc(count))
head(dqrGunStolen,20)
tail(dqrGunStolen,20)
# gun type ----------------------------------------------------------------
dqrGunType = data %>%
group_by(gun_type) %>%
summarize(count = n()) %>%
arrange(desc(count))
head(dqrGunType,20)
tail(dqrGunType,20)
# incident characteristics ------------------------------------------------
# location description ----------------------------------------------------
# number of guns involved -------------------------------------------------
# participant age ---------------------------------------------------------
# participant age group ---------------------------------------------------
# participant gender ------------------------------------------------------
# participant name --------------------------------------------------------
# participant relationship ------------------------------------------------
# participant status ------------------------------------------------------
# participant type -------------------------------------------------------
# Strategy:
# 1. Explore + understand the data
# 2. Experiment + create raw charts
# 3. Theme-tify all charts
# 4. Structure + add wordings
|
##Plot 2
#Getting the data and transforming the first two columns into a single variable for time
tabela1 <- read.table(file = "household_power_consumption.txt",sep = ";",header = T,colClasses = c(rep("character",2),rep("numeric",7)),na.strings = "?")
tabela1[,1]<- as.Date(tabela1[,1],"%d/%m/%Y")
tabela2 <- subset.data.frame(tabela1,tabela1[,1]<="2007-02-02" & tabela1[,1] >= "2007-02-01")
teste2 <- strptime(paste(tabela2[,1],tabela2[,2]),format = "%Y-%m-%d %H:%M:%S")
tabela4 <- cbind(teste2,tabela2[,3:9])
##Plot 2
#Global Active Power by Period of Time - Line
png(filename = "Plot 2.png",width = 480, height = 480)
plot(tabela4$Global_active_power ~ tabela4$teste2, type = "l",ylab = "Global Active Power (kilowatts)", xlab="")
dev.off()
|
/Plot 2.R
|
no_license
|
Alibanio/ExData_Plotting1
|
R
| false | false | 744 |
r
|
##Plot 2
#Getting the data and transforming the first two columns into a single variable for time
tabela1 <- read.table(file = "household_power_consumption.txt",sep = ";",header = T,colClasses = c(rep("character",2),rep("numeric",7)),na.strings = "?")
tabela1[,1]<- as.Date(tabela1[,1],"%d/%m/%Y")
tabela2 <- subset.data.frame(tabela1,tabela1[,1]<="2007-02-02" & tabela1[,1] >= "2007-02-01")
teste2 <- strptime(paste(tabela2[,1],tabela2[,2]),format = "%Y-%m-%d %H:%M:%S")
tabela4 <- cbind(teste2,tabela2[,3:9])
##Plot 2
#Global Active Power by Period of Time - Line
png(filename = "Plot 2.png",width = 480, height = 480)
plot(tabela4$Global_active_power ~ tabela4$teste2, type = "l",ylab = "Global Active Power (kilowatts)", xlab="")
dev.off()
|
#install.packages("smoothmest")
#install.packages("extraDistr")
#install.packages("truncdist")
library(MASS)
#library(smoothmest)
library(stats)
library(extraDistr)
set.seed(106)
eps=runif(15)
dexp_err = -log(log(eps^-1))
f = function( x, alpha , beta, gamma){
alpha*exp(-exp(beta-gamma*x))
}
x = seq(from=1,to=15)
y = f(x, 22.37 , 2.14 , 0.395) + dexp_err
plot(x,y,type = "l")
bs = cbind(x,y)
for (i in 1:1000){
x_bs = sample(x,15,replace = TRUE)
y_bs= f(x_bs, 22.37 , 2.14 , 0.395) + rgumbel(15, mu = 0, sigma = 1)
bs = rbind(bs , cbind(x_bs,y_bs))
}
bs=data.frame(bs)
avr = NULL
low=NULL
upp=NULL
for(i in 1:15){
bs_s = bs[which(bs$x==i),]
a=mean(bs_s$y)
avr[i] = a
low[i] = a - 1.96*sd(bs_s$y)
upp[i] = a + 1.96*sd(bs_s$y)
}
plot(x,avr,type = "l", ylim =range(-2,25), ylab = "Y")
lines(x,low, type="b", pch=22, col="red", lty=2)
lines(x,upp, type="b", pch=22, col="red", lty=2)
points(x,y,col = "blue", pch = 20)
#############
### MLE for parameter
############
mle= function(x,y,alpha,beta,gamma){ # objective: negative sum of absolute residuals (larger is better)
ll=0
for ( i in 1:15){
l= -abs(y[i]-alpha*exp(-exp(beta-gamma*x[i])))
ll=ll+l
}
return(ll)
}
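# Quick sanity check (a sketch): the objective should be noticeably larger
# (closer to 0) at the parameters used to simulate y than at a perturbed set.
mle(x, y, 22.37, 2.14, 0.395)
mle(x, y, 20, 2, 0.3)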
################
###initial population
population = cbind(a=runif(200,min=15,max=30),b=runif(200,min=0,max=5),c=runif(200,min=0,max=1) )
###########
##selection##
###########
selec = function(population ){
new_pop = matrix(NA,200,3) # initialize next generation
for( i in 1:100){
f1= mle(x,y, population[i,1], population[i,2], population[i,3])
f2= mle(x,y, population[i+100,1], population[i+100,2], population[i+100,3])
if (f1 > f2){new_pop[i,]= population[i,]}
else {new_pop[i,]= population[i+100,]}
}
return(new_pop)
}
#############
##recombination
#############
library(truncdist)
recomb = function() {
for(j in 1:100){
s=sample(1:100,2)
alphas = new_pop[s,1]
betas = new_pop[s,2]
gammas = new_pop[s,3]
a_d = alphas[1]
a_m = alphas[2]
b_d = betas[1]
b_m = betas[2]
g_d = gammas[1]
g_m = gammas[2]
if(a_d > a_m){a_baby = (a_d+a_m)/2 + rtrunc(1, spec="cauchy",
a=(30-a_d-a_m)/(a_d-a_m) ,b=(50-a_d-a_m)/(a_d-a_m))*(a_d-a_m)/2}
else if(a_d < a_m){a_baby = (a_d+a_m)/2 + rtrunc(1, spec="cauchy",
a=(50-a_d-a_m)/(a_d-a_m) ,b=(30-a_d-a_m)/(a_d-a_m))*(a_d-a_m)/2}
else {a_baby = 0.5*(a_d+a_m) }
if(b_d>b_m){b_baby = (b_d+b_m)/2 + rtrunc(1, spec="cauchy",
a=(-b_d-b_m)/(b_d-b_m) ,b=(10-b_d-b_m)/(b_d-b_m))*(b_d-b_m)/2}
else if(b_d < b_m){b_baby = (b_d+b_m)/2 + rtrunc(1, spec="cauchy",
a=(10-b_d-b_m)/(b_d-b_m),b=(-b_d-b_m)/(b_d-b_m))*(b_d-b_m)/2}
else {b_baby = 0.5*(b_d+b_m) }
if(g_d>g_m){g_baby = (g_d+g_m)/2 + rtrunc(1, spec="cauchy",
a=(-g_d-g_m)/(g_d-g_m) ,b=(2-g_d-g_m)/(g_d-g_m))*(g_d-g_m)/2}
else if(g_d < g_m){g_baby = (g_d+g_m)/2 + rtrunc(1, spec="cauchy",
a=(2-g_d-g_m)/(g_d-g_m) ,b=(-g_d-g_m)/(g_d-g_m))*(g_d-g_m)/2}
else {g_baby = 0.5*(g_d+g_m) }
new_pop[j+100,1] = a_baby
new_pop[j+100,2] = b_baby
new_pop[j+100,3] = g_baby
}
return(new_pop)
}
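# Worked example of the blend above (numbers purely illustrative): with parents
# a_d = 20 and a_m = 24, the offspring is
#   a_baby = (20 + 24)/2 + Z * (20 - 24)/2 = 22 - 2*Z,
# where Z is a Cauchy draw truncated to the a=/b= limits in the call, so the
# offspring stays inside a fixed interval set by those constants (here alpha
# remains between 15 and 25).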
################
### randomly select 10 members from each generation and keep them
################
num_cycle = 100
##dim = c(10,3,num_cycle)
gen_num = 1
gens = list() # history of sampled members for this run; filled as gens[[m]] in the loop below
id=sample(1:200,10,replace=FALSE)
gens[[1]] = population[id,]
#gen_sub(population)
##############
###mutation####
###############
#############
### Evolution
##########
for (m in 2:num_cycle){
new_pop = selec(population)
new_pop = recomb()
##########
population = new_pop
gen_num = m
id=sample(1:200,10,replace=FALSE)
gens[[m]]= population[id,]
#new_pop = matrix(NA,200,3)
}
sin=NULL
for (i in 1:100){
sin[i]= gens[[i]][5,2]
}
plot(sin,type = "l")
#####################
####################
seed = sample(1:999999999, 1000, replace = FALSE)
big_gens=list()
for (k in 1:1000){
set.seed(seed[k])
population = cbind(a=runif(200,min=15,max=30),b=runif(200,min=0,max=5),c=runif(200,min=0,max=1) )
id=sample(1:200,10,replace=FALSE)
big_gens[[k]]= list()
big_gens[[k]][[1]]=population[id,]
for (m in 2:num_cycle){
new_pop = selec(population)
new_pop = recomb()
##########
population = new_pop
gen_num = m
id=sample(1:200,10,replace=FALSE)
big_gens[[k]][[m]]= population[id,]
#new_pop = matrix(NA,200,3)
}
}
#save(big_gens, file="proj1_result.RData")
alpha=rep(NA,1000)
beta=rep(NA,1000)
gamma=rep(NA,1000)
for (i in 1:1000){
alpha[i]=big_gens[[i]][[100]][10,1]
beta[i]= big_gens[[i]][[100]][10,2]
gamma[i]= big_gens[[i]][[100]][10,3]
}
hist(alpha,breaks = "scott",col="yellow", xlab = bquote(alpha),main="")
hist(beta,breaks = "scott",xlab = bquote(beta),main="")
hist(gamma,breaks = "scott",xlab = bquote(gamma),main="")
mean(alpha)
mean(beta)
mean(gamma)
par(mfrow=c(1,3))
hist(alpha,breaks = "scott",col="yellow",xlab = bquote(alpha),main="")
hist(beta,breaks = "scott",col="yellow",xlab = bquote(beta),main="")
hist(gamma,breaks = "scott",col="yellow",xlab = bquote(gamma),main="")
|
/proj1.R
|
no_license
|
sinasanei/Genetic-Algorithms-and-their-applications-in-statistics
|
R
| false | false | 5,033 |
r
|
#install.packages("smoothmest")
#install.packages("extraDistr")
#install.packages("truncdist")
library(MASS)
#library(smoothmest)
library(stats)
library(extraDistr)
set.seed(106)
eps=runif(15)
dexp_err = -log(log(eps^-1))
f = function( x, alpha , beta, gamma){
alpha*exp(-exp(beta-gamma*x))
}
x = seq(from=1,to=15)
y = f(x, 22.37 , 2.14 , 0.395) + dexp_err
plot(x,y,type = "l")
bs = cbind(x,y)
for (i in 1:1000){
x_bs = sample(x,15,replace = TRUE)
y_bs= f(x_bs, 22.37 , 2.14 , 0.395) + rgumbel(15, mu = 0, sigma = 1)
bs = rbind(bs , cbind(x_bs,y_bs))
}
bs=data.frame(bs)
avr = NULL
low=NULL
upp=NULL
for(i in 1:15){
bs_s = bs[which(bs$x==i),]
a=mean(bs_s$y)
avr[i] = a
low[i] = a - 1.96*sd(bs_s$y)
upp[i] = a + 1.96*sd(bs_s$y)
}
plot(x,avr,type = "l", ylim =range(-2,25), ylab = "Y")
lines(x,low, type="b", pch=22, col="red", lty=2)
lines(x,upp, type="b", pch=22, col="red", lty=2)
points(x,y,col = "blue", pch = 20)
#############
### MLE for parameter
############
mle= function(x,y,alpha,beta,gamma){ # objective: negative sum of absolute residuals (larger is better)
ll=0
for ( i in 1:15){
l= -abs(y[i]-alpha*exp(-exp(beta-gamma*x[i])))
ll=ll+l
}
return(ll)
}
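# (added illustration, assuming the x and y simulated above; ll_true and ll_poor are
# new names introduced only for this check) the GA's selection step compares candidates
# by this sum of negative absolute residuals, so the true parameters should score
# higher (closer to 0) than a poor candidate:
ll_true <- mle(x, y, 22.37, 2.14, 0.395)
ll_poor <- mle(x, y, 15, 1, 0.1)
c(ll_true = ll_true, ll_poor = ll_poor)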
################
###initial population
population = cbind(a=runif(200,min=15,max=30),b=runif(200,min=0,max=5),c=runif(200,min=0,max=1) )
###########
##selection##
###########
selec = function(population ){
new_pop = matrix(NA,200,3) # initialize next generation
for( i in 1:100){
f1= mle(x,y, population[i,1], population[i,2], population[i,3])
f2= mle(x,y, population[i+100,1], population[i+100,2], population[i+100,3])
if (f1 > f2){new_pop[i,]= population[i,]}
else {new_pop[i,]= population[i+100,]}
}
return(new_pop)
}
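# (added note) selec() performs pairwise tournament selection: individual i survives
# if its fitness beats that of individual i+100, so the winners fill rows 1-100 of
# new_pop and rows 101-200 are filled later by recomb().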
#############
##recombination
#############
library(truncdist)
recomb = function() {
for(j in 1:100){
s=sample(1:100,2)
alphas = new_pop[s,1]
betas = new_pop[s,2]
gammas = new_pop[s,3]
a_d = alphas[1]
a_m = alphas[2]
b_d = betas[1]
b_m = betas[2]
g_d = gammas[1]
g_m = gammas[2]
if(a_d > a_m){a_baby = (a_d+a_m)/2 + rtrunc(1, spec="cauchy",
a=(30-a_d-a_m)/(a_d-a_m) ,b=(50-a_d-a_m)/(a_d-a_m))*(a_d-a_m)/2}
else if(a_d < a_m){a_baby = (a_d+a_m)/2 + rtrunc(1, spec="cauchy",
a=(50-a_d-a_m)/(a_d-a_m) ,b=(30-a_d-a_m)/(a_d-a_m))*(a_d-a_m)/2}
else {a_baby = 0.5*(a_d+a_m) }
if(b_d>b_m){b_baby = (b_d+b_m)/2 + rtrunc(1, spec="cauchy",
a=(-b_d-b_m)/(b_d-b_m) ,b=(10-b_d-b_m)/(b_d-b_m))*(b_d-b_m)/2}
else if(b_d < b_m){b_baby = (b_d+b_m)/2 + rtrunc(1, spec="cauchy",
a=(10-b_d-b_m)/(b_d-b_m),b=(-b_d-b_m)/(b_d-b_m))*(b_d-b_m)/2}
else {b_baby = 0.5*(b_d+b_m) }
if(g_d>g_m){g_baby = (g_d+g_m)/2 + rtrunc(1, spec="cauchy",
a=(-g_d-g_m)/(g_d-g_m) ,b=(2-g_d-g_m)/(g_d-g_m))*(g_d-g_m)/2}
else if(g_d < g_m){g_baby = (g_d+g_m)/2 + rtrunc(1, spec="cauchy",
a=(2-g_d-g_m)/(g_d-g_m) ,b=(-g_d-g_m)/(g_d-g_m))*(g_d-g_m)/2}
else {g_baby = 0.5*(g_d+g_m) }
new_pop[j+100,1] = a_baby
new_pop[j+100,2] = b_baby
new_pop[j+100,3] = g_baby
}
return(new_pop)
}
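# (added illustration, kept commented out so it does not consume random numbers used
# by the seeded runs below) one offspring draw for the alpha pair a_d = 20, a_m = 25,
# using the same truncated-Cauchy blend as in the a_d < a_m branch above; the result
# stays inside the initial range [15, 30]:
# (20 + 25)/2 + rtrunc(1, spec = "cauchy",
#                      a = (50 - 20 - 25)/(20 - 25),
#                      b = (30 - 20 - 25)/(20 - 25)) * (20 - 25)/2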
################
### randomly select 10 members from each generation and keep them
################
num_cycle = 100 #
##dim = c(10,3,num_cycle)
gen_num = 1
big_gens = list(list())
gens = list()  # must be initialized here: the evolution loop below fills gens[[m]] and the trace plot reads it
id=sample(1:200,10,replace=FALSE)
gens[[1]] = population[id,]
big_gens[[1]][[1]]= population[id,]
#gen_sub(population)
##############
###mutation####
###############
#############
### Evolution
##########
for (m in 2:num_cycle){
new_pop = selec(population)
new_pop = recomb()
##########
population = new_pop
gen_num = m
id=sample(1:200,10,replace=FALSE)
gens[[m]]= population[id,]
#new_pop = matrix(NA,200,3)
}
sin=NULL
for (i in 1:100){
sin[i]= gens[[i]][5,2]
}
plot(sin,type = "l")
#####################
####################
seed = sample(1:999999999, 1000, replace = FALSE)
big_gens=list()
for (k in 1:1000){
set.seed(seed[k])
population = cbind(a=runif(200,min=15,max=30),b=runif(200,min=0,max=5),c=runif(200,min=0,max=1) )
id=sample(1:200,10,replace=FALSE)
big_gens[[k]]= list()
big_gens[[k]][[1]]=population[id,]
for (m in 2:num_cycle){
new_pop = selec(population)
new_pop = recomb()
##########
population = new_pop
gen_num = m
id=sample(1:200,10,replace=FALSE)
big_gens[[k]][[m]]= population[id,]
#new_pop = matrix(NA,200,3)
}
}
#save(big_gens, file="proj1_result.RData")
alpha=rep(NA,1000)
beta=rep(NA,1000)
gamma=rep(NA,1000)
for (i in 1:1000){
alpha[i]=big_gens[[i]][[100]][10,1]
beta[i]= big_gens[[i]][[100]][10,2]
gamma[i]= big_gens[[i]][[100]][10,3]
}
hist(alpha,breaks = "scott",col="yellow", xlab = bquote(alpha),main="")
hist(beta,breaks = "scott",xlab = bquote(beta),main="")
hist(gamma,breaks = "scott",xlab = bquote(gamma),main="")
mean(alpha)
mean(beta)
mean(gamma)
par(mfrow=c(1,3))
hist(alpha,breaks = "scott",col="yellow",xlab = bquote(alpha),main="")
hist(beta,breaks = "scott",col="yellow",xlab = bquote(beta),main="")
hist(gamma,breaks = "scott",col="yellow",xlab = bquote(gamma),main="")
|
library(tidyverse)
library(imager)
#############################
#
# Color transformation
#
###########################
bi <- load.image("images/black_iris.jpg")
bi_df <- bi %>%
as.data.frame() %>%
mutate(cc = factor(cc,labels=c('R','G','B'))) %>%
group_by(cc) %>%
mutate(cd = ecdf(value)(value) * 3.5) %>%
ungroup
#### Color density analysis
p_density <- bi_df %>%
ggplot() +
geom_density(aes(value, fill = cc, color = cc), alpha = 0.1, size = 0.75) +
scale_fill_manual(name = "channel",
values = c("R" = "red", "G" = "green", "B" = "blue")) +
scale_color_manual(name = "channel",
values = c("R" = "red", "G" = "green", "B" = "blue")) +
scale_y_continuous(sec.axis = sec_axis(~./3.5, name = "cumulative distribution")) +
theme(legend.position = "none")
#ggsave("output/density.png", p_density)
p_cumulative <- p_density +
geom_line(aes(value, cd, color = cc), size = 0.75, linetype = "dashed")
#ggsave("output/cumulative.png", p_cumulative)
#### Red enhancement
R(bi) <- as.cimg(ecdf(R(bi))(R(bi)),dim=dim(R(bi)))
#save.image(bi, "output/bf_red.jpg", quality = 1)
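#### (added sketch) the assignment above performs per-channel histogram equalisation;
#### a reusable helper, kept commented out to avoid equalising the channel twice:
# hist_eq <- function(ch) as.cimg(ecdf(ch)(ch), dim = dim(ch))
# R(bi) <- hist_eq(R(bi))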
###########################
#
# Edge detection
#
###########################
aw <- load.image("images/abstract_wave.jpg")
wave_mask <- aw %>%
isoblur(1) %>%
grayscale() %>%
imgradient("xy") %>%
enorm() %>%
threshold("80%")
wave <- as.cimg(wave_mask)
wave[which(wave_mask)] <- 0.9
#save.image(wave, "output/wave.jpg", quality = 1)
###########################
#
# Masking
#
###########################
drawing <- load.image("images/drawing_xii.jpg")
mask <- drawing %>%
isoblur(7) %>%
grayscale() %>%
threshold("25%")
m_drawing <- drawing
alpha <- 0.7
R(m_drawing)[which(mask)] <- alpha + (1 - alpha) * R(m_drawing)[which(mask)]
G(m_drawing)[which(mask)] <- (1 - alpha) * G(m_drawing)[which(mask)]
B(m_drawing)[which(mask)] <- (1 - alpha) * B(m_drawing)[which(mask)]
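# (added note) the three assignments above tint masked pixels toward red: the red
# channel is blended as alpha + (1 - alpha) * old, while green and blue are simply
# scaled by (1 - alpha).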
save.image(m_drawing, "output/m_drawing.jpg", quality = 1)
|
/okeeffe.R
|
no_license
|
doritge/img_processing
|
R
| false | false | 1,987 |
r
|
library(tidyverse)
library(imager)
#############################
#
# Color transformation
#
###########################
bi <- load.image("images/black_iris.jpg")
bi_df <- bi %>%
as.data.frame() %>%
mutate(cc = factor(cc,labels=c('R','G','B'))) %>%
group_by(cc) %>%
mutate(cd = ecdf(value)(value) * 3.5) %>%
ungroup
#### Color density analysis
p_density <- bi_df %>%
ggplot() +
geom_density(aes(value, fill = cc, color = cc), alpha = 0.1, size = 0.75) +
scale_fill_manual(name = "channel",
values = c("R" = "red", "G" = "green", "B" = "blue")) +
scale_color_manual(name = "channel",
values = c("R" = "red", "G" = "green", "B" = "blue")) +
scale_y_continuous(sec.axis = sec_axis(~./3.5, name = "cumulative distribution")) +
theme(legend.position = "none")
#ggsave("output/density.png", p_density)
p_cumulative <- p_density +
geom_line(aes(value, cd, color = cc), size = 0.75, linetype = "dashed")
#ggsave("output/cumulative.png", p_cumulative)
#### Red enhancement
R(bi) <- as.cimg(ecdf(R(bi))(R(bi)),dim=dim(R(bi)))
#save.image(bi, "output/bf_red.jpg", quality = 1)
###########################
#
# Edge detection
#
###########################
aw <- load.image("images/abstract_wave.jpg")
wave_mask <- aw %>%
isoblur(1) %>%
grayscale() %>%
imgradient("xy") %>%
enorm() %>%
threshold("80%")
wave <- as.cimg(wave_mask)
wave[which(wave_mask)] <- 0.9
#save.image(wave, "output/wave.jpg", quality = 1)
###########################
#
# Masking
#
###########################
drawing <- load.image("images/drawing_xii.jpg")
mask <- drawing %>%
isoblur(7) %>%
grayscale() %>%
threshold("25%")
m_drawing <- drawing
alpha <- 0.7
R(m_drawing)[which(mask)] <- alpha + (1 - alpha) * R(m_drawing)[which(mask)]
G(m_drawing)[which(mask)] <- (1 - alpha) * G(m_drawing)[which(mask)]
B(m_drawing)[which(mask)] <- (1 - alpha) * B(m_drawing)[which(mask)]
save.image(m_drawing, "output/m_drawing.jpg", quality = 1)
|
##
## Plot closeness
##
closeness <- function(file){
# Cumulative proportion of intronic sQTLs/non-sQTLs distances to the closest exon
table<-read.table(file)
distances<-as.factor(table[,2])
df<-as.data.frame(table(factor(distances)))
df<-df[-c(1),]
df<-rbind(c(0,0),df)
total= sum(df$Freq)
df$h = df$Freq/total
for (i in 1:nrow(df)){
if (i==1){
Acum <-df[i,3]
v<-c(Acum)
}else{
Acum <- Acum + df[i,3]
v<-c(v,Acum)
}
}
df<-cbind(df,v)
return(df)
}
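## (added note) the accumulation loop above is equivalent to the vectorised form
## df$v <- cumsum(df$h), since df$h already holds the per-distance proportions.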
## Arguments from the command line
args <- commandArgs(TRUE)
sqtls.df<-closeness(args[1]) # closeness.txt
control.df<-closeness(args[2]) # closeness.control.txt
## PNG plot
png(args[3])
plot(type="l",as.numeric(as.character(sqtls.df[1:5001,1])),sqtls.df[1:5001,4],col="darkgreen",ylim=c(0, 1), xlim=c(0,5000), main="Intronic SNPs", xlab="Distance to the closest exon (bp)",ylab="Cumulative proportion")
lines(type="l",control.df[1:5001,4],col="darkred")
dev.off()
|
/4.Blueprint/Enrichments/closeness.R
|
no_license
|
dgarmar/MT
|
R
| false | false | 975 |
r
|
##
## Plot closeness
##
closeness <- function(file){
# Cumulative proportion of intronic sQTLs/non-sQTLs distances to the closest exon
table<-read.table(file)
distances<-as.factor(table[,2])
df<-as.data.frame(table(factor(distances)))
df<-df[-c(1),]
df<-rbind(c(0,0),df)
total= sum(df$Freq)
df$h = df$Freq/total
for (i in 1:nrow(df)){
if (i==1){
Acum <-df[i,3]
v<-c(Acum)
}else{
Acum <- Acum + df[i,3]
v<-c(v,Acum)
}
}
df<-cbind(df,v)
return(df)
}
## Arguments from the command line
args <- commandArgs(TRUE)
sqtls.df<-closeness(args[1]) # closeness.txt
control.df<-closeness(args[2]) # closeness.control.txt
## PNG plot
png(args[3])
plot(type="l",as.numeric(as.character(sqtls.df[1:5001,1])),sqtls.df[1:5001,4],col="darkgreen",ylim=c(0, 1), xlim=c(0,5000), main="Intronic SNPs", xlab="Distance to the closest exon (bp)",ylab="Cumulative proportion")
lines(type="l",control.df[1:5001,4],col="darkred")
dev.off()
|
setwd("C:/Users/smull2/R/exploratory")
pfile<-file("household_power_consumption.txt","r")
power.raw<-read.table(text = grep("^[1,2]/2/2007",readLines(pfile),value=TRUE),sep=";",dec=".",na.strings="?",header=FALSE,stringsAsFactors=FALSE)
str(power.raw)
##Use colClasses to help when readLines
##colClass<-read.table("household_power_consumption.txt",header=TRUE,sep=";",stringsAsFactors=FALSE,nrows=3)
##classes <- sapply(colClass,class)
pfile<-file("household_power_consumption.txt","r")
power.colHeaders<-read.table(text = grep("Date",readLines(pfile),value=TRUE),sep=";",dec=".",na.strings="?",header=FALSE,stringsAsFactors=FALSE)
colnames(power.raw) <- power.colHeaders
str(power.raw)
colnames(power.raw) <- tolower(colnames(power.raw))
power.data <- power.raw
##combine date and time to create timestamp. convert date field to date
##if ran these in opposite order - result NAs. Suspect strptime requires character vector inputs
power.data$ts =strptime(paste(power.data$date, power.data$time), format = "%d/%m/%Y %H:%M:%S")
power.data$date <- as.Date(power.data$date,format="%d/%m/%Y")
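##(added illustration, commented out) why the order matters: strptime() needs character
##input, so once date has been converted to Date, paste() yields "2007-02-01 ..." which
##no longer matches "%d/%m/%Y %H:%M:%S" and parses to NA:
##strptime(paste("1/2/2007", "00:00:00"), format = "%d/%m/%Y %H:%M:%S")   # POSIXlt value
##strptime(paste(as.Date("1/2/2007", "%d/%m/%Y"), "00:00:00"),
##         format = "%d/%m/%Y %H:%M:%S")                                  # NA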
with(power.data, hist(global_active_power, breaks=12,col="red", main="Global Active Power", xlab= "Global Active Power (kilowatts)"))
dev.copy(png, file = "plot1.png")
dev.off() ## Don't forget to close the png device
##how can we use OR in grep
##power.file <- read.table(text = grep("Date | ^[1,2]/2/2007",readLines(pfile),value=TRUE),sep=";",dec=".",na.strings="?",header=TRUE,stringsAsFactors=FALSE)
|
/Plot1.R
|
no_license
|
smullen17/ExData_Plotting1
|
R
| false | false | 1,501 |
r
|
setwd("C:/Users/smull2/R/exploratory")
pfile<-file("household_power_consumption.txt","r")
power.raw<-read.table(text = grep("^[1,2]/2/2007",readLines(pfile),value=TRUE),sep=";",dec=".",na.strings="?",header=FALSE,stringsAsFactors=FALSE)
str(power.raw)
##Use colClasses to help when readLines
##colClass<-read.table("household_power_consumption.txt",header=TRUE,sep=";",stringsAsFactors=FALSE,nrows=3)
##classes <- sapply(colClass,class)
pfile<-file("household_power_consumption.txt","r")
power.colHeaders<-read.table(text = grep("Date",readLines(pfile),value=TRUE),sep=";",dec=".",na.strings="?",header=FALSE,stringsAsFactors=FALSE)
colnames(power.raw) <- power.colHeaders
str(power.raw)
colnames(power.raw) <- tolower(colnames(power.raw))
power.data <- power.raw
##combine date and time to create timestamp. convert date field to date
##if ran these in opposite order - result NAs. Suspect strptime requires character vector inputs
power.data$ts =strptime(paste(power.data$date, power.data$time), format = "%d/%m/%Y %H:%M:%S")
power.data$date <- as.Date(power.data$date,format="%d/%m/%Y")
with(power.data, hist(global_active_power, breaks=12,col="red", main="Global Active Power", xlab= "Global Active Power (kilowatts)"))
dev.copy(png, file = "plot1.png")
dev.off() ## Don't forget to close the png device
##how can we use OR in grep
##power.file <- read.table(text = grep("Date | ^[1,2]/2/2007",readLines(pfile),value=TRUE),sep=";",dec=".",na.strings="?",header=TRUE,stringsAsFactors=FALSE)
|
normalize_pipe_rhs <- function(rhs, binding) {
if (!rlang::is_quosure(rhs)) {
stop(paste("'rhs' parameter must be a quosure. Try calling",
"'rhs <- rlang::enquo(rhs)' first"))
}
# Turn bare symbols into functions.
if (rlang::is_symbol(rlang::f_rhs(rhs))) {
rlang::f_rhs(rhs) <- rlang::lang(rlang::f_rhs(rhs))
}
  # Prevent some pathological inputs, such as `x %>>=% "cats"`.
if (!rlang::is_lang(rlang::f_rhs(rhs))) {
stop("RHS must be callable; '", rlang::f_rhs(rhs), "' is not",
call. = FALSE)
}
# Handle parenthetical inputs and bare anonymous functions.
rhs <- normalize_anon_fns(rhs)
# Blocks are simply executed with `.` bound.
if (!is_block(rhs)) {
rhs <- ensure_dot(rhs)
}
# Modify the environment so that `.` is bound to `binding`.
rlang::f_env(rhs) <- rlang::child_env(.parent = rlang::f_env(rhs),
. = binding)
rhs
}
normalize_anon_fns <- function(expr) {
stopifnot(rlang::is_quosure(expr) && rlang::is_lang(rlang::f_rhs(expr)))
rhs <- rlang::f_rhs(expr)
# First, check for bare anonymous functions and wrap them in parentheses.
if (identical(function_symbol, rlang::node_car(rhs))) {
rlang::f_rhs(expr) <- rlang::lang(rhs)
return(expr)
}
# Look for the first part of the pairlist that isn't another expression. This
# supports forms like `(function(x, y) log(x, y))(., 20)`.
while (rlang::is_lang(rlang::node_car(rhs))) {
rhs <- rlang::node_car(rhs)
}
if (identical(paren_symbol, rlang::node_car(rhs))) {
inside <- rlang::node_cadr(rhs)
if (!rlang::is_lang(inside) ||
!identical(function_symbol, rlang::node_car(inside))) {
stop("parenthesized expressions must contain an anonymous function",
call. = FALSE)
}
# If the right-hand-side doesn't have arguments, add them. This effectively
# turns forms like `(function(x) x)` into `(function(x) x)()`, meaning they
# can be handled natively by `ensure_dot()`.
if (!rlang::is_lang(rlang::node_car(rlang::f_rhs(expr)))) {
rlang::f_rhs(expr) <- rlang::lang(rlang::f_rhs(expr))
}
}
expr
}
is_block <- function(expr) {
stopifnot(rlang::is_quosure(expr) && rlang::is_lang(rlang::f_rhs(expr)))
car <- rlang::node_car(rlang::f_rhs(expr))
# Look for the first part of the pairlist that isn't another expression. This
# supports forms like `{ y <- . * 2; function() { log(., y) } }()`, although
# it seems unlikely they are useful.
while (rlang::is_lang(car)) {
car <- rlang::node_car(car)
}
identical(bracket_symbol, car)
}
ensure_dot <- function(expr) {
stopifnot(rlang::is_quosure(expr) && rlang::is_lang(rlang::f_rhs(expr)))
# The right-hand side is a CONS where the CAR is the function symbol and the
# CDR is the pairlist of arguments (or NULL).
args <- rlang::node_cdr(rlang::f_rhs(expr))
# Loop over the CONS cells.
arg <- args
while (!rlang::is_null(arg)) {
# The CAR of an argument is its value, as in `name = value` or just `value`
# if it is not a named argument. We're looking for dots in these values. If
# we find one, the quosure is fine as-is.
if (identical(dot_symbol, rlang::node_car(arg))) {
return(expr)
}
arg <- rlang::node_cdr(arg)
}
# No dots found. Prefix the argument CONS with one, in place.
new_args <- rlang::node(dot_symbol, args)
rlang::mut_node_cdr(rlang::f_rhs(expr), new_args)
invisible(expr)
}
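# (added illustration) ensure_dot() leaves calls that already mention `.` untouched and
# otherwise prepends `.` as the first argument, so a quosure wrapping `head(10)` becomes
# one wrapping `head(., 10)` while `log(., 2)` is returned unchanged.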
paren_symbol <- quote(`(`)
function_symbol <- quote(`function`)
dot_symbol <- quote(.)
bracket_symbol <- quote(`{`)
|
/R/normalize.R
|
no_license
|
atheriel/rrails
|
R
| false | false | 3,588 |
r
|
normalize_pipe_rhs <- function(rhs, binding) {
if (!rlang::is_quosure(rhs)) {
stop(paste("'rhs' parameter must be a quosure. Try calling",
"'rhs <- rlang::enquo(rhs)' first"))
}
# Turn bare symbols into functions.
if (rlang::is_symbol(rlang::f_rhs(rhs))) {
rlang::f_rhs(rhs) <- rlang::lang(rlang::f_rhs(rhs))
}
  # Prevent some pathological inputs, such as `x %>>=% "cats"`.
if (!rlang::is_lang(rlang::f_rhs(rhs))) {
stop("RHS must be callable; '", rlang::f_rhs(rhs), "' is not",
call. = FALSE)
}
# Handle parenthetical inputs and bare anonymous functions.
rhs <- normalize_anon_fns(rhs)
# Blocks are simply executed with `.` bound.
if (!is_block(rhs)) {
rhs <- ensure_dot(rhs)
}
# Modify the environment so that `.` is bound to `binding`.
rlang::f_env(rhs) <- rlang::child_env(.parent = rlang::f_env(rhs),
. = binding)
rhs
}
normalize_anon_fns <- function(expr) {
stopifnot(rlang::is_quosure(expr) && rlang::is_lang(rlang::f_rhs(expr)))
rhs <- rlang::f_rhs(expr)
# First, check for bare anonymous functions and wrap them in parentheses.
if (identical(function_symbol, rlang::node_car(rhs))) {
rlang::f_rhs(expr) <- rlang::lang(rhs)
return(expr)
}
# Look for the first part of the pairlist that isn't another expression. This
# supports forms like `(function(x, y) log(x, y))(., 20)`.
while (rlang::is_lang(rlang::node_car(rhs))) {
rhs <- rlang::node_car(rhs)
}
if (identical(paren_symbol, rlang::node_car(rhs))) {
inside <- rlang::node_cadr(rhs)
if (!rlang::is_lang(inside) ||
!identical(function_symbol, rlang::node_car(inside))) {
stop("parenthesized expressions must contain an anonymous function",
call. = FALSE)
}
# If the right-hand-side doesn't have arguments, add them. This effectively
# turns forms like `(function(x) x)` into `(function(x) x)()`, meaning they
# can be handled natively by `ensure_dot()`.
if (!rlang::is_lang(rlang::node_car(rlang::f_rhs(expr)))) {
rlang::f_rhs(expr) <- rlang::lang(rlang::f_rhs(expr))
}
}
expr
}
is_block <- function(expr) {
stopifnot(rlang::is_quosure(expr) && rlang::is_lang(rlang::f_rhs(expr)))
car <- rlang::node_car(rlang::f_rhs(expr))
# Look for the first part of the pairlist that isn't another expression. This
# supports forms like `{ y <- . * 2; function() { log(., y) } }()`, although
# it seems unlikely they are useful.
while (rlang::is_lang(car)) {
car <- rlang::node_car(car)
}
identical(bracket_symbol, car)
}
ensure_dot <- function(expr) {
stopifnot(rlang::is_quosure(expr) && rlang::is_lang(rlang::f_rhs(expr)))
# The right-hand side is a CONS where the CAR is the function symbol and the
# CDR is the pairlist of arguments (or NULL).
args <- rlang::node_cdr(rlang::f_rhs(expr))
# Loop over the CONS cells.
arg <- args
while (!rlang::is_null(arg)) {
# The CAR of an argument is its value, as in `name = value` or just `value`
# if it is not a named argument. We're looking for dots in these values. If
# we find one, the quosure is fine as-is.
if (identical(dot_symbol, rlang::node_car(arg))) {
return(expr)
}
arg <- rlang::node_cdr(arg)
}
# No dots found. Prefix the argument CONS with one, in place.
new_args <- rlang::node(dot_symbol, args)
rlang::mut_node_cdr(rlang::f_rhs(expr), new_args)
invisible(expr)
}
paren_symbol <- quote(`(`)
function_symbol <- quote(`function`)
dot_symbol <- quote(.)
bracket_symbol <- quote(`{`)
|
library(pollagg)
# Fit a model
y <- matrix(c(10, 900, 50, 50), ncol = 2, byrow = TRUE)
n <- rowSums(y)
fit <- yapa(y = y, n = n, dates = NULL, iter = 1000, chains = 3)
# Test model fit
if(!all(dim(fit$params$theta) == c(1500, 2, 2))) {
stop("yapa is not returning results as expected")
}
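# (added note) 1500 draws = 3 chains x 500 post-warmup iterations, assuming Stan's
# default of discarding the first half of each chain (iter = 1000) as warmup.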
# Test plot
p <- plot(fit)
if(!"ggplot" %in% class(p)) {
stop("plot.yapafit is not returning an object of class ggplot2")
}
# Test summary
sf <- summary(fit)
if(!identical(names(sf), c("delta", "polls", "pct", "trend"))) {
stop("summary.fit is not summarising the four output data.frames (delta, polls, pct, and trend)")
}
|
/tests/test-yapa.R
|
permissive
|
alexpavlakis/pollagg
|
R
| false | false | 621 |
r
|
library(pollagg)
# Fit a model
y <- matrix(c(10, 900, 50, 50), ncol = 2, byrow = TRUE)
n <- rowSums(y)
fit <- yapa(y = y, n = n, dates = NULL, iter = 1000, chains = 3)
# Test model fit
if(!all(dim(fit$params$theta) == c(1500, 2, 2))) {
stop("yapa is not returning results as expected")
}
# Test plot
p <- plot(fit)
if(!"ggplot" %in% class(p)) {
stop("plot.yapafit is not returning an object of class ggplot2")
}
# Test summary
sf <- summary(fit)
if(!identical(names(sf), c("delta", "polls", "pct", "trend"))) {
stop("summary.fit is not summarising the four output data.frames (delta, polls, pct, and trend)")
}
|
library(TrenaProjectBrainCell)
library(RUnit)
library(trenaSGM)
library(org.Hs.eg.db)
#------------------------------------------------------------------------------------------------------------------------
if(!exists("tp")) {
message(sprintf("--- creating instance of TrenaProjectBrainCell"))
tp <- TrenaProjectBrainCell();
}
#------------------------------------------------------------------------------------------------------------------------
runTests <- function()
{
test_constructor()
test_supportedGenes()
test_variants()
test_footprintDatabases()
test_expressionMatrices()
test_setTargetGene()
test_buildSingleGeneModel()
test_buildSingleGeneModel_slowGenes()
test_buildSingleGeneModel_footprintsAndWithout_MEF2C()
} # runTests
#------------------------------------------------------------------------------------------------------------------------
test_constructor <- function()
{
message(sprintf("--- test_constructor"))
checkTrue(all(c("TrenaProjectBrainCell", "TrenaProjectHG38") %in% is(tp)))
checkEquals(getFootprintDatabasePort(tp), 5432)
} # test_constructor
#------------------------------------------------------------------------------------------------------------------------
test_supportedGenes <- function()
{
message(sprintf("--- test_supportedGenes"))
subset.expected <- c("APOE")
checkTrue(all(subset.expected %in% getSupportedGenes(tp)))
} # test_supportedGenes
#------------------------------------------------------------------------------------------------------------------------
test_variants <- function()
{
message(sprintf("--- test_variants"))
checkEquals(length(getVariantDatasetNames(tp)), 0)
} # test_variants
#------------------------------------------------------------------------------------------------------------------------
test_footprintDatabases <- function()
{
message(sprintf("--- test_footprintDatabases"))
expected <- c("brain_hint_16", "brain_hint_20", "brain_wellington_16", "brain_wellington_20")
checkTrue(all(expected %in% getFootprintDatabaseNames(tp)))
checkEquals(getFootprintDatabaseHost(tp), "khaleesi.systemsbiology.net")
checkEquals(getFootprintDatabasePort(tp), 5432)
} # test_footprintDatabases
#------------------------------------------------------------------------------------------------------------------------
test_expressionMatrices <- function()
{
message(sprintf("--- test_expressionMatrices"))
expected.matrix <- "Micro_TYROBP"
checkTrue(all(expected.matrix %in% getExpressionMatrixNames(tp)))
mtx <- getExpressionMatrix(tp, expected.matrix)
checkEquals(dim(mtx), c(13974, 264))
expected.genes <- c("AACS", "AADAT", "AAED1")
checkTrue(all(expected.genes %in% rownames(mtx)))
summary.stats <- fivenum(mtx)
checkTrue(summary.stats[1] < 0.01)
checkTrue(summary.stats[1] > 0)
checkTrue(summary.stats[5] < 10)
checkTrue(summary.stats[5] > 9)
} # test_expressionMatrices
#------------------------------------------------------------------------------------------------------------------------
# setting the target gene implies a few other assignments, all tested here:
#   geneInfo (temporarily also masquerading as tbl.transcripts)
#   geneRegion
#   geneEnhancersRegion (when available, defaults to geneRegion)
#
test_setTargetGene <- function()
{
message(sprintf("--- test_setTargetGene"))
setTargetGene(tp, "MICA")
checkEquals(getTargetGene(tp), "MICA")
message(sprintf(" transcripts"))
tbl.transcripts <- getTranscriptsTable(tp)
checkTrue(nrow(tbl.transcripts) == 1)
checkEquals(tbl.transcripts$chr, "chr6")
checkEquals(tbl.transcripts$start, 31399784)
checkEquals(tbl.transcripts$end , 31415315)
checkEquals(tbl.transcripts$tss, 31403579)
checkEquals(tbl.transcripts$strand, 1)
message(sprintf(" geneRegion"))
region <- getGeneRegion(tp, flankingPercent=0)
checkTrue(all(c("chromLocString", "chrom", "start", "end") %in% names(region)))
checkEquals(region$chromLocString, "chr6:31399784-31415315")
message(sprintf(" enhancers"))
tbl.enhancers <- getEnhancers(tp, tissues=c("brain", "Brain"))
checkEquals(head(colnames(tbl.enhancers)), c("chrom", "start", "end", "gene", "eqtl", "hic"))
checkTrue(nrow(tbl.enhancers) >= 0)
message(sprintf(" geneGeneEnhancersRegion"))
region <- getGeneEnhancersRegion(tp, flankingPercent=0)
checkTrue(all(c("chromLocString", "chrom", "start", "end") %in% names(region)))
message(sprintf(" encode DHS"))
tbl.dhs <- getEncodeDHS(tp)
checkTrue(nrow(tbl.dhs) > 1900)
message(sprintf(" ChIP-seq"))
tbl.chipSeq <- with(tbl.transcripts, getChipSeq(tp, chrom=chrom, start=start, end=end, tfs="BCLAF1"))
checkEquals(nrow(tbl.chipSeq), 2)
} # test_setTargetGene
#------------------------------------------------------------------------------------------------------------------------
test_buildSingleGeneModel <- function()
{
printf("--- test_buildSingleGeneModel")
genome <- "hg38"
targetGene <- "APOE"
chromosome <- "chr19"
tss <- 44905751
# strand-aware start and end: trem2 is on the minus strand
geneHancer.promoter.chromLocString <- "chr19:44,903,353-44,907,298"
start <- 44903353
end <- 44907298
tbl.regions <- data.frame(chrom=chromosome, start=start, end=end, stringsAsFactors=FALSE)
matrix.name <- "Micro_TYROBP"
checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
mtx <- getExpressionMatrix(tp, matrix.name)
build.spec <- list(title="unit test on APOE",
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tss,
matrix=mtx,
db.host=getFootprintDatabaseHost(tp),
db.port=getFootprintDatabasePort(tp),
databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping="MotifDB",
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, build.spec, quiet=FALSE)
suppressWarnings(x <- build(fpBuilder))
lapply(x, dim)
checkEquals(sort(names(x)), c("model", "regulatoryRegions"))
tbl.regulatoryRegions <- x$regulatoryRegions
tbl.model <- x$model
tbl.model <- tbl.model[order(tbl.model$rfScore, decreasing=TRUE),]
checkTrue(all(tbl.model$gene %in% tbl.regulatoryRegions$geneSymbol))
checkTrue(nrow(x$model) > 50)
checkTrue("TP53" %in% head(x$model$gene, n=20))
checkTrue(max(tbl.model$pearsonCoeff) > 0.85)
# a modest sanity check on pearsonCoeff: should be exactly what we see in the expression matrix
checkEqualsNumeric(cor(mtx["APOE",], mtx["TP53",]), subset(tbl.model, gene=="TP53")$pearsonCoeff)
} # test_buildSingleGeneModel
#------------------------------------------------------------------------------------------------------------------------
# cory's top genes producing huge output (5 jun 2019): "SF3A2" "ZNF764" "PRR12" "ALDH16A1" "EIF1AD" "ZNF44"
test_buildSingleGeneModel_slowGenes <- function()
{
printf("--- test_buildSingleGeneModel_slowGenes")
#slowGenes <- c("SF3A2", "ZNF764", "PRR12", "ALDH16A1", "EIF1AD", "ZNF44")
#slowGenes <- "SF3A2"#"ABCB4""
slowGenes <- "TCTA"
genome <- "hg38"
targetGene <- slowGenes[1]
setTargetGene(tp, slowGenes[1])
tss <- getTranscriptsTable(tp, targetGene)$tss
tbl.regions <- getEnhancers(tp)[, c("chrom", "start", "end")]
tbl.regions <- tbl.regions[order(tbl.regions$start, decreasing=FALSE),]
#matrix.name <- "Micro_TYROBP"
matrix.name <- "Exc_ALL"
checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
#load("/ssd/cory/github/TrenaProjectBrainCell/inst/extdata/expression/Micro_TYROBP.RData")
mtx <- getExpressionMatrix(tp, matrix.name)
target.gene.expression <- mtx[targetGene,]
other.tfs <- intersect(rownames(mtx), allKnownTFs())
# is our target.gene itself a TF?
# if so, it will skew the overall correlations: eliminate it
target.gene.among.tfs <- match(targetGene, other.tfs)
if(!is.na(target.gene.among.tfs))
other.tfs <- other.tfs[-target.gene.among.tfs]
mtx.test <- mtx[other.tfs,]
dim(mtx.test)
correlations <- abs(apply(mtx.test, 1, function(row) cor(target.gene.expression, row)))
range.of.correlations <- fivenum(correlations)
third.quartile <- range.of.correlations[4]
max <- range.of.correlations[5]
#if(third.quartile < 0.25) return()
build.spec <- list(title=sprintf("unit test on %s", targetGene),
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tss,
matrix=mtx,
db.host=getFootprintDatabaseHost(tp),
db.port=getFootprintDatabasePort(tp),
databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping=c("MotifDB", "TFClass"),
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, build.spec, quiet=FALSE)
print(system.time(suppressWarnings(x <- build(fpBuilder))))
checkEquals(sort(names(x)), c("model", "regulatoryRegions"))
tbl.regulatoryRegions <- x$regulatoryRegions
tbl.model <- x$model
tbl.model <- tbl.model[order(abs(tbl.model$pearsonCoeff), decreasing=TRUE),]
checkTrue(all(tbl.model$gene %in% tbl.regulatoryRegions$geneSymbol))
checkTrue(nrow(tbl.model) > 50)
sample.strong.tf <- tbl.model$gene[1]
checkTrue(max(tbl.model$pearsonCoeff) > 0.5)
# a modest sanity check on pearsonCoeff: should be exactly what we see in the expression matrix
checkEqualsNumeric(cor(mtx[targetGene,], mtx[sample.strong.tf,]), subset(tbl.model, gene==sample.strong.tf)$pearsonCoeff)
} # test_buildSingleGeneModel_slowGenes
#------------------------------------------------------------------------------------------------------------------------
# no genehancer info for this gene, and though we have gene expression, there are no footprints.
# do we handle this oddity gracefully?
test_buildSingleGeneModel_RBMXP2 <- function()
{
printf("--- test_buildSingleGeneModel_RBMXP2")
genome <- "hg38"
targetGene <- "RBMXP2"
chromosome <- "chr9"
tss <- 30689105
start <- tss - 5000
end <- tss + 5000
tbl.regions <- data.frame(chrom=chromosome, start=start, end=end, stringsAsFactors=FALSE)
matrix.name <- "Micro_TYROBP"
   checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
   mtx <- getExpressionMatrix(tp, matrix.name)
build.spec <- list(title="unit test on RBMXP2",
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tss,
matrix=mtx,
                      db.host=getFootprintDatabaseHost(tp),
                      db.port=getFootprintDatabasePort(tp),
                      databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping="MotifDB",
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, build.spec, quiet=FALSE)
checkException(x <- build(fpBuilder), silent=TRUE)
} # test_buildSingleGeneModel_RBMXP2
#------------------------------------------------------------------------------------------------------------------------
# build mef2c model with footprints, and simply based on expression (with all TFs)
test_buildSingleGeneModel_footprintsAndWithout_MEF2C <- function()
{
printf("--- test_buildSingleGeneModel_footprintsAndWithout_MEF2C")
genome <- "hg38"
targetGene <- "MEF2C"
setTargetGene(tp, targetGene)
tbl.geneInfo <- getTranscriptsTable(tp)
brain.related.tissues <- grep("brain", listTissues(tp@genehancer), ignore.case=TRUE, v=TRUE)
tbl.enhancers <- getEnhancers(tp, tissues=brain.related.tissues)
dim(tbl.enhancers)
tbl.regions <- subset(tbl.enhancers, elite==TRUE)
dim(tbl.regions)
matrix.name <- "Micro_TYROBP"
checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
mtx <- getExpressionMatrix(tp, matrix.name)
dim(mtx)
   recipe <- list(title="MEF2C with genehancer",
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tbl.geneInfo$tss,
matrix=mtx,
db.host=getFootprintDatabaseHost(tp),
db.port=getFootprintDatabasePort(tp),
databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping="MotifDB",
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, recipe, quiet=FALSE)
x.fp <- build(fpBuilder)
#------------------------------------------------------------
# now a "noDNA" model
#------------------------------------------------------------
candidate.tfs <- intersect(rownames(mtx), allKnownTFs())
length(candidate.tfs) # 1102
   recipe.noDNA <- list(title="mef2c.noDNA.allTFs",
type="noDNA.tfsSupplied",
matrix=mtx,
candidateTFs=candidate.tfs,
tfPool=allKnownTFs(),
tfPrefilterCorrelation=0.5,
annotationDbFile=dbfile(org.Hs.eg.db),
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"),
quiet=FALSE)
builder <- NoDnaModelBuilder(genome, targetGene, recipe.noDNA, quiet=FALSE)
x.noDNA <- build(builder)
checkEquals(x.noDNA$regulatoryRegions, data.frame())
tbl.model <- x.noDNA$model
checkTrue(nrow(tbl.model) > 600) # 708 on (3 apr 2020)
tfs.in.both <- intersect(tbl.model$gene, x.fp$model$gene)
checkTrue(length(tfs.in.both) > 10)
new.tfs.withoutBindingSites <- setdiff(tbl.model$gene, x.fp$model$gene)
checkTrue(length(new.tfs.withoutBindingSites) > 500)
checkTrue("CSRNP3" %in% new.tfs.withoutBindingSites)
} # test_buildSingleGeneModel_footprintsAndWithout_MEF2C
#------------------------------------------------------------------------------------------------------------------------
if(!interactive())
runTests()
|
/inst/unitTests/test_TrenaProjectBrainCell.R
|
permissive
|
PriceLab/TrenaProjectBrainCell
|
R
| false | false | 15,715 |
r
|
library(TrenaProjectBrainCell)
library(RUnit)
library(trenaSGM)
library(org.Hs.eg.db)
#------------------------------------------------------------------------------------------------------------------------
if(!exists("tp")) {
message(sprintf("--- creating instance of TrenaProjectBrainCell"))
tp <- TrenaProjectBrainCell();
}
#------------------------------------------------------------------------------------------------------------------------
runTests <- function()
{
test_constructor()
test_supportedGenes()
test_variants()
test_footprintDatabases()
test_expressionMatrices()
test_setTargetGene()
test_buildSingleGeneModel()
test_buildSingleGeneModel_slowGenes()
test_buildSingleGeneModel_footprintsAndWithout_MEF2C()
} # runTests
#------------------------------------------------------------------------------------------------------------------------
test_constructor <- function()
{
message(sprintf("--- test_constructor"))
checkTrue(all(c("TrenaProjectBrainCell", "TrenaProjectHG38") %in% is(tp)))
checkEquals(getFootprintDatabasePort(tp), 5432)
} # test_constructor
#------------------------------------------------------------------------------------------------------------------------
test_supportedGenes <- function()
{
message(sprintf("--- test_supportedGenes"))
subset.expected <- c("APOE")
checkTrue(all(subset.expected %in% getSupportedGenes(tp)))
} # test_supportedGenes
#------------------------------------------------------------------------------------------------------------------------
test_variants <- function()
{
message(sprintf("--- test_variants"))
checkEquals(length(getVariantDatasetNames(tp)), 0)
} # test_variants
#------------------------------------------------------------------------------------------------------------------------
test_footprintDatabases <- function()
{
message(sprintf("--- test_footprintDatabases"))
expected <- c("brain_hint_16", "brain_hint_20", "brain_wellington_16", "brain_wellington_20")
checkTrue(all(expected %in% getFootprintDatabaseNames(tp)))
checkEquals(getFootprintDatabaseHost(tp), "khaleesi.systemsbiology.net")
checkEquals(getFootprintDatabasePort(tp), 5432)
} # test_footprintDatabases
#------------------------------------------------------------------------------------------------------------------------
test_expressionMatrices <- function()
{
message(sprintf("--- test_expressionMatrices"))
expected.matrix <- "Micro_TYROBP"
checkTrue(all(expected.matrix %in% getExpressionMatrixNames(tp)))
mtx <- getExpressionMatrix(tp, expected.matrix)
checkEquals(dim(mtx), c(13974, 264))
expected.genes <- c("AACS", "AADAT", "AAED1")
checkTrue(all(expected.genes %in% rownames(mtx)))
summary.stats <- fivenum(mtx)
checkTrue(summary.stats[1] < 0.01)
checkTrue(summary.stats[1] > 0)
checkTrue(summary.stats[5] < 10)
checkTrue(summary.stats[5] > 9)
} # test_expressionMatrices
#------------------------------------------------------------------------------------------------------------------------
# setting the target gene implies a few other assignments, all tested here:
#   geneInfo (temporarily also masquerading as tbl.transcripts)
#   geneRegion
#   geneEnhancersRegion (when available, defaults to geneRegion)
#
test_setTargetGene <- function()
{
message(sprintf("--- test_setTargetGene"))
setTargetGene(tp, "MICA")
checkEquals(getTargetGene(tp), "MICA")
message(sprintf(" transcripts"))
tbl.transcripts <- getTranscriptsTable(tp)
checkTrue(nrow(tbl.transcripts) == 1)
checkEquals(tbl.transcripts$chr, "chr6")
checkEquals(tbl.transcripts$start, 31399784)
checkEquals(tbl.transcripts$end , 31415315)
checkEquals(tbl.transcripts$tss, 31403579)
checkEquals(tbl.transcripts$strand, 1)
message(sprintf(" geneRegion"))
region <- getGeneRegion(tp, flankingPercent=0)
checkTrue(all(c("chromLocString", "chrom", "start", "end") %in% names(region)))
checkEquals(region$chromLocString, "chr6:31399784-31415315")
message(sprintf(" enhancers"))
tbl.enhancers <- getEnhancers(tp, tissues=c("brain", "Brain"))
checkEquals(head(colnames(tbl.enhancers)), c("chrom", "start", "end", "gene", "eqtl", "hic"))
checkTrue(nrow(tbl.enhancers) >= 0)
message(sprintf(" geneGeneEnhancersRegion"))
region <- getGeneEnhancersRegion(tp, flankingPercent=0)
checkTrue(all(c("chromLocString", "chrom", "start", "end") %in% names(region)))
message(sprintf(" encode DHS"))
tbl.dhs <- getEncodeDHS(tp)
checkTrue(nrow(tbl.dhs) > 1900)
message(sprintf(" ChIP-seq"))
tbl.chipSeq <- with(tbl.transcripts, getChipSeq(tp, chrom=chrom, start=start, end=end, tfs="BCLAF1"))
checkEquals(nrow(tbl.chipSeq), 2)
} # test_setTargetGene
#------------------------------------------------------------------------------------------------------------------------
test_buildSingleGeneModel <- function()
{
printf("--- test_buildSingleGeneModel")
genome <- "hg38"
targetGene <- "APOE"
chromosome <- "chr19"
tss <- 44905751
# strand-aware start and end: trem2 is on the minus strand
geneHancer.promoter.chromLocString <- "chr19:44,903,353-44,907,298"
start <- 44903353
end <- 44907298
tbl.regions <- data.frame(chrom=chromosome, start=start, end=end, stringsAsFactors=FALSE)
matrix.name <- "Micro_TYROBP"
checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
mtx <- getExpressionMatrix(tp, matrix.name)
build.spec <- list(title="unit test on APOE",
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tss,
matrix=mtx,
db.host=getFootprintDatabaseHost(tp),
db.port=getFootprintDatabasePort(tp),
databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping="MotifDB",
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, build.spec, quiet=FALSE)
suppressWarnings(x <- build(fpBuilder))
lapply(x, dim)
checkEquals(sort(names(x)), c("model", "regulatoryRegions"))
tbl.regulatoryRegions <- x$regulatoryRegions
tbl.model <- x$model
tbl.model <- tbl.model[order(tbl.model$rfScore, decreasing=TRUE),]
checkTrue(all(tbl.model$gene %in% tbl.regulatoryRegions$geneSymbol))
checkTrue(nrow(x$model) > 50)
checkTrue("TP53" %in% head(x$model$gene, n=20))
checkTrue(max(tbl.model$pearsonCoeff) > 0.85)
# a modest sanity check on pearsonCoeff: should be exactly what we see in the expression matrix
checkEqualsNumeric(cor(mtx["APOE",], mtx["TP53",]), subset(tbl.model, gene=="TP53")$pearsonCoeff)
} # test_buildSingleGeneModel
#------------------------------------------------------------------------------------------------------------------------
# cory's top genes producing huge output (5 jun 2019): "SF3A2" "ZNF764" "PRR12" "ALDH16A1" "EIF1AD" "ZNF44"
test_buildSingleGeneModel_slowGenes <- function()
{
printf("--- test_buildSingleGeneModel_slowGenes")
#slowGenes <- c("SF3A2", "ZNF764", "PRR12", "ALDH16A1", "EIF1AD", "ZNF44")
#slowGenes <- "SF3A2"#"ABCB4""
slowGenes <- "TCTA"
genome <- "hg38"
targetGene <- slowGenes[1]
setTargetGene(tp, slowGenes[1])
tss <- getTranscriptsTable(tp, targetGene)$tss
tbl.regions <- getEnhancers(tp)[, c("chrom", "start", "end")]
tbl.regions <- tbl.regions[order(tbl.regions$start, decreasing=FALSE),]
#matrix.name <- "Micro_TYROBP"
matrix.name <- "Exc_ALL"
checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
#load("/ssd/cory/github/TrenaProjectBrainCell/inst/extdata/expression/Micro_TYROBP.RData")
mtx <- getExpressionMatrix(tp, matrix.name)
target.gene.expression <- mtx[targetGene,]
other.tfs <- intersect(rownames(mtx), allKnownTFs())
# is our target.gene itself a TF?
# if so, it will skew the overall correlations: eliminate it
target.gene.among.tfs <- match(targetGene, other.tfs)
if(!is.na(target.gene.among.tfs))
other.tfs <- other.tfs[-target.gene.among.tfs]
mtx.test <- mtx[other.tfs,]
dim(mtx.test)
correlations <- abs(apply(mtx.test, 1, function(row) cor(target.gene.expression, row)))
range.of.correlations <- fivenum(correlations)
third.quartile <- range.of.correlations[4]
max <- range.of.correlations[5]
#if(third.quartile < 0.25) return()
build.spec <- list(title=sprintf("unit test on %s", targetGene),
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tss,
matrix=mtx,
db.host=getFootprintDatabaseHost(tp),
db.port=getFootprintDatabasePort(tp),
databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping=c("MotifDB", "TFClass"),
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, build.spec, quiet=FALSE)
print(system.time(suppressWarnings(x <- build(fpBuilder))))
checkEquals(sort(names(x)), c("model", "regulatoryRegions"))
tbl.regulatoryRegions <- x$regulatoryRegions
tbl.model <- x$model
tbl.model <- tbl.model[order(abs(tbl.model$pearsonCoeff), decreasing=TRUE),]
checkTrue(all(tbl.model$gene %in% tbl.regulatoryRegions$geneSymbol))
checkTrue(nrow(tbl.model) > 50)
sample.strong.tf <- tbl.model$gene[1]
checkTrue(max(tbl.model$pearsonCoeff) > 0.5)
# a modest sanity check on pearsonCoeff: should be exactly what we see in the expression matrix
checkEqualsNumeric(cor(mtx[targetGene,], mtx[sample.strong.tf,]), subset(tbl.model, gene==sample.strong.tf)$pearsonCoeff)
} # test_buildSingleGeneModel_slowGenes
#------------------------------------------------------------------------------------------------------------------------
# no genehancer info for this gene, and though we have gene expression, there are no footprints.
# do we handle this oddity gracefully?
test_buildSingleGeneModel_RBMXP2 <- function()
{
printf("--- test_buildSingleGeneModel_RBMXP2")
genome <- "hg38"
targetGene <- "RBMXP2"
chromosome <- "chr9"
tss <- 30689105
start <- tss - 5000
end <- tss + 5000
tbl.regions <- data.frame(chrom=chromosome, start=start, end=end, stringsAsFactors=FALSE)
matrix.name <- "Micro_TYROBP"
   checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
   mtx <- getExpressionMatrix(tp, matrix.name)
build.spec <- list(title="unit test on RBMXP2",
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tss,
matrix=mtx,
                      db.host=getFootprintDatabaseHost(tp),
                      db.port=getFootprintDatabasePort(tp),
                      databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping="MotifDB",
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, build.spec, quiet=FALSE)
checkException(x <- build(fpBuilder), silent=TRUE)
} # test_buildSingleGeneModel_RBMXP2
#------------------------------------------------------------------------------------------------------------------------
# build mef2c model with footprints, and simply based on expression (with all TFs)
test_buildSingleGeneModel_footprintsAndWithout_MEF2C <- function()
{
printf("--- test_buildSingleGeneModel_footprintsAndWithout_MEF2C")
genome <- "hg38"
targetGene <- "MEF2C"
setTargetGene(tp, targetGene)
tbl.geneInfo <- getTranscriptsTable(tp)
brain.related.tissues <- grep("brain", listTissues(tp@genehancer), ignore.case=TRUE, v=TRUE)
tbl.enhancers <- getEnhancers(tp, tissues=brain.related.tissues)
dim(tbl.enhancers)
tbl.regions <- subset(tbl.enhancers, elite==TRUE)
dim(tbl.regions)
matrix.name <- "Micro_TYROBP"
checkTrue(matrix.name %in% getExpressionMatrixNames(tp))
mtx <- getExpressionMatrix(tp, matrix.name)
dim(mtx)
   recipe <- list(title="MEF2C with genehancer",
type="footprint.database",
regions=tbl.regions,
geneSymbol=targetGene,
tss=tbl.geneInfo$tss,
matrix=mtx,
db.host=getFootprintDatabaseHost(tp),
db.port=getFootprintDatabasePort(tp),
databases=getFootprintDatabaseNames(tp),
annotationDbFile=dbfile(org.Hs.eg.db),
motifDiscovery="builtinFimo",
tfPool=allKnownTFs(),
tfMapping="MotifDB",
tfPrefilterCorrelation=0.1,
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"))
fpBuilder <- FootprintDatabaseModelBuilder(genome, targetGene, recipe, quiet=FALSE)
x.fp <- build(fpBuilder)
#------------------------------------------------------------
# now a "noDNA" model
#------------------------------------------------------------
candidate.tfs <- intersect(rownames(mtx), allKnownTFs())
length(candidate.tfs) # 1102
   recipe.noDNA <- list(title="mef2c.noDNA.allTFs",
type="noDNA.tfsSupplied",
matrix=mtx,
candidateTFs=candidate.tfs,
tfPool=allKnownTFs(),
tfPrefilterCorrelation=0.5,
annotationDbFile=dbfile(org.Hs.eg.db),
orderModelByColumn="rfScore",
solverNames=c("lasso", "lassopv", "pearson", "randomForest", "ridge", "spearman"),
quiet=FALSE)
builder <- NoDnaModelBuilder(genome, targetGene, recipe.noDNA, quiet=FALSE)
x.noDNA <- build(builder)
checkEquals(x.noDNA$regulatoryRegions, data.frame())
tbl.model <- x.noDNA$model
checkTrue(nrow(tbl.model) > 600) # 708 on (3 apr 2020)
tfs.in.both <- intersect(tbl.model$gene, x.fp$model$gene)
checkTrue(length(tfs.in.both) > 10)
new.tfs.withoutBindingSites <- setdiff(tbl.model$gene, x.fp$model$gene)
checkTrue(length(new.tfs.withoutBindingSites) > 500)
checkTrue("CSRNP3" %in% new.tfs.withoutBindingSites)
} # test_buildSingleGeneModel_footprintsAndWithout_MEF2C
#------------------------------------------------------------------------------------------------------------------------
if(!interactive())
runTests()
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048158709L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
|
/dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615940028-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false | false | 826 |
r
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048158709L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
|
library(ape)
testtree <- read.tree("2548_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2548_1_unrooted.txt")
|
/codeml_files/newick_trees_processed/2548_1/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
library(ape)
testtree <- read.tree("2548_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2548_1_unrooted.txt")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats_met.R
\name{stats_met}
\alias{stats_met}
\title{Statistical Methods}
\usage{
stats_met()
}
\value{
}
\description{
Statistical Methods
}
\examples{
}
|
/man/stats_met.Rd
|
permissive
|
sbalci/histopathRaddins
|
R
| false | true | 236 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats_met.R
\name{stats_met}
\alias{stats_met}
\title{Statistical Methods}
\usage{
stats_met()
}
\value{
}
\description{
Statistical Methods
}
\examples{
}
|
###
comp_plot_sdhunter_year <- function(group = spgp,
var = "sdhunter",
prov = "",
zone = "",
M = out2,
castes = jdat$castes ){
dsum = as.data.frame(M$summary)
names(dsum)[3:7] <- c("lci","lqrt","med","uqrt","uci")
dsum$Parameter = row.names(dsum)
d1 = filter(dsum,grepl(Parameter,pattern = paste0(var,"["),fixed = T))
d1$caste = jags_dim(var = var,dat = d1)
d1$yr = jags_dim(var = var,dat = d1,dim = 2)
d1$year = d1$yr+(Y-(jdat$nyears))
d1$mod = "Cst"
dd = d1
dd$mod <- factor(dd$mod,levels = c("Old","New"), ordered = T)
dd <- dd[which(dd$year >= FY),]
my_col <- scale_color_viridis_d(aesthetics = c("colour","fill"), begin = 0.3,end = 0.9,option = "B",direction = -1)
outgg = ggplot(data = dd,aes(x = year,y = mean))+
geom_point(aes(colour = mod),size = 0.5)+
geom_line(aes(colour = mod))+
labs(title = paste0("caste specific sdhunter ",prov," zn",zone," (mean and 95 CI)"))+
geom_ribbon(aes(ymax = uci,ymin = lci),alpha = 0.2)+
#scale_y_continuous(limits = c(0,NA))+
my_col+
theme_classic()+
facet_wrap(facets = ~caste,nrow = 2,ncol = 2,scales = "fixed")
return(outgg)
}
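# (added usage sketch) the function relies on objects from the surrounding workspace
# (spgp, out2, jdat, Y, FY and jags_dim()); assuming those exist as elsewhere in this
# repository, a call looks like:
# p <- comp_plot_sdhunter_year(prov = "ON", zone = "1", M = out2)
# print(p)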
|
/functions/comparison_plotting_function_sdhunter_year.R
|
no_license
|
AdamCSmithCWS/CWS_National_Harvest_Survey
|
R
| false | false | 1,314 |
r
|
###
comp_plot_sdhunter_year <- function(group = spgp,
var = "sdhunter",
prov = "",
zone = "",
M = out2,
castes = jdat$castes ){
dsum = as.data.frame(M$summary)
names(dsum)[3:7] <- c("lci","lqrt","med","uqrt","uci")
dsum$Parameter = row.names(dsum)
d1 = filter(dsum,grepl(Parameter,pattern = paste0(var,"["),fixed = T))
d1$caste = jags_dim(var = var,dat = d1)
d1$yr = jags_dim(var = var,dat = d1,dim = 2)
d1$year = d1$yr+(Y-(jdat$nyears))
d1$mod = "Cst"
dd = d1
dd$mod <- factor(dd$mod,levels = c("Old","New"), ordered = T)
dd <- dd[which(dd$year >= FY),]
my_col <- scale_color_viridis_d(aesthetics = c("colour","fill"), begin = 0.3,end = 0.9,option = "B",direction = -1)
outgg = ggplot(data = dd,aes(x = year,y = mean))+
geom_point(aes(colour = mod),size = 0.5)+
geom_line(aes(colour = mod))+
labs(title = paste0("caste specific sdhunter ",prov," zn",zone," (mean and 95 CI)"))+
geom_ribbon(aes(ymax = uci,ymin = lci),alpha = 0.2)+
#scale_y_continuous(limits = c(0,NA))+
my_col+
theme_classic()+
facet_wrap(facets = ~caste,nrow = 2,ncol = 2,scales = "fixed")
return(outgg)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_gg.R
\name{plot_gg}
\alias{plot_gg}
\title{Transform ggplot2 objects into 3D}
\usage{
plot_gg(
ggobj,
width = 3,
height = 3,
height_aes = NULL,
invert = FALSE,
shadow_intensity = 0.5,
units = c("in", "cm", "mm"),
scale = 150,
pointcontract = 0.7,
offset_edges = FALSE,
preview = FALSE,
raytrace = TRUE,
sunangle = 315,
anglebreaks = seq(30, 40, 0.1),
multicore = FALSE,
lambert = TRUE,
triangulate = TRUE,
max_error = 0.001,
max_tri = 0,
verbose = FALSE,
reduce_size = NULL,
save_height_matrix = FALSE,
save_shadow_matrix = FALSE,
saved_shadow_matrix = NULL,
...
)
}
\arguments{
\item{ggobj}{ggplot object to projected into 3D.}
\item{width}{Default `3`. Width of ggplot, in `units`.}
\item{height}{Default `3`. Height of ggplot, in `units`.}
\item{height_aes}{Default `NULL`. Whether the `fill` or `color` aesthetic should be used for height values,
which the user can specify by passing either `fill` or `color` to this argument.
Automatically detected. If both `fill` and `color` aesthetics are present, then `fill` is default.}
\item{invert}{Default `FALSE`. If `TRUE`, the height mapping is inverted.}
\item{shadow_intensity}{Default `0.5`. The intensity of the calculated shadows.}
\item{units}{Default `in`. One of c("in", "cm", "mm").}
\item{scale}{Default `150`. Multiplier for vertical scaling: a higher number increases the height
of the 3D transformation.}
\item{pointcontract}{Default `0.7`. This multiplies the size of the points and shrinks
them around their center in the 3D surface mapping. Decrease this to reduce color bleed on edges, and set to
`1` to turn off entirely. Note: If `size` is passed as an aesthetic to the same geom
that is being mapped to elevation, this scaling will not be applied. If `alpha` varies on the variable
being mapped, you may want to set this to `1`, since the points now have a non-zero width stroke outline (however,
mapping `alpha` in the same variable you are projecting to height is probably not a good choice. as the `alpha`
variable is ignored when performing the 3D projection).}
\item{offset_edges}{Default `FALSE`. If `TRUE`, inserts a small amount of space between polygons for "geom_sf", "geom_tile", "geom_hex", and "geom_polygon" layers.
If you pass in a number, the space between polygons will be a line of that width. Note: this feature may end up removing thin polygons
from the plot entirely--use with care.}
\item{preview}{Default `FALSE`. If `TRUE`, the raytraced 2D ggplot will be displayed on the current device.}
\item{raytrace}{Default `FALSE`. Whether to add a raytraced layer.}
\item{sunangle}{Default `315` (NW). If raytracing, the angle (in degrees) around the matrix from which the light originates.}
\item{anglebreaks}{Default `seq(30,40,0.1)`. The azimuth angle(s), in degrees, as measured from the horizon from which the light originates.}
\item{multicore}{Default `FALSE`. If raytracing and `TRUE`, multiple cores will be used to compute the shadow matrix. By default, this uses all cores available, unless the user has
set `options("cores")` in which the multicore option will only use that many cores.}
\item{lambert}{Default `TRUE`. If raytracing, changes the intensity of the light at each point based proportional to the
dot product of the ray direction and the surface normal at that point. Zeros out all values directed away from
the ray.}
\item{triangulate}{Default `FALSE`. Reduce the size of the 3D model by triangulating the height map.
Set this to `TRUE` if generating the model is slow, or moving it is choppy. Will also reduce the size
of 3D models saved to disk.}
\item{max_error}{Default `0.001`. Maximum allowable error when triangulating the height map,
when `triangulate = TRUE`. Increase this if you encounter problems with 3D performance, want
to decrease render time with `render_highquality()`, or need
to save a smaller 3D OBJ file to disk with `save_obj()`,}
\item{max_tri}{Default `0`, which turns this setting off and uses `max_error`.
Maximum number of triangles allowed when triangulating the
height map, when `triangulate = TRUE`. Increase this if you encounter problems with 3D performance, want
to decrease render time with `render_highquality()`, or need
to save a smaller 3D OBJ file to disk with `save_obj()`.}
\item{verbose}{Default `TRUE`, if `interactive()`. Prints information about the mesh triangulation
if `triangulate = TRUE`.}
\item{reduce_size}{Default `NULL`. A number between `0` and `1` that specifies how much to reduce the resolution of the plot, for faster plotting. By
default, this just decreases the size of the height map, not the image. If you wish the image to be reduced in resolution as well, pass a numeric vector of size 2.}
\item{save_height_matrix}{Default `FALSE`. If `TRUE`, the function will return the height matrix used for the ggplot.}
\item{save_shadow_matrix}{Default `FALSE`. If `TRUE`, the function will return the shadow matrix for use in future updates via the `shadow_cache` argument passed to `ray_shade`.}
\item{saved_shadow_matrix}{Default `NULL`. A cached shadow matrix (saved by a previous invocation of `plot_gg(..., save_shadow_matrix=TRUE)`) to use instead of raytracing a shadow map each time.}
\item{...}{Additional arguments to be passed to `plot_3d()`.}
}
\value{
Opens a 3D plot in rgl.
}
\description{
Plots a ggplot2 object in 3D by mapping the color or fill aesthetic to elevation.
Currently, this function does not transform lines mapped to color into 3D.
If there are multiple legends/guides due to multiple aesthetics being mapped (e.g. color and shape),
the package author recommends that the user pass the order of the guides manually using the ggplot2 function `guides()` (see the final example below).
Otherwise, the order may change when processing the ggplot2 object and result in a mismatch between the 3D mapping
and the underlying plot.
Using the shape aesthetic with more than three groups is not recommended, unless the user passes in
custom, solid shapes. By default in ggplot2, only the first three shapes are solid, which is a requirement to be projected
into 3D.
}
\examples{
library(ggplot2)
library(viridis)
ggdiamonds = ggplot(diamonds, aes(x, depth)) +
stat_density_2d(aes(fill = stat(nlevel)), geom = "polygon", n = 100, bins = 10,contour = TRUE) +
facet_wrap(clarity~.) +
scale_fill_viridis_c(option = "A")
\donttest{
plot_gg(ggdiamonds,multicore=TRUE,width=5,height=5,scale=250,windowsize=c(1400,866),
zoom = 0.55, phi = 30)
render_snapshot()
}
#Change the camera angle and take a snapshot:
\donttest{
render_camera(zoom=0.5,theta=-30,phi=30)
render_snapshot(clear = TRUE)
}
#Contours and other lines will automatically be ignored. Here is the volcano dataset:
ggvolcano = volcano \%>\%
reshape2::melt() \%>\%
ggplot() +
geom_tile(aes(x=Var1,y=Var2,fill=value)) +
geom_contour(aes(x=Var1,y=Var2,z=value),color="black") +
scale_x_continuous("X",expand = c(0,0)) +
scale_y_continuous("Y",expand = c(0,0)) +
scale_fill_gradientn("Z",colours = terrain.colors(10)) +
coord_fixed()
ggvolcano
\donttest{
plot_gg(ggvolcano, multicore = TRUE, raytrace = TRUE, width = 7, height = 4,
scale = 300, windowsize = c(1400, 866), zoom = 0.6, phi = 30, theta = 30)
render_snapshot(clear = TRUE)
}
#Here, we will create a 3D plot of the mtcars dataset. This automatically detects
#that the user used the `color` aesthetic instead of the `fill`.
mtplot = ggplot(mtcars) +
geom_point(aes(x=mpg,y=disp,color=cyl)) +
scale_color_continuous(limits=c(0,8))
#Preview how the plot will look by setting `preview = TRUE`: We also adjust the angle of the light.
\donttest{
plot_gg(mtplot, width=3.5, sunangle=225, preview = TRUE)
}
\donttest{
plot_gg(mtplot, width=3.5, multicore = TRUE, windowsize = c(1400,866), sunangle=225,
zoom = 0.60, phi = 30, theta = 45)
render_snapshot(clear = TRUE)
}
#Now let's plot a density plot in 3D.
mtplot_density = ggplot(mtcars) +
stat_density_2d(aes(x=mpg,y=disp, fill=..density..), geom = "raster", contour = FALSE) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
scale_fill_gradient(low="pink", high="red")
mtplot_density
\donttest{
plot_gg(mtplot_density, width = 4,zoom = 0.60, theta = -45, phi = 30,
windowsize = c(1400,866))
render_snapshot(clear = TRUE)
}
#This also works facetted.
mtplot_density_facet = mtplot_density + facet_wrap(~cyl)
#Preview this plot in 2D:
\donttest{
plot_gg(mtplot_density_facet, preview = TRUE)
}
\donttest{
plot_gg(mtplot_density_facet, windowsize=c(1400,866),
zoom = 0.55, theta = -10, phi = 25)
render_snapshot(clear = TRUE)
}
#That is a little cramped. Specifying a larger width will improve the readability of this plot.
\donttest{
plot_gg(mtplot_density_facet, width = 6, preview = TRUE)
}
#That's better. Let's plot it in 3D, and increase the scale.
\donttest{
plot_gg(mtplot_density_facet, width = 6, windowsize=c(1400,866),
zoom = 0.55, theta = -10, phi = 25, scale=300)
render_snapshot(clear = TRUE)
}
#
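#As noted in the description above, when several mapped aesthetics generate several guides,
#fixing the guide order manually avoids a mismatch. Illustrative sketch (`mtplot_guides`
#is a made-up example plot):
\donttest{
mtplot_guides = ggplot(mtcars) +
 geom_point(aes(x=mpg, y=disp, color=cyl, shape=factor(am))) +
 guides(color = guide_colourbar(order = 1), shape = guide_legend(order = 2))
plot_gg(mtplot_guides, width = 3.5, preview = TRUE)
}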
}
| /man/plot_gg.Rd | no_license | hiter-joe/rayshader | R | false | true | 9,105 | rd |
isat_my=function(y, mc = TRUE, ar = NULL, ewma = NULL, mxreg = NULL,
iis = TRUE, sis = TRUE, tis = FALSE, uis = FALSE, blocks = NULL,
ratio.threshold = 0.8, max.block.size = 30, t.pval = 0.001,
wald.pval = t.pval, vcov.type = c("ordinary", "white", "newey-west"),
do.pet = FALSE, ar.LjungB = NULL, arch.LjungB = NULL, normality.JarqueB = NULL,
user.diagnostics = NULL, info.method = c("sc", "aic", "hq"),
include.gum = NULL, include.1cut = FALSE, include.empty = FALSE,
max.paths = NULL, parallel.options = NULL, turbo =FALSE,
tol = 1e-07, LAPACK = FALSE, max.regs = NULL, print.searchinfo = TRUE,
plot = NULL, alarm = FALSE,
#for my version of the code (for a local level model)
model.ssmod = "local-level", pars.ssmod = c(var1 = log(var(y)), var2 = log(var(y)),
P0=10000000,a0=y[1]),
nopars.ssmod = NULL, cpar.ssmod = NULL,
xreg.ssmod = NULL, lower.ssmod = NULL, upper.ssmod = NULL, transPars.ssmod = NULL,
ssd.ssmod = FALSE, sgfc.ssmod = FALSE,
kf.args = list(P0cov = FALSE), # if we would set this one to TRUE, we would say that
# P0 for max likelihood with KF is not diagonal
method.kfs ="L-BFGS-B",
gr.kfs = c("numerical", "analytical"),
optim.kfs = list(lower = 0, upper = Inf, hessian = TRUE) #this should later be changed to
#hessian = FALSE, since computing the Hessian can really slow down the whole process
)
{
#(y_m, t.pval=0.001, model.ssmod = "local-level",
# vcov.type = "ordinary",info.method = 'aic', gr.kfs ="numerical",
# optim.kfs = list(lower = 0, upper = Inf, hessian = TRUE))
#################################################
# mc = TRUE
# ar = NULL
# ewma = NULL
# mxreg = NULL
# iis = TRUE
# sis = TRUE
# tis = FALSE
# uis = FALSE
# blocks = NULL
# ratio.threshold = 0.8
# max.block.size = 30
# t.pval = 0.001
# wald.pval = t.pval
# vcov.type = "ordinary"
# do.pet = FALSE
# ar.LjungB = NULL
# arch.LjungB = NULL
# normality.JarqueB = NULL
# user.diagnostics = NULL
# info.method ="aic"
# include.gum = NULL
# include.1cut = FALSE
# include.empty = FALSE
# max.paths = NULL
# parallel.options = NULL
# turbo = FALSE
# tol = 1e-07
# LAPACK = FALSE
# max.regs = NULL
# print.searchinfo = TRUE
# plot = NULL
# alarm = FALSE
# model.ssmod = "local-level"
#pars.ssmod <- c(var1 =var(y), var2 =var(y))
# pars.ssmod = c("var1" = var(y), "var2" = var(y),"P01"=10000000,"a01"=y[1])
# nopars.ssmod = NULL
# cpar.ssmod = NULL
# xreg.ssmod = NULL
# lower.ssmod = NULL
# upper.ssmod = NULL
# transPars.ssmod = NULL
# ssd.ssmod = FALSE
# sgfc.ssmod = FALSE
# kf.args = list(P0cov = FALSE) # if we would set this one to TRUE, we would say that
# P0 for max likelihood with KF is not diagonal
# method.kfs ="L-BFGS-B"
# gr.kfs = "analytical"
# optim.kfs = list(maxit = 100000000) #this should later be changed to
#hessian = FALSE, since computing the Hessian can really slow down the whole process
###########################################
require(zoo)
require(stsm)
require(dlm)
isat.call <- sys.call()
vcov.type <- match.arg(vcov.type)
info.method <- match.arg(info.method)
if (!is.null(include.gum)) {
warning("The 'include.gum' argument is ignored (temporarily deprecated in isat)")
}
include.gum <- TRUE
olsMethod <- switch(vcov.type, ordinary = 3, white = 4, `newey-west` = 5)
if (!is.null(max.paths) && max.paths < 1) {
stop("'max.paths' cannot be smaller than 1")
}
if (!is.null(parallel.options)) {
if (is.numeric(parallel.options)) {
clusterSpec <- parallel.options
}
OScores <- detectCores()
if (parallel.options > OScores) {
stop("parallel.options > number of cores/threads")
}
}
#my own version for state space modelling
# (y, mc = mc, ar = ar, ewma = ewma, mxreg = mxreg,
# vcov.type = vcov.type, qstat.options = NULL, user.diagnostics = user.diagnostics,
# tol = tol, LAPACK = LAPACK, plot = FALSE)
y=as.ts(y)
mod <- kfs.arx(y, mc = mc, mxreg = mxreg,
vcov.type = vcov.type, qstat.options = NULL, user.diagnostics = user.diagnostics,
tol = tol, LAPACK = LAPACK, plot = FALSE)
# mod <- arx(y, mc = mc, ar = ar, ewma = ewma, mxreg = mxreg,
# vcov.type = vcov.type, qstat.options = NULL, user.diagnostics = user.diagnostics,
# tol = tol, LAPACK = LAPACK, plot = FALSE)
y <- mod$aux$y
y.n <- mod$aux$y.n
y.index <- mod$aux$y.index
y.index.as.char <- as.character(y.index)
y.name <- mod$aux$y.name
mX <- mod$aux$mX
mXnames <- mod$aux$mXnames
colnames(mX) <- mXnames
mXncol <- mod$aux$mXncol
vcov.type <- mod$aux$vcov.type
qstat.options <- mod$aux$qstat.options
if (is.null(mX)){
mxkeep <- NULL
}else{
mxkeep <- 1:mXncol
}
arLjungB <- NULL
if (!is.null(ar.LjungB)) { #two item list with lag and pval for Ljung Box test for serial correlation in the standardised residuals.
arLjungB <- c(NA, ar.LjungB$pval)
if (is.null(ar.LjungB$lag)) {
arLjungB[1] <- qstat.options[1]
}else {
arLjungB[1] <- ar.LjungB$lag
}
}
archLjungB <- NULL
if (!is.null(arch.LjungB)) {
archLjungB <- c(NA, arch.LjungB$pval)
if (is.null(arch.LjungB$lag)) {
archLjungB[1] <- qstat.options[2]
}else {
archLjungB[1] <- arch.LjungB$lag
}
}
ISmatrices <- list()
if (iis) {
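#impulse indicator saturation (IIS): an identity matrix, i.e. one impulse dummy per observation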
mIIS <- matrix(0, y.n, y.n)
diag(mIIS) <- 1
colnames(mIIS) <- paste("iis", y.index.as.char, sep = "")
ISmatrices <- c(ISmatrices, list(IIS = mIIS))
}
if (sis) {
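#step indicator saturation (SIS): each column is a step dummy switching on at that observation (first column dropped)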
mSIS <- matrix(0, y.n, y.n)
loop.indx <- 1:y.n
tmp <- function(i) {
mSIS[i, 1:i] <<- 1
}
tmp <- sapply(loop.indx, tmp)
colnames(mSIS) <- paste("sis", y.index.as.char, sep = "")
mSIS <- mSIS[, -1]
ISmatrices <- c(ISmatrices, list(SIS = mSIS))
}
if (tis) {
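#trend indicator saturation (TIS): each column is a linear trend starting at that observation (first column dropped)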
mTIS <- matrix(0, y.n, y.n)
v1n <- seq(1, y.n)
loop.indx <- 1:y.n
tmp <- function(i) {
mTIS[c(i:y.n), i] <<- v1n[1:c(y.n - i + 1)]
}
tmp <- sapply(loop.indx, tmp)
colnames(mTIS) <- paste("tis", y.index.as.char, sep = "")
mTIS <- mTIS[, -1]
ISmatrices <- c(ISmatrices, list(TIS = mTIS))
}
if (!is.list(uis) && !identical(as.numeric(uis), 0)) {
uis <- as.zoo(cbind(uis))
uis.names <- colnames(uis)
if (is.null(uis.names)) {
uis.names <- paste("uisxreg", 1:NCOL(uis), sep = "")
}
if (any(uis.names == "")) {
missing.colnames <- which(uis.names == "")
for (i in 1:length(missing.colnames)) {
uis.names[i] <- paste("uisxreg", missing.colnames[i],
sep = "")
}
}
uis <- na.trim(uis, sides = "both", is.na = "any")
uis.index.as.char <- as.character(index(uis))
t1 <- which(uis.index.as.char == y.index.as.char[1])
t2 <- which(uis.index.as.char == y.index.as.char[length(y.index.as.char)])
uis <- coredata(uis)
uis <- window(uis, start = t1, end = t2)
uis <- cbind(coredata(as.zoo(uis)))
colnames(uis) <- uis.names
if (nrow(uis) != y.n)
stop("nrow(uis) is unequal to no. of observations")
ISmatrices <- c(ISmatrices, list(UIS = uis))
}
if (is.list(uis)) {
for (i in 1:length(uis)) {
uis[[i]] <- as.matrix(coredata(as.zoo(uis[[i]])))
if (nrow(uis[[i]]) != y.n) {
stop(paste("nrow(uis[[", i, "]]) is unequal to no. of observations",
sep = ""))
}
}
uis.names <- paste("UIS", 1:length(uis), sep = "")
if (is.null(names(uis))) {
names(uis) <- uis.names
} else {
for (i in 1:length(uis)) {
if (names(uis)[i] == "") {
names(uis)[i] <- uis.names[i]
}else {
names(uis)[i] <- paste(uis.names[i], ".", names(uis)[i],
sep = "")
}
}
}
ISmatrices <- c(ISmatrices, uis)
}
if (is.list(blocks)) {
if (length(ISmatrices) != length(blocks)) {
stop("No. of IS matrices is unequal to length(blocks)")
}
blocks.is.list <- TRUE
ISblocks <- blocks
}else {
blocks.is.list <- FALSE
ISblocks <- list()
}
ISfinalmodels <- list()
for (i in 1:length(ISmatrices)) {
if (!blocks.is.list) {
ncol.adj <- NCOL(ISmatrices[[i]])
if (is.null(blocks)) {
blockratio.value <- ncol.adj/(ratio.threshold *
ncol.adj - mXncol)
blocksize.value <- ncol.adj/min(y.n * ratio.threshold,
max.block.size)
no.of.blocks <- max(2, blockratio.value, blocksize.value)
no.of.blocks <- ceiling(no.of.blocks)
no.of.blocks <- min(ncol.adj, no.of.blocks)
}else{
no.of.blocks <- blocks
}
blocksize <- ceiling(ncol.adj/no.of.blocks)
partitions.t2 <- blocksize
for (j in 1:no.of.blocks) {
if (blocksize * j <= ncol.adj) {
partitions.t2[j] <- blocksize * j
}
}
if (partitions.t2[length(partitions.t2)] < ncol.adj) {
partitions.t2 <- c(partitions.t2, ncol.adj)
}
blocksadj <- length(partitions.t2)
partitions.t1 <- partitions.t2 + 1
partitions.t1 <- c(1, partitions.t1[-blocksadj])
tmp <- list()
for (j in 1:blocksadj) {
tmp[[j]] <- partitions.t1[j]:partitions.t2[j]
}
ISblocks[[i]] <- tmp
}
#there is some problem with this one
ISblocksFun <- function(j, i, ISmatrices, ISblocks, mX,
parallel.options, y, olsMethod, t.pval, wald.pval,
do.pet, arLjungB, archLjungB, normality.JarqueB,
user.diagnostics, info.method, mxkeep, include.gum,
include.1cut, include.empty, max.paths, turbo, tol,
LAPACK, max.regs, print.searchinfo) {
if (length(ISblocks[[i]][[j]]) == 1) {
tmp <- colnames(ISmatrices[[i]])[ISblocks[[i]][[j]]]
mXis <- cbind(ISmatrices[[i]][, ISblocks[[i]][[j]]])
colnames(mXis) <- tmp
mXis <- cbind(mX, mXis)
}else {
mXis <- cbind(mX, ISmatrices[[i]][, ISblocks[[i]][[j]]])
}
mXis <- dropvar(mXis, tol = tol, LAPACK = LAPACK,
silent = print.searchinfo)
if (is.null(parallel.options)) {
if (print.searchinfo) {
message("\n", appendLF = FALSE)
message(names(ISmatrices)[i], " block ", j,
" of ", length(ISblocks[[i]]), ":", appendLF = TRUE)
}
}
#this one performs backwards selection of the best model
getsis <- gets::getsFun(y, mXis, untransformed.residuals = NULL,
user.estimator = list(name = "kfs", tol = tol,
LAPACK = LAPACK, method = olsMethod),
gum.result = NULL,
t.pval = t.pval, wald.pval = wald.pval, do.pet = do.pet,
ar.LjungB = arLjungB, arch.LjungB = archLjungB,
normality.JarqueB = normality.JarqueB, user.diagnostics = user.diagnostics,
gof.function = list(name = "infocrit", method = info.method),
gof.method = "min", keep = mxkeep, include.gum = include.gum,
include.1cut = include.1cut, include.empty = include.empty,
max.paths = max.paths, turbo = turbo, tol = tol,
LAPACK = LAPACK, max.regs = max.regs, print.searchinfo = print.searchinfo,
alarm = FALSE)
if (is.null(getsis$specific.spec)) {
ISspecific.models <- NULL
}else {
ISspecific.models <- names(getsis$specific.spec)
}
return(ISspecific.models)
}
if (is.null(parallel.options)) {
ISspecific.models <- lapply(1:length(ISblocks[[i]]),
ISblocksFun, i, ISmatrices, ISblocks, mX, parallel.options,
y, olsMethod, t.pval, wald.pval, do.pet, arLjungB,
archLjungB, normality.JarqueB, user.diagnostics,
info.method, mxkeep, include.gum, include.1cut,
include.empty, max.paths, turbo, tol, LAPACK,
max.regs, print.searchinfo)
}
if (!is.null(parallel.options)) {
if (print.searchinfo) {
message("\n", appendLF = FALSE)
message("Preparing parallel computing...", appendLF = TRUE)
message(names(ISmatrices)[i], " blocks to search in parallel: ",
length(ISblocks[[i]]), appendLF = TRUE)
message("Searching...", appendLF = TRUE)
}
blocksClust <- makeCluster(clusterSpec, outfile = "")
clusterExport(blocksClust, c("dropvar", "getsFun",
"ols", "infocrit", "diagnostics"), envir = .GlobalEnv)
ISspecific.models <- parLapply(blocksClust, 1:length(ISblocks[[i]]),
ISblocksFun, i, ISmatrices, ISblocks, mX, parallel.options,
y, olsMethod, t.pval, wald.pval, do.pet, arLjungB,
archLjungB, normality.JarqueB, user.diagnostics,
info.method, mxkeep, include.gum, include.1cut,
include.empty, max.paths, turbo, tol, LAPACK,
max.regs, print.searchinfo)
stopCluster(blocksClust)
}
if (print.searchinfo) {
message("\n", appendLF = FALSE)
message("GETS of union of retained ", names(ISmatrices)[i],
" variables... ", appendLF = TRUE)
}
if (length(ISspecific.models) == 0) {
isNames <- NULL
ISfinalmodels[[i]] <- NULL
}
if (length(ISspecific.models) > 0) {
isNames <- NULL
for (j in 1:length(ISspecific.models)) {
if (!is.null(ISspecific.models[[j]])) {
isNames <- union(isNames, ISspecific.models[[j]])
}
}
isNames <- setdiff(isNames, mXnames)
if (length(isNames) == 0) {
ISfinalmodels[[i]] <- mXnames
}
else {
mXisNames <- c(mXnames, isNames)
mXis <- cbind(mX, ISmatrices[[i]][, isNames])
colnames(mXis) <- mXisNames
mXis <- dropvar(mXis, tol = tol, LAPACK = LAPACK,
silent = print.searchinfo)
getsis <- gets::getsFun(y, mXis, untransformed.residuals = NULL,
user.estimator = list(name ="kfs", tol = tol,
LAPACK = LAPACK, method = olsMethod), gum.result = NULL,
t.pval = t.pval, wald.pval = wald.pval, do.pet = do.pet,
ar.LjungB = arLjungB, arch.LjungB = archLjungB,
normality.JarqueB = normality.JarqueB, user.diagnostics = user.diagnostics,
gof.function = list(name = "infocrit", method = info.method),
gof.method = "min", keep = mxkeep, include.gum = include.gum,
include.1cut = include.1cut, include.empty = include.empty,
max.paths = max.paths, turbo = turbo, tol = tol,
LAPACK = LAPACK, max.regs = max.regs, print.searchinfo = print.searchinfo,
alarm = FALSE)
ISfinalmodels[[i]] <- names(getsis$specific.spec)
}
}
}
names(ISblocks) <- names(ISmatrices)
if (print.searchinfo) {
message("\n", appendLF = FALSE)
message("GETS of union of ALL retained variables...",
appendLF = TRUE)
message("\n", appendLF = FALSE)
}
if (length(ISfinalmodels) == 0) {
ISfinalmodels <- NULL
if (is.null(mX)) {
mXis <- NULL
}
else {
mXis <- zoo(cbind(mX), order.by = y.index)
colnames(mXis) <- mXnames
}
}
if (length(ISfinalmodels) > 0) {
mIS <- NULL
for (i in 1:length(ISfinalmodels)) {
isNames <- NULL
if (!is.null(ISfinalmodels[[i]])) {
isNames <- setdiff(ISfinalmodels[[i]], mXnames)
}
if (length(isNames) > 0) {
tmp <- cbind(ISmatrices[[i]][, isNames])
colnames(tmp) <- isNames
mIS <- cbind(mIS, tmp)
}
}
mXis <- dropvar(cbind(mX, mIS), tol = tol, LAPACK = LAPACK,
silent = print.searchinfo)
mXis <- zoo(mXis, order.by = y.index)
}
#y <- as.ts(y, order.by = y.index)
#y <- zoo(y, order.by = y.index)
#mod <- arx(y, mxreg = mXis, vcov.type = vcov.type, qstat.options = qstat.options,
# user.diagnostics = user.diagnostics, tol = tol, LAPACK = LAPACK,
# plot = FALSE)
#this part has to be done properly
mod <-kfs.arx(y, mxreg = mXis, vcov.type = vcov.type, qstat.options = qstat.options,
user.diagnostics = user.diagnostics, tol = tol, LAPACK = LAPACK
)
y <- zoo(y, order.by = y.index)
getsis <- getsm_my(mod, keep = mxkeep, t.pval = t.pval, do.pet = do.pet,
wald.pval = wald.pval, ar.LjungB = ar.LjungB, arch.LjungB = arch.LjungB,
normality.JarqueB = normality.JarqueB, user.diagnostics = user.diagnostics,
info.method = info.method, include.empty = include.empty,
max.paths = max.paths, max.regs = max.regs, print.searchinfo = print.searchinfo,
#added because kfs is used as the estimator instead of the default ols
vcov.type = vcov.type)
ISnames <- setdiff(getsis$aux$mXnames, mXnames)
if (length(ISnames) == 0) {
ISnames <- NULL
}
colnames(getsis$aux$mX) <- getsis$aux$mXnames
getsis$gets.type <- "isat"
getsis$call <- isat.call
getsis <- c(list(ISfinalmodels = ISfinalmodels, ISnames = ISnames),
getsis)
getsis$aux$t.pval <- t.pval
class(getsis) <- "isat"
if (alarm) {
alarm()
}
if (is.null(plot)) {
plot <- getOption("plot")
if (is.null(plot)) {
plot <- FALSE
}
}
if (plot) {
plot.isat(getsis, coef.path = TRUE)
}
getsis$hessian = hessian
return(getsis)
}
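## Hypothetical usage sketch (illustrative only): assumes kfs.arx(), getsm_my() and the other
## helpers defined elsewhere in this project have been sourced.
# y.sim <- as.ts(cumsum(rnorm(120)) + c(rep(0, 60), rep(5, 60))) #local level with a step shift
# fit <- isat_my(y.sim, t.pval = 0.001, info.method = "aic", gr.kfs = "numerical")
# fit$ISnames #names of the retained impulse/step indicators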
| /ISA_indicator_saturation_v2.R | no_license | zuzanale/Master-thesis | R | false | false | 18,439 | r |
## These functions compute the inverse of a matrix 'x'.
## To save time, if the inverse has already been calculated (and the matrix has not changed),
## it is retrieved from the cache instead of being recomputed.
## This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of 'x'
## If the inverse has already been calculated,
## then the function should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
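## Usage sketch (illustrative example):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m) #first call computes the inverse and caches it
## cacheSolve(m) #second call prints "getting cached data" and returns the cached inverse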
| /cachematrix.R | no_license | wangjun341/ProgrammingAssignment2 | R | false | false | 954 | r |
\name{dtWV}
\alias{dtWV}
\title{Noncentral t Distribution Density by W.V.}
\description{
Compute the density function \eqn{f(x)} of the t distribution with
\code{df} degrees of freedom and non-centrality parameter \code{ncp},
according to Wolfgang Viechtbauer's proposal in 2002.
}
\usage{%--> ../R/t-nonc-fn.R + ?dt at ~/R/D/r-devel/R/src/library/stats/man/TDist.Rd
dtWV(x, df, ncp = 0, log = FALSE)
}
\arguments{
\item{x}{numeric vector.}
\item{df}{degrees of freedom (\eqn{> 0}, maybe non-integer). \code{df
= Inf} is allowed.}
\item{ncp}{non-centrality parameter \eqn{\delta}{delta};
If omitted, use the central t distribution.}
\item{log}{logical; if TRUE, \eqn{log(f(x))} is returned instead of \eqn{f(x)}.}
}
\details{
The formula used is \dQuote{asymptotic}: Resnikoff and Lieberman (1957),
p.1 and p.25ff, proposed to use recursive polynomials for (\emph{integer !})
degrees of freedom \eqn{f = 1,2,\dots, 20}, and then, for
\code{df}\eqn{ = f > 20}, use the asymptotic approximation which
Wolfgang Viechtbauer proposed as a first version of a non-central t
density for \R (when \code{\link{dt}()} did not yet have an \code{ncp}
argument).
}
\value{
numeric vector of density values, properly recycled in \code{(x, df, ncp)}.
}
\references{
Resnikoff, George J. and Lieberman, Gerald J. (1957)
\emph{Tables of the non-central t-distribution};
Technical report no. 32 (\code{LIE ONR 32}), April 1, 1957;
Applied Math. and Stat. Lab., Stanford University.
\url{https://statistics.stanford.edu/technical-reports/tables-non-central-t-distribution-density-function-cumulative-distribution}
%was \url{https://statistics.stanford.edu/research/tables-non-central-t-distribution-density-function-cumulative-distribution-function-and}
}
\author{Wolfgang Viechtbauer (2002) post to R-help
(\url{https://stat.ethz.ch/pipermail/r-help/2002-October/026044.html}),
and Martin Maechler (\code{log} argument; tweaks, notably recycling).
}
\seealso{
\code{\link{dt}}, \R's (C level) implementation of the (non-central) t density;
\code{\link{dntJKBf}}, for Johnson et al.'s summation formula approximation.
}
\examples{
tt <- seq(0, 10, len = 21)
ncp <- seq(0, 6, len = 31)
dt3R <- outer(tt, ncp, dt , df = 3)
dt3WV <- outer(tt, ncp, dtWV, df = 3)
all.equal(dt3R, dt3WV) # rel.err 0.00063
dt25R <- outer(tt, ncp, dt , df = 25)
dt25WV <- outer(tt, ncp, dtWV, df = 25)
all.equal(dt25R, dt25WV) # rel.err 1.1e-5
x <- -10:700
fx <- dt (x, df = 22, ncp =100)
lfx <- dt (x, df = 22, ncp =100, log=TRUE)
lfV <- dtWV(x, df = 22, ncp =100, log=TRUE)
head(lfx, 20) # shows that R's dt(*, log=TRUE) implementation is "quite suboptimal"
## graphics
opa <- par(no.readonly=TRUE)
par(mar=.1+c(5,4,4,3), mgp = c(2, .8,0))
plot(fx ~ x, type="l")
par(new=TRUE) ; cc <- c("red", adjustcolor("orange", 0.4))
plot(lfx ~ x, type = "o", pch=".", col=cc[1], cex=2, ann=FALSE, yaxt="n")
sfsmisc::eaxis(4, col=cc[1], col.axis=cc[1], small.args = list(col=cc[1]))
lines(x, lfV, col=cc[2], lwd=3)
dtt1 <- " dt"; dtt2 <- "(x, df=22, ncp=100"; dttL <- paste0(dtt2,", log=TRUE)")
legend("right", c(paste0(dtt1,dtt2,")"), paste0(c(dtt1,"dtWV"), dttL)),
lty=1, lwd=c(1,1,3), col=c("black", cc), bty = "n")
par(opa) # reset
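## argument recycling across (x, df, ncp), as noted in the Value section (illustrative check):
all.equal(dtWV(1:3, df = 5, ncp = 0:2),
          c(dtWV(1, df = 5, ncp = 0), dtWV(2, df = 5, ncp = 1), dtWV(3, df = 5, ncp = 2)))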
}
\keyword{distribution}
\keyword{math}
| /man/dtWV.Rd | no_license | cran/DPQ | R | false | false | 3,305 | rd |
### Load packages
require( geomorph )
require( ape )
#Clear workspace
rm( list = ls() )
#Set working directory to where turtle landmark data (from supplemental folder) is stored
setwd( "INSERT DIRECTORY PATH" )
#Load landmark data and transform to correct format for GPA commands
temp.file <- list.files(pattern = ".csv")
landmark.data <- lapply (temp.file, read.csv, row.names=1)
names(landmark.data) <- gsub(".csv","",temp.file)
landmark.data.temp <- array(as.numeric(unlist(landmark.data)), dim = c(123, 3, 184)) #number of landmarks, number of dimensions (3D coordinates), number of specimens
dimnames(landmark.data.temp)[[3]] <- gsub(".csv","",temp.file)
dimnames(landmark.data.temp)[[1]] <- rownames(landmark.data[[1]])
dimnames(landmark.data.temp)[[2]] <- c("x","y","z")
#Load slider information and colour information for deformation plots
setwd( "INSERT DIRECTORY PATH" )
sliders <- read.csv("Dataset 4. sliders.turtles.csv", row.names=1)
colours <- as.character( read.csv("Dataset 5. landmark_colours.csv", row.names=1)[,1] )
#Load specimen information
setwd( "INSERT DIRECTORY PATH" )
specimen.info <- read.csv( "Dataset 2. Specimen info.csv", header = TRUE )
rownames( specimen.info ) <- specimen.info[ , "Specimen_name" ]
#Load tree
setwd( "INSERT DIRECTORY PATH" )
tree <- read.nexus( "Dataset 7. cal3tree.calibrated.txt" )
alternative.tree <- read.nexus("Dataset 8. mbltree.calibrated.txt")
#Do GPA of labyrinth shape for all taxa available
GPA.data <- landmark.data.temp
tree.names <- as.character( specimen.info[ dimnames( GPA.data )[[ 3 ]] , "Tree_names" ] )
skull.box.temp <- as.character( specimen.info[ dimnames( GPA.data )[[ 3 ]] , "logV_mm3" ] )
#'ecologies' is only needed to delete marine species as well
#ecologies <- as.character( specimen.info[ dimnames( GPA.data )[[ 3 ]] , "Plotting_habitat" ] )
#'families' is only needed to exclude chelonioids
#families <- as.character( specimen.info[ dimnames( GPA.data )[[ 3 ]] , "Family" ] )
dimnames( GPA.data )[[ 3 ]][ !is.na( tree.names ) ] <- tree.names[ !is.na( tree.names ) ]
duplicate.specimens <- which( is.na(tree.names) == TRUE )
no.skull.box <- which (is.na(skull.box.temp) == TRUE )
#'marine' is only needed to delete marine species as well
#marine <- which( ecologies[] == "marine" )
#'chelonidoids' is only needed to exclude chelonioids
#chelonioids <- which( families[] == "Chelonioidea" )
#add 'marine' to this vector to exclude marine species from the analysis (as written, they are included); same for 'chelonioids'
delete.these <- unique(c(duplicate.specimens, no.skull.box))
GPA.data <- GPA.data[,, - delete.these]
#Delete inner loop landmarks
ASC.loop.landmarks <- which(grepl("loop", dimnames( GPA.data )[[ 1 ]]) == TRUE)
GPA.data <- GPA.data[-ASC.loop.landmarks,,]
sliders <- sliders[- which( grepl("loop", rownames(sliders)) == TRUE) , ]
rows.to.modify <- c( which( grepl("LSC", rownames(sliders)) == TRUE) , which(grepl("PSC", rownames(sliders)) == TRUE ) )
sliders[rows.to.modify, ] <- sliders[rows.to.modify, ] - 39 #shift LSC/PSC slider indices to account for the removed loop landmarks
GPA.labyrinth.all <- gpagen( GPA.data , curves = sliders , ProcD = F )
labyrinth.Csize.all <- GPA.labyrinth.all$Csize
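#rescale outlying centroid sizes (values > 5000), which appear to have been recorded in different units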
labyrinth.Csize.all[ labyrinth.Csize.all > 5000 ] <- labyrinth.Csize.all[ labyrinth.Csize.all > 5000 ] / 1000
#DO PCA
PCA.labyrinth <- plotTangentSpace( GPA.labyrinth.all$coords , warpgrids = F )
#Prepare tree that has same tips as the shape data blocks
tree.temp <- drop.tip( tree , tree$tip.label[ ! tree$tip.label %in% names( GPA.labyrinth.all$Csize ) ] )
#For tests with alternative calibration
tree.temp.alternative <- drop.tip( alternative.tree , alternative.tree $tip.label[ ! alternative.tree $tip.label %in% names( GPA.labyrinth.all$Csize ) ] )
#Examine trees
plot(tree.temp, cex=0.4)
plot(tree.temp.alternative, cex=0.4)
##Make a version of the specimen data that matches the taxon sample
data.temp <- specimen.info[ specimen.info$Tree_names %in% tree.temp$tip.label , ]
rownames( data.temp ) <- data.temp$Tree_names
data.temp <- data.temp[ tree.temp$tip.label , ]
##Make explanatory variables
##Habitat ecology
marine.all <- data.temp$Habitat_general == "marine" | data.temp$Fossil_marine == "yes"
names( marine.all ) <- rownames( data.temp )
marine.all <- marine.all[ tree.temp$tip.label ]
marine.all[which(is.na(marine.all))] <- "FALSE"
which(marine.all == TRUE)
marine <- data.temp$Habitat_general == "marine"
names( marine ) <- rownames( data.temp )
marine <- marine[ tree.temp$tip.label ]
marine[which(is.na(marine))] <- "FALSE"
which(marine == TRUE)
freshwater <- data.temp$Plotting_habitat == "freshwater"
names( freshwater ) <- rownames( data.temp )
freshwater <- freshwater[ tree.temp$tip.label ]
freshwater[which(is.na(freshwater))] <- "FALSE"
which(freshwater == TRUE)
terrestrial <- data.temp$Plotting_habitat == "terrestrial"
names( terrestrial ) <- rownames( data.temp )
terrestrial <- terrestrial[ tree.temp$tip.label ]
terrestrial[which(is.na(terrestrial))] <- "FALSE"
which(terrestrial == TRUE)
##Neck categories
no_plane <- data.temp$Retraction_type == "none"
names( no_plane ) <- rownames( data.temp )
no_plane <- no_plane[ tree.temp$tip.label ]
no_plane[which(is.na(no_plane))] <- "FALSE"
which(no_plane == TRUE)
vertical <- data.temp$Retraction_type == "vertical"
names( vertical ) <- rownames( data.temp )
vertical <- vertical[ tree.temp$tip.label ]
vertical[which(is.na(vertical))] <- "FALSE"
which(vertical == TRUE)
horizontal <- data.temp$Retraction_type == "sideways"
names( horizontal ) <- rownames( data.temp )
horizontal <- horizontal[ tree.temp$tip.label ]
horizontal[which(is.na(horizontal))] <- "FALSE"
which(horizontal == TRUE)
incomplete_retr <- data.temp$Retratction_ability == "incomplete"
names( incomplete_retr ) <- rownames( data.temp )
incomplete_retr <- incomplete_retr[ tree.temp$tip.label ]
incomplete_retr[which(is.na(incomplete_retr))] <- "FALSE"
which(incomplete_retr == TRUE)
full_retr <- data.temp$Retratction_ability == "full"
names( full_retr ) <- rownames( data.temp )
full_retr <- full_retr[ tree.temp$tip.label ]
full_retr[which(is.na(full_retr))] <- "FALSE"
which(full_retr == TRUE)
##Size proxies
skull_length.temp <- data.temp$Skull_length_mm
names(skull_length.temp) <- rownames (data.temp)
skull_length.temp <- skull_length.temp[ tree.temp$tip.label ]
skull_width.temp <- data.temp$Skull_width_mm
names(skull_width.temp) <- rownames (data.temp)
skull_width.temp <- skull_width.temp[ tree.temp$tip.label ]
skull_height.temp <- data.temp$Skull_height_mm
names(skull_height.temp) <- rownames (data.temp)
skull_height.temp <- skull_height.temp[ tree.temp$tip.label ]
##Skull geometry proxy
skull_geometry.temp <- skull_height.temp / skull_width.temp
#check frequency distribution
hist(skull_geometry.temp)
#check if these make sense
which(skull_geometry.temp[] == max(skull_geometry.temp))
which(skull_geometry.temp[] == min(skull_geometry.temp))
#Make a big data frame for analyses
gdf <- geomorph.data.frame( shape = GPA.labyrinth.all$coords[ ,, tree.temp$tip.label ] ,
phy = tree.temp ,
marine = marine , freshwater = freshwater , terrestrial = terrestrial , marine.all = marine.all,
no_plane = no_plane, vertical = vertical, horizontal = horizontal,
incomplete_retr = incomplete_retr, full_retr = full_retr,
skull_length = log10( skull_length.temp ) , skull_width = log10(skull_width.temp) , skull_height = log10(skull_height.temp) ,
skull_box = data.temp[ tree.temp$tip.label , "logV_mm3" ] ,
labyrinth_Csize = log10( labyrinth.Csize.all )[ tree.temp$tip.label ] ,
skull_geometry = skull_geometry.temp)
##In this script we set up all the combinations of explanatory variables for the right-hand sides of the models,
# and then run them all in a loop. This makes it easy to add regression models by extending the vector called "right.sides" (see the commented example below the vector).
#As the model-building process was iterative, several models that were explored initially are commented out ("muted") below.
#Models that are active are those reported in the table <shape_models_incl_fossils>
right.sides <- c(
#following models test relations of size variables as correlates of shape, exploring allometric effects
"skull_length" , "skull_width", "skull_height", "skull_box" , "labyrinth_Csize" ,
# -> skull height performs best (R2), followed by skull box.
#following model tests relations of skull geometry as correlates of shape
"skull_geometry" ,
# -> significant and explains much of the variance
#following models test independent effect of skull size and labyrinth size
# "skull_length + labyrinth_Csize" , "skull_box + labyrinth_Csize" ,
#-> both models show a significant independent effect; skull box performs better
#following models test independent effect of skull size and skull geometry
# "skull_length + skull_geometry" , "skull_box + skull_geometry" , "skull_height + skull_geometry",
#-> all models significant. In the height+geometry model, the proportion of variance explained is nearly equal between the two variables
#-> in the other models, more of the explained variance is attributed to geometry than to skull size
#following models test independent effect of skull geometry and labyrinth size
# "skull_geometry + labyrinth_Csize" ,
# -> also significant
#following models test independent effects of skull size and skull geometry and labyrinth size
# "skull_length + skull_geometry + labyrinth_Csize" , #slightly worse in R2 than below model
# "skull_box + skull_geometry + labyrinth_Csize" , #slightly better in R2 than above model
#-> all independent effects are important
#following models test non-independent effects, i.e. the hypothesis that taxa with proportionally larger labyrinths relative to skull size have different labyrinth shapes
# "skull_length * labyrinth_Csize" , #
# "skull_box * labyrinth_Csize" , #interaction term significant
#
# "skull_box * labyrinth_Csize + skull_geometry" ,
#->interaction term remains significant
#following models test non-independent effects, i.e. the hypothesis that taxa with higher/broader skulls relative to skull size have different labyrinth shapes
# "skull_length * skull_geometry" , #slightly worse in R2 than below model
# "skull_box * skull_geometry" , #slightly better in R2 than above model
#-> interaction term is significant
#the following models ask: do terrestrial turtles have a different mean labyrinth shape than non-terrestrial turtles?
# -> initial analyses show: only freshwater and terrestrial are relevant
"terrestrial" , "freshwater", "marine.all", "marine" ,
#-> marine (extant-only) variable not significant
#-> marine.all near-significant
#-> terrestrial not significant
#-> freshwater not significant
#the following models ask: do turtles with/without neck retraction have different mean labyrinth shapes?
"incomplete_retr" , "full_retr", #both non significant
#the following models ask: do turtles with specific neck retraction types have different mean labyrinth shapes?
"no_plane" , "vertical", "horizontal", #all non significant
# "skull_box * skull_geometry + labyrinth_Csize + incomplete_retr", #non-significant
# "skull_box * skull_geometry + labyrinth_Csize + full_retr", #non-significant
# "skull_box * skull_geometry + labyrinth_Csize + no_plane", #non-significant
# "skull_box * skull_geometry + labyrinth_Csize + vertical", #non-significant
# "skull_box * skull_geometry + labyrinth_Csize + horizontal", #non-significant
# "skull_box * skull_geometry + labyrinth_Csize + freshwater", #non-significant
# "skull_box * skull_geometry + labyrinth_Csize + terrestrial", #non-significant
# "skull_box * skull_geometry + labyrinth_Csize + marine.all", #non-significant
#check if ecological effects are redundant with skull size
# "skull_box + marine.all" , #marine.all becomes clearly non-significant, indicating the near-significant effect in bivarite models can be explained by skull size
# "labyrinth_Csize + marine.all" , #marine all remains near significnt significant
# "skull_geometry + marine.all" , #marine,all remains near significant
#Further tests for effect of marine.all
# "skull_box + skull_geometry + labyrinth_Csize + marine.all" ,
#also insignificant, not further considered
#are ecological effects important when included in the best model:
# "skull_box * skull_geometry + labyrinth_Csize + marine.all" ,
# -> marine remains non significant
#best model excludes ecological effects:
"skull_box * skull_geometry + labyrinth_Csize"
#-> all model parameters significant; braincase shape explains most of the variance
)
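#Additional candidate models can be explored simply by appending to 'right.sides' before the
#formulas are built below, e.g. (hypothetical example, not one of the reported models):
#right.sides <- c(right.sides, "skull_height * labyrinth_Csize")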
models <- paste( "shape ~" , right.sides )
models <- lapply( models , as.formula )
##Run Procrustes distance pGLS analyses (Adams 2014)
procD.pgls.fits <- list()
for( i in 1:length( models ) ) {
procD.pgls.fits[[ i ]] <- procD.pgls( models[[ i ]] , phy = phy , SS.type = "II" , data = gdf )
}
##See summaries of procD.pgls results
model.summaries <- lapply( procD.pgls.fits , summary )
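#Optional convenience, not part of the original workflow: naming each summary by its model
#right-hand side makes the entries in the output file easier to match to models
#names(model.summaries) <- right.sides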
model.summaries[15]
#Print all coefficients to file (only the line matching the current analysis configuration should be run; the other file names refer to the alternative runs described above)
capture.output(model.summaries, file = "Labyrinth_shape_model_summaries_incl_fossils.txt")
capture.output(model.summaries, file = "Labyrinth_shape_model_summaries_excl_marine_species.txt")
capture.output(model.summaries, file = "Labyrinth_shape_model_summaries_excl_chelonioids.txt")
capture.output(model.summaries, file = "Labyrinth_shape_model_summaries_no_ASCloop.txt")
##Alternative run with the alternative tree calibration
gdf2 <- geomorph.data.frame( shape = GPA.labyrinth.all$coords[ ,, tree.temp$tip.label ] ,
phy = tree.temp.alternative ,
marine = marine , freshwater = freshwater , terrestrial = terrestrial , marine.all = marine.all,
no_plane = no_plane, vertical = vertical, horizontal = horizontal,
incomplete_retr = incomplete_retr, full_retr = full_retr,
skull_length = log10( skull_length.temp ) , skull_width = log10(skull_width.temp) , skull_height = log10(skull_height.temp) ,
skull_box = data.temp[ tree.temp$tip.label , "logV_mm3" ] ,
labyrinth_Csize = log10( labyrinth.Csize.all )[ tree.temp$tip.label ] ,
skull_geometry = skull_geometry.temp)
procD.pgls.fits.2 <- list()
for( i in 1:length( models ) ) {
procD.pgls.fits.2[[ i ]] <- procD.pgls( models[[ i ]] , phy = phy , SS.type = "II" , data = gdf2 )
}
##See summaries of procD.pgls results
model.summaries.2 <- lapply( procD.pgls.fits.2 , summary )
#Print all coefficients to file
capture.output(model.summaries.2, file = "Labyrinth_shape_model_summaries_incl_fossils_alternative_tree.txt")
##Phylogenetic signal from Procrustes shape variables
physig <- physignal(gdf$shape, phy = gdf$phy)
summary(physig)
plot(physig)
|
/Dataset 11. Labyrinth shape model comparisons.R
|
no_license
|
SerjoschaEvers/Turtle-Labyrinth-Ecomorphology-and-Evolution-Data
|
R
| false | false | 15,719 |
r
|
library(igraph)
# Read a whitespace-separated edge list (one edge per line) from file
load_graph <- function(filename) {
edgelist <- read.table(filename, sep = "", header = F)
return(edgelist)
}
# Convert a 0-indexed two-column edge list into an igraph graph (igraph vertex ids start at 1)
convert_graph <- function(graph_df) {
e <- c()
for(i in 1:nrow(graph_df)) {
row <- graph_df[i,]
e <- c(e, row[[1]] + 1)
e <- c(e, row[[2]] + 1)
}
return(graph(edges = e, n = max(graph_df, na.rm = T) + 1, directed = F))
}
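# A vectorized alternative to the row-by-row loop above (a sketch under the same assumptions:
# the first two columns hold 0-indexed source/target node ids):
convert_graph_fast <- function(graph_df) {
  e <- as.vector(t(as.matrix(graph_df[, 1:2]) + 1)) # interleave source/target pairs, shifted to 1-indexing
  return(graph(edges = e, n = max(graph_df, na.rm = T) + 1, directed = F))
}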
# Embed the graph in hyperbolic space; labne_hm() (LaBNE+HM) is assumed to be provided by an
# embedding package loaded elsewhere
embed_graph <- function(graph) {
return(labne_hm(net = graph, gma = 2.3, Temp = 0.15, k.speedup = 10, w = 2*pi))
}
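# Example usage (a sketch; the file name is hypothetical):
# g <- convert_graph(load_graph("edges.txt"))
# coords <- embed_graph(g)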
|
/hypermap/hyper_embed.R
|
no_license
|
mananshah99/hyperbolic
|
R
| false | false | 476 |
r
|
# Solution to question 2
# V, the given matrix:
V <- matrix(c(3, -1, 1, -1, 5, -1, 1, -1, 3), 3)
# Q (orthogonal here) contains eigenvectors in its columns:
Q <- eigen(V)$vectors
# D contains the inverse square roots of V's eigenvalues
# on the diagonal, zeros elsewhere:
D <- diag(1/sqrt(eigen(V)$values))
# The desired `inverse square root` matrix:
W <- Q %*% D %*% t(Q)
# W %*% W #check that this returns solve(V)
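# Why this works (note added): V is symmetric, so V = Q diag(lambda) t(Q) with orthogonal Q;
# hence W = Q diag(lambda^(-1/2)) t(Q) satisfies W %*% W = Q diag(1/lambda) t(Q) = solve(V).
# Quick numerical check of that identity:
stopifnot(isTRUE(all.equal(W %*% W, solve(V))))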
|
/week2/question2.R
|
no_license
|
david-dobor/8004
|
R
| false | false | 450 |
r
|
\name{LaplacesDemonCpp-package}
\alias{LaplacesDemonCpp-package}
\alias{LaplacesDemonCpp}
\alias{.colVars}
\alias{.iqagh}
\alias{.iqaghsg}
\alias{.iqcagh}
\alias{.laaga}
\alias{.labfgs}
\alias{.labhhh}
\alias{.lacg}
\alias{.ladfp}
\alias{.lahar}
\alias{.lahj}
\alias{.lalbfgs}
\alias{.lalm}
\alias{.lanm}
\alias{.lanr}
\alias{.lapso}
\alias{.larprop}
\alias{.lasgd}
\alias{.lasoma}
\alias{.laspg}
\alias{.lasr1}
\alias{.latr}
\alias{.mcmcadmg}
\alias{.mcmcagg}
\alias{.mcmcahmc}
\alias{.mcmcaies}
\alias{.mcmcam}
\alias{.mcmcamm}
\alias{.mcmcamm.b}
\alias{.mcmcamwg}
\alias{.mcmccharm}
\alias{.mcmcdemc}
\alias{.mcmcdram}
\alias{.mcmcdrm}
\alias{.mcmcess}
\alias{.mcmcgibbs}
\alias{.mcmcgg}
\alias{.mcmcggcp}
\alias{.mcmcggcpp}
\alias{.mcmcggdp}
\alias{.mcmcggdpp}
\alias{.mcmcharm}
\alias{.mcmchmc}
\alias{.mcmchmcda}
\alias{.mcmcim}
\alias{.mcmcinca}
\alias{.mcmcmala}
\alias{.mcmcmcmcmc}
\alias{.mcmcmtm}
\alias{.mcmcmwg}
\alias{.mcmcnuts}
\alias{.mcmcohss}
\alias{.mcmcram}
\alias{.mcmcrdmh}
\alias{.mcmcrefractive}
\alias{.mcmcrj}
\alias{.mcmcrss}
\alias{.mcmcrwm}
\alias{.mcmcsamwg}
\alias{.mcmcsgld}
\alias{.mcmcslice}
\alias{.mcmcsmwg}
\alias{.mcmcthmc}
\alias{.mcmctwalk}
\alias{.mcmcuess}
\alias{.mcmcusamwg}
\alias{.mcmcusmwg}
\alias{.rowVars}
\alias{.vbsalimans2}
\docType{package}
\title{
LaplacesDemonCpp is an R package that includes C++ functions as an
extension to the LaplacesDemon package.
}
\description{
The LaplacesDemon package is a complete environment for Bayesian
inference in R. LaplacesDemon itself is written entirely in R, whereas
C++ is valuable for speed. This package provides C++ functions to be
used with the LaplacesDemon package.
}
\details{
\tabular{ll}{
Package: \tab LaplacesDemonCpp\cr
Type: \tab Package\cr
Version: \tab 2014-06-23\cr
Date: \tab 2014-06-23\cr
License: \tab MIT\cr
}
The LaplacesDemonCpp package consists of C++ functions that replace
the corresponding R functions in LaplacesDemon.
Documentation here is minimal because these functions are fully
documented in the LaplacesDemon package.
To use LaplacesDemonCpp, simply load the LaplacesDemonCpp library;
the C++ functions it provides then replace the R functions of the same
name in LaplacesDemon (see the example below).
}
\author{Statisticat, LLC. \email{software@bayesian-inference.com}}
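\examples{
## Minimal illustration of the intended usage (kept non-executed; assumes the
## LaplacesDemon and LaplacesDemonCpp packages are installed):
\dontrun{
library(LaplacesDemonCpp)
# From here on, the C++ implementations mask the corresponding R functions
# of the same name in LaplacesDemon.
}
}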
\keyword{package}
|
/man/LaplacesDemonCpp-package.Rd
|
permissive
|
sakex/LaplacesDemonCpp
|
R
| false | false | 2,319 |
rd
|
#Use fold change data to look for trends in gene expression changes and make MA-style plots
install.packages("ggplot2")
install.packages("reshape2")
install.packages("pheatmap")
install.packages("tidyr")
install.packages("reshape")
library(ggplot2)
library(reshape2)
library(pheatmap)
library(tidyr)
library(reshape)
#Here, I am reading in data that describes the samples associated with each file name
phenodata2 = read.csv("~/Data/rnaseq_data/rnaseq.phenotypedata.csv", header = FALSE)
#here, I am creating lists with specific genes I want to look at
wnt = c("APC","ASCL1","ASCL2","AXIN1","AXIN2","CTNNB1","BCL9","BCL92","ABL1","CSNK1A1","CSNK1D","DIXDC1","CREB","DVL1","DVL2","DVL3","DRAXIN","GSK3A","GSK3B","TGFB1I1","CTNNBIP1", "PYGO1","PYGO2","HNF1B","UBE2A","TCF7","TCF7L1","TLE2")
crc = c("AKT1", "AKT2","AKT3","APC","APC2","APPL1","ARAF","Axin1","AXIN2","BAD","BAX","BCL2","BIRC5","BRAF","casp3","CASP9","CCND1","CTNNB1","CYCS","DCC","FOS","GSK3B","JUN","KRAS","LEF1","MAP2K1","MAPK1","MAPK10","MAPK3","MAPK8","MAPK9","MLH1","MSH2","MSH3","MSH6","MYC","PIK3CA","PIK3CB","PIK3CD","PIK3CG","PIK3R1","PIK3R2","PIK3R3","PIK3R5","RAC1","RAC2","RAC3","RAF1","RALGDS","RHOA","SMAD2","SMAD3","SMAD4","TCF7","TCF7L1","TCF7L2","TGFB1","TGFB2","TGFB3","TGFBR1","TGFBR2","TP53")
bcat = c("MYC","MYCN","CCND1","HNF1A","LEF1","PPARD","JUN","FOSL1","PLAUR","MMP7","AXIN2","NRCAM","TCF4","GAST","CD44","EFNB1","EFNB2","BMP4","CLDN1","BIRC5","VEGFA","FGF18","ATOH1","MET","EDN1","MYCBP","L1CAM","ID2","JAG1","MSL1","TIAM1","NOS2","TERT","DKK1","FGF9","LBH","FGF20","LGR5","SOX9","SOX17","RUNX2","GREM1","SALL4","TNFSF11","TNFRSF11B","CYR61","PTTG1","DLL1","FOXN1","MMP26","NANOG","POU5F1","SNAI1","FN1","FZD7","ISL1","MMP2","MMP9","FST","WNT3A","TWIST1","TBX3","GBX2","CACNA1G","CDC25","WISP1","WISP2","IGF2","EMP1","IGF1","VEGFC","PTGS2","IL6","PITX2","EGFR","CDH1","CDKN2A","CTLA4","CXCL8","VCAN","TNFRSF19") # INFO FROM https://web.stanford.edu/group/nusselab/cgi-bin/wnt/target_genes
#read in 24hrbft2 expression data
expres.24 = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.24 = expres.24[expres.24$condition %in% c("24hrbft2"),]
expres.24 = expres.24[order(expres.24$target_id),]
expres.24$logtpm= log2(expres.24$tpm + 1)
#expres.24 = expres.24[expres.24$logtpm>1,]
expres.24 = merge(expres.24, phenodata2, by.x= "sample", by.y= "V1")
expres.24 = expres.24[order(expres.24$target_id),]
#read in 24hrblank data
expres.24b = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.24b = expres.24b[expres.24b$condition %in% c("24blank"),]
expres.24b = expres.24b[order(expres.24b$target_id),]
expres.24b$logtpm= log2(expres.24b$tpm + 1)
#expres.24b = expres.24b[expres.24b$logtpm>1,]
expres.24b = merge(expres.24b, phenodata2, by.x= "sample", by.y= "V1")
expres.24b = expres.24b[order(expres.24b$target_id),]
#split up data for each group
expres.24.220 = expres.24[expres.24$V2=="24hrbft2.220",]
expres.24.031 = expres.24[expres.24$V2=="24hrbft2.031",]
expres.24.trial1 = expres.24[expres.24$V2=="24hrbft2.trial1",]
expres.24b.220 = expres.24b[expres.24b$V2=="24blank.220",]
expres.24b.031 = expres.24b[expres.24b$V2=="24blank.031",]
expres.24b.trial1 = expres.24b[expres.24b$V2=="24blank.trial1",]
#calculate the logFC in expression between bft2 and blank samples, then combine all the samples (the low-expression filter below is currently commented out)
expres.24.220$logfc = log2((1+expres.24.220$tpm)/(1+expres.24b.220$tpm))
expres.24.031$logfc = log2((1+expres.24.031$tpm)/(1+expres.24b.031$tpm))
expres.24.trial1$logfc = log2((1+expres.24.trial1$tpm)/(1+expres.24b.trial1$tpm))
expres.24.all=rbind(expres.24.220, expres.24.trial1, expres.24.031)
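#Worked example of the fold-change formula above (note added): a gene with tpm = 7 in the bft2
#sample and tpm = 3 in the matched blank sample gives log2((1+7)/(1+3)) = log2(2) = 1.
#The calculation also assumes the bft2 and blank tables are row-aligned by gene after the
#ordering steps above; this optional check should return TRUE:
#all(expres.24.220$target_id == expres.24b.220$target_id)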
#expres.24.all = expres.24.all[expres.24.all$logtpm>1,]
#read in 48hrbft2 expression data
expres.48 = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.48 = expres.48[expres.48$condition %in% c("48hrbft2"),]
expres.48 = expres.48[order(expres.48$target_id),]
expres.48$logtpm= log2(expres.48$tpm + 1)
#expres.48 = expres.48[expres.48$logtpm>1,]
expres.48 = merge(expres.48, phenodata2, by.x= "sample", by.y= "V1")
expres.48 = expres.48[order(expres.48$target_id),]
#read in 48hrblank data
expres.48b = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.48b = expres.48b[expres.48b$condition %in% c("48blank"),]
expres.48b = expres.48b[order(expres.48b$target_id),]
expres.48b$logtpm= log2(expres.48b$tpm + 1)
#expres.48b = expres.48b[expres.48b$logtpm>1,]
expres.48b = merge(expres.48b, phenodata2, by.x= "sample", by.y= "V1")
expres.48b = expres.48b[order(expres.48b$target_id),]
#separate out replicate groups
expres.48.220 = expres.48[expres.48$V2=="48hrbft2.220",]
expres.48.031 = expres.48[expres.48$V2=="48hrbft2.031",]
expres.48.trial1 = expres.48[expres.48$V2=="48hrbft2.trial1",]
expres.48b.220 = expres.48b[expres.48b$V2=="48blank.220",]
expres.48b.031 = expres.48b[expres.48b$V2=="48blank.031",]
expres.48b.trial1 = expres.48b[expres.48b$V2=="48blank.trial1",]
#calculate logFC for the difference in expression between bft2 and blank, then combine samples (the low-expression filter below is currently commented out)
expres.48.220$logfc = log2((1+expres.48.220$tpm)/(1+expres.48b.220$tpm))
expres.48.031$logfc = log2((1+expres.48.031$tpm)/(1+expres.48b.031$tpm))
expres.48.trial1$logfc = log2((1+expres.48.trial1$tpm)/(1+expres.48b.trial1$tpm))
expres.48.all = rbind(expres.48.220, expres.48.trial1, expres.48.031)#do not remove 031 time point because it looks like an outlier
#expres.48.all = expres.48.all[expres.48.all$logtpm>1,]
####################
#set up data to be run with pheatmap function
expres.2448.wnt = rbind(expres.24.all, expres.48.all)
#get average logfc
setup.expres.wnt = expres.2448.wnt[ ,c("target_id","V2","logfc")]
setup.expres.wnt.melt = melt(setup.expres.wnt, target_id=c("target_id","logfc"))
setup.expres.wnt.melt.cast = cast(setup.expres.wnt.melt, target_id~V2, mean)
setup.expres.wnt.melt.names = setup.expres.wnt.melt.cast
row.names(setup.expres.wnt.melt.names) = setup.expres.wnt.melt.names[ ,1]
setup.expres.wnt.melt.names = setup.expres.wnt.melt.names[ ,-1] #these 3 commands make the gene names (target_id) the row names
colnames(setup.expres.wnt.melt.names) = c("hr24bft2.031","hr24bft2.220","hr24bft2.trial1","hr48bft2.031","hr48bft2.220","hr48bft2.trial1")
expres.wnt.averages = setup.expres.wnt.melt.names
expres.wnt.averages$average24hrbft2 = (expres.wnt.averages$hr24bft2.031 + expres.wnt.averages$hr24bft2.220 + expres.wnt.averages$hr24bft2.trial1)/3
expres.wnt.averages$average48hrbft2 = (expres.wnt.averages$hr48bft2.031 + expres.wnt.averages$hr48bft2.220 + expres.wnt.averages$hr48bft2.trial1)/3
expres.wnt.averages = expres.wnt.averages [ , c("average24hrbft2","average48hrbft2")]
expres.wnt.averages.df = data.frame(expres.wnt.averages)
expres.wnt.averages.df$target_id= rownames(expres.wnt.averages.df)
expres.wnt.averages.melt = melt(expres.wnt.averages.df)
colnames(expres.wnt.averages.melt) = c("target_id","variable","average_logfc")
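#(The melt/cast steps above compute, per gene and per sample group, the mean logFC; an equivalent
#long-format summary could also be obtained in base R with, e.g.,
#aggregate(logfc ~ target_id + V2, data = setup.expres.wnt, FUN = mean) )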
#get average tpm values
setup.expres.wnt.tpm = expres.2448.wnt[ ,c("target_id","V2","tpm")]
setup.expres.wnt.tpm.melt = melt(setup.expres.wnt.tpm, target_id=c("target_id","tpm"))
setup.expres.wnt.tpm.melt.cast = cast(setup.expres.wnt.tpm.melt, target_id~V2, mean)
setup.expres.wnt.tpm.melt.names = setup.expres.wnt.tpm.melt.cast
row.names(setup.expres.wnt.tpm.melt.names) = setup.expres.wnt.tpm.melt.names[ ,1]
setup.expres.wnt.tpm.melt.names = setup.expres.wnt.tpm.melt.names[ ,-1] #these 3 commands make the gene names (target_id) the row names
colnames(setup.expres.wnt.tpm.melt.names) = c("hr24bft2.031","hr24bft2.220","hr24bft2.trial1","hr48bft2.031","hr48bft2.220","hr48bft2.trial1")
expres.wnt.tpm.averages = setup.expres.wnt.tpm.melt.names
expres.wnt.tpm.averages$average24hrbft2 = (expres.wnt.tpm.averages$hr24bft2.031 + expres.wnt.tpm.averages$hr24bft2.220 + expres.wnt.tpm.averages$hr24bft2.trial1)/3
expres.wnt.tpm.averages$average48hrbft2 = (expres.wnt.tpm.averages$hr48bft2.031 + expres.wnt.tpm.averages$hr48bft2.220 + expres.wnt.tpm.averages$hr48bft2.trial1)/3
expres.wnt.tpm.averages = expres.wnt.tpm.averages [ , c("average24hrbft2","average48hrbft2")]
expres.wnt.tpm.averages.df = data.frame(expres.wnt.tpm.averages)
expres.wnt.tpm.averages.df$target_id= rownames(expres.wnt.tpm.averages.df)
expres.wnt.tpm.averages.melt = melt(expres.wnt.tpm.averages.df)
colnames(expres.wnt.tpm.averages.melt) = c("target_id","variable","average_tpm")
#here, I am separating out my 24hr and 48hr data, then merging my logfc data with my average tpm data
expres.wnt.averages.melt.24 = expres.wnt.averages.melt[expres.wnt.averages.melt$variable=="average24hrbft2", ]
expres.wnt.averages.melt.48 = expres.wnt.averages.melt[expres.wnt.averages.melt$variable=="average48hrbft2", ]
expres.wnt.tpm.averages.melt.24 = expres.wnt.tpm.averages.melt[expres.wnt.tpm.averages.melt$variable=="average24hrbft2", ]
expres.wnt.tpm.averages.melt.48 = expres.wnt.tpm.averages.melt[expres.wnt.tpm.averages.melt$variable=="average48hrbft2", ]
expres.wnt.tpm.logfc.24 = merge(expres.wnt.tpm.averages.melt.24, expres.wnt.averages.melt.24, by.x= "target_id", by.y= "target_id")
expres.wnt.tpm.logfc.48 = merge(expres.wnt.tpm.averages.melt.48, expres.wnt.averages.melt.48, by.x= "target_id", by.y= "target_id")
#here, I am making a new logtpm column
expres.wnt.tpm.logfc.24$logtpm = log2(expres.wnt.tpm.logfc.24$average_tpm +1)
expres.wnt.tpm.logfc.48$logtpm = log2(expres.wnt.tpm.logfc.48$average_tpm +1)
#Add chromatin state data to all of my samples
#first, I will read in data with info on uniquepeak locations
upeaks.24hrbft2= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique24hrbft2peaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
upeaks.24hrblank= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique24hrblankpeaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
upeaks.48hrbft2= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique48hrbft2peaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
upeaks.48hrblank= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique48hrblankpeaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
#genes in wnt pathway
expres.wnt.tpm.logfc.24$bft48hr.chromatin="nochange"
expres.wnt.tpm.logfc.24$bft48hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.48hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.24$bft48hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.48hrblank$symbol]="closed"
expres.wnt.tpm.logfc.24$bft24hr.chromatin="nochange"
expres.wnt.tpm.logfc.24$bft24hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.24hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.24$bft24hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.24hrblank$symbol]="closed"
expres.wnt.tpm.logfc.48$bft48hr.chromatin="nochange"
expres.wnt.tpm.logfc.48$bft48hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.48hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.48$bft48hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.48hrblank$symbol]="closed"
expres.wnt.tpm.logfc.48$bft24hr.chromatin="nochange"
expres.wnt.tpm.logfc.48$bft24hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.24hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.48$bft24hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.24hrblank$symbol]="closed"
table(expres.wnt.tpm.logfc.24$bft24hr.chromatin)
table(expres.wnt.tpm.logfc.24$bft48hr.chromatin)
#table(expres.wnt.tpm.logfc.24$bftboth.chromatin) #commented out: 'bftboth.chromatin' is never created in this script, so this call would fail
table(expres.wnt.tpm.logfc.48$bft24hr.chromatin)
table(expres.wnt.tpm.logfc.48$bft48hr.chromatin)
#table(expres.wnt.tpm.logfc.48$bftboth.chromatin) #commented out: 'bftboth.chromatin' is never created in this script, so this call would fail
expres.wnt.tpm.logfc.24.nono = expres.wnt.tpm.logfc.24[ expres.wnt.tpm.logfc.24$bft24hr.chromatin !="nochange",] #writes only genes that are opened or closed
expres.wnt.tpm.logfc.48.nono = expres.wnt.tpm.logfc.48[ expres.wnt.tpm.logfc.48$bft48hr.chromatin !="nochange",] #writes only genes that are opened or closed
#perform stats
wilcox.test (expres.wnt.tpm.logfc.24[ expres.wnt.tpm.logfc.24$bft24hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.24[ expres.wnt.tpm.logfc.24$bft24hr.chromatin=="closed","average_logfc" ]) #testing whether the expression fold change differs between genes whose chromatin opened vs closed
mean(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="opened","average_logfc"])
mean(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="closed","average_logfc"])
median(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="opened","average_logfc"])
median(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="closed","average_logfc"])
wilcox.test (expres.wnt.tpm.logfc.48[ expres.wnt.tpm.logfc.48$bft48hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.48[ expres.wnt.tpm.logfc.48$bft48hr.chromatin=="closed","average_logfc" ])
mean(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="opened","average_logfc"])
mean(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="closed","average_logfc"])
median(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="opened","average_logfc"])
median(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="closed","average_logfc"])
#get rid of low expression genes
expres.wnt.tpm.logfc.24.nolow = expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$logtpm>1,]
expres.wnt.tpm.logfc.48.nolow = expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$logtpm>1,]
expres.wnt.tpm.logfc.24.nono.nolow = expres.wnt.tpm.logfc.24.nolow[ expres.wnt.tpm.logfc.24.nolow$bft24hr.chromatin !="nochange",] #writes only genes that are opened or closed
expres.wnt.tpm.logfc.48.nono.nolow = expres.wnt.tpm.logfc.48.nolow[ expres.wnt.tpm.logfc.48.nolow$bft48hr.chromatin !="nochange",] #writes only genes that are opened or closed
#stats with low expression genes removed
wilcox.test (expres.wnt.tpm.logfc.24.nolow[ expres.wnt.tpm.logfc.24.nolow$bft24hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.24.nolow[ expres.wnt.tpm.logfc.24.nolow$bft24hr.chromatin=="closed","average_logfc" ])
wilcox.test (expres.wnt.tpm.logfc.48.nolow[ expres.wnt.tpm.logfc.48.nolow$bft48hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.48.nolow[ expres.wnt.tpm.logfc.48.nolow$bft48hr.chromatin=="closed","average_logfc" ])
#plot
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.24hrs.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.24, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft24hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.015 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.24hrs.nonochange.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.24.nono, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft24hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.015 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.24hrs.nonochange.nolow.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.24.nono.nolow, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft24hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.009 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.48hrs.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.48, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft48hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2) + geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.456 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.48hrs.nonochange.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.48.nono, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft48hr.chromatin),alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.456 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.48hrs.nonochange.nolow.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.48.nono.nolow, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft48hr.chromatin),alpha=0.75) + geom_abline(intercept=0.5, slope=0)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.383 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
##################
|
/sleuth.maplot.rerun.R
|
no_license
|
Jawara22/fragillis
|
R
| false | false | 18,289 |
r
|
#Use fold change data to look for trends in gene expression changes and make MA-style plots
install.packages("ggplot2")
install.packages("reshape2")
install.packages("pheatmap")
install.packages("tidyr")
install.packages("reshape")
library(ggplot2)
library(reshape2)
library(pheatmap)
library(tidyr)
library(reshape)
#Here, I am reading in data that describes the samples associated with each file name
phenodata2 = read.csv("~/Data/rnaseq_data/rnaseq.phenotypedata.csv", header = FALSE)
#here, I am creating lists with specific genes I want to look at
wnt = c("APC","ASCL1","ASCL2","AXIN1","AXIN2","CTNNB1","BCL9","BCL92","ABL1","CSNK1A1","CSNK1D","DIXDC1","CREB","DVL1","DVL2","DVL3","DRAXIN","GSK3A","GSK3B","TGFB1I1","CTNNBIP1", "PYGO1","PYGO2","HNF1B","UBE2A","TCF7","TCF7L1","TLE2")
crc = c("AKT1", "AKT2","AKT3","APC","APC2","APPL1","ARAF","Axin1","AXIN2","BAD","BAX","BCL2","BIRC5","BRAF","casp3","CASP9","CCND1","CTNNB1","CYCS","DCC","FOS","GSK3B","JUN","KRAS","LEF1","MAP2K1","MAPK1","MAPK10","MAPK3","MAPK8","MAPK9","MLH1","MSH2","MSH3","MSH6","MYC","PIK3CA","PIK3CB","PIK3CD","PIK3CG","PIK3R1","PIK3R2","PIK3R3","PIK3R5","RAC1","RAC2","RAC3","RAF1","RALGDS","RHOA","SMAD2","SMAD3","SMAD4","TCF7","TCF7L1","TCF7L2","TGFB1","TGFB2","TGFB3","TGFBR1","TGFBR2","TP53")
bcat = c("MYC","MYCN","CCND1","HNF1A","LEF1","PPARD","JUN","FOSL1","PLAUR","MMP7","AXIN2","NRCAM","TCF4","GAST","CD44","EFNB1","EFNB2","BMP4","CLDN1","BIRC5","VEGFA","FGF18","ATOH1","MET","EDN1","MYCBP","L1CAM","ID2","JAG1","MSL1","TIAM1","NOS2","TERT","DKK1","FGF9","LBH","FGF20","LGR5","SOX9","SOX17","RUNX2","GREM1","SALL4","TNFSF11","TNFRSF11B","CYR61","PTTG1","DLL1","FOXN1","MMP26","NANOG","POU5F1","SNAI1","FN1","FZD7","ISL1","MMP2","MMP9","FST","WNT3A","TWIST1","TBX3","GBX2","CACNA1G","CDC25","WISP1","WISP2","IGF2","EMP1","IGF1","VEGFC","PTGS2","IL6","PITX2","EGFR","CDH1","CDKN2A","CTLA4","CXCL8","VCAN","TNFRSF19") # INFO FROM https://web.stanford.edu/group/nusselab/cgi-bin/wnt/target_genes
#read in 24hrbft2 expression data
expres.24 = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.24 = expres.24[expres.24$condition %in% c("24hrbft2"),]
expres.24 = expres.24[order(expres.24$target_id),]
expres.24$logtpm= log2(expres.24$tpm + 1)
#expres.24 = expres.24[expres.24$logtpm>1,]
expres.24 = merge(expres.24, phenodata2, by.x= "sample", by.y= "V1")
expres.24 = expres.24[order(expres.24$target_id),]
#read in 24hrblank data
expres.24b = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.24b = expres.24b[expres.24b$condition %in% c("24blank"),]
expres.24b = expres.24b[order(expres.24b$target_id),]
expres.24b$logtpm= log2(expres.24b$tpm + 1)
#expres.24b = expres.24b[expres.24b$logtpm>1,]
expres.24b = merge(expres.24b, phenodata2, by.x= "sample", by.y= "V1")
expres.24b = expres.24b[order(expres.24b$target_id),]
#split up data for each group
expres.24.220 = expres.24[expres.24$V2=="24hrbft2.220",]
expres.24.031 = expres.24[expres.24$V2=="24hrbft2.031",]
expres.24.trial1 = expres.24[expres.24$V2=="24hrbft2.trial1",]
expres.24b.220 = expres.24b[expres.24b$V2=="24blank.220",]
expres.24b.031 = expres.24b[expres.24b$V2=="24blank.031",]
expres.24b.trial1 = expres.24b[expres.24b$V2=="24blank.trial1",]
#calculate the logfc in expression between blank and bft2 samples then combine all the samples, then get rid of low expressing genes
expres.24.220$logfc = log2((1+expres.24.220$tpm)/(1+expres.24b.220$tpm))
expres.24.031$logfc = log2((1+expres.24.031$tpm)/(1+expres.24b.031$tpm))
expres.24.trial1$logfc = log2((1+expres.24.trial1$tpm)/(1+expres.24b.trial1$tpm))
expres.24.all=rbind(expres.24.220, expres.24.trial1, expres.24.031)
#expres.24.all = expres.24.all[expres.24.all$logtpm>1,]
#read in 48hrbft2 expression data
expres.48 = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.48 = expres.48[expres.48$condition %in% c("48hrbft2"),]
expres.48 = expres.48[order(expres.48$target_id),]
expres.48$logtpm= log2(expres.48$tpm + 1)
#expres.48 = expres.48[expres.48$logtpm>1,]
expres.48 = merge(expres.48, phenodata2, by.x= "sample", by.y= "V1")
expres.48 = expres.48[order(expres.48$target_id),]
#read in 48hrblank data
expres.48b = read.csv("~/Data/rnaseq_data/gene.level.analysis/rerun/tpmcounts.sleuth.genelevel.allsamples.rerun.csv", row.names = 1)
expres.48b = expres.48b[expres.48b$condition %in% c("48blank"),]
expres.48b = expres.48b[order(expres.48b$target_id),]
expres.48b$logtpm= log2(expres.48b$tpm + 1)
#expres.48b = expres.48b[expres.48b$logtpm>1,]
expres.48b = merge(expres.48b, phenodata2, by.x= "sample", by.y= "V1")
expres.48b = expres.48b[order(expres.48b$target_id),]
#separate out replicate groups
expres.48.220 = expres.48[expres.48$V2=="48hrbft2.220",]
expres.48.031 = expres.48[expres.48$V2=="48hrbft2.031",]
expres.48.trial1 = expres.48[expres.48$V2=="48hrbft2.trial1",]
expres.48b.220 = expres.48b[expres.48b$V2=="48blank.220",]
expres.48b.031 = expres.48b[expres.48b$V2=="48blank.031",]
expres.48b.trial1 = expres.48b[expres.48b$V2=="48blank.trial1",]
#calculate logFC for the difference in expression between bft2 and blank, then combine samples (the low-expression filter below is currently commented out)
expres.48.220$logfc = log2((1+expres.48.220$tpm)/(1+expres.48b.220$tpm))
expres.48.031$logfc = log2((1+expres.48.031$tpm)/(1+expres.48b.031$tpm))
expres.48.trial1$logfc = log2((1+expres.48.trial1$tpm)/(1+expres.48b.trial1$tpm))
expres.48.all = rbind(expres.48.220, expres.48.trial1, expres.48.031)#do not remove 031 time point because it looks like an outlier
#expres.48.all = expres.48.all[expres.48.all$logtpm>1,]
####################
#set up data to be run with pheatmap function
expres.2448.wnt = rbind(expres.24.all, expres.48.all)
#get average logfc
setup.expres.wnt = expres.2448.wnt[ ,c("target_id","V2","logfc")]
setup.expres.wnt.melt = melt(setup.expres.wnt, target_id=c("target_id","logfc"))
setup.expres.wnt.melt.cast = cast(setup.expres.wnt.melt, target_id~V2, mean)
setup.expres.wnt.melt.names = setup.expres.wnt.melt.cast
row.names(setup.expres.wnt.melt.names) = setup.expres.wnt.melt.names[ ,1]
setup.expres.wnt.melt.names = setup.expres.wnt.melt.names[ ,-1] #these 3 commands make my sample names my row names
colnames(setup.expres.wnt.melt.names) = c("hr24bft2.031","hr24bft2.220","hr24bft2.trial1","hr48bft2.031","hr48bft2.220","hr48bft2.trial1")
expres.wnt.averages = setup.expres.wnt.melt.names
expres.wnt.averages$average24hrbft2 = (expres.wnt.averages$hr24bft2.031 + expres.wnt.averages$hr24bft2.220 + expres.wnt.averages$hr24bft2.trial1)/3
expres.wnt.averages$average48hrbft2 = (expres.wnt.averages$hr48bft2.031 + expres.wnt.averages$hr48bft2.220 + expres.wnt.averages$hr48bft2.trial1)/3
expres.wnt.averages = expres.wnt.averages [ , c("average24hrbft2","average48hrbft2")]
expres.wnt.averages.df = data.frame(expres.wnt.averages)
expres.wnt.averages.df$target_id= rownames(expres.wnt.averages.df)
expres.wnt.averages.melt = melt(expres.wnt.averages.df)
colnames(expres.wnt.averages.melt) = c("target_id","variable","average_logfc")
#get average tpm values
setup.expres.wnt.tpm = expres.2448.wnt[ ,c("target_id","V2","tpm")]
setup.expres.wnt.tpm.melt = melt(setup.expres.wnt.tpm, target_id=c("target_id","tpm"))
setup.expres.wnt.tpm.melt.cast = cast(setup.expres.wnt.tpm.melt, target_id~V2, mean)
setup.expres.wnt.tpm.melt.names = setup.expres.wnt.tpm.melt.cast
row.names(setup.expres.wnt.tpm.melt.names) = setup.expres.wnt.tpm.melt.names[ ,1]
setup.expres.wnt.tpm.melt.names = setup.expres.wnt.tpm.melt.names[ ,-1] #these 3 commands make my sample names my row names
colnames(setup.expres.wnt.tpm.melt.names) = c("hr24bft2.031","hr24bft2.220","hr24bft2.trial1","hr48bft2.031","hr48bft2.220","hr48bft2.trial1")
expres.wnt.tpm.averages = setup.expres.wnt.tpm.melt.names
expres.wnt.tpm.averages$average24hrbft2 = (expres.wnt.tpm.averages$hr24bft2.031 + expres.wnt.tpm.averages$hr24bft2.220 + expres.wnt.tpm.averages$hr24bft2.trial1)/3
expres.wnt.tpm.averages$average48hrbft2 = (expres.wnt.tpm.averages$hr48bft2.031 + expres.wnt.tpm.averages$hr48bft2.220 + expres.wnt.tpm.averages$hr48bft2.trial1)/3
expres.wnt.tpm.averages = expres.wnt.tpm.averages [ , c("average24hrbft2","average48hrbft2")]
expres.wnt.tpm.averages.df = data.frame(expres.wnt.tpm.averages)
expres.wnt.tpm.averages.df$target_id= rownames(expres.wnt.tpm.averages.df)
expres.wnt.tpm.averages.melt = melt(expres.wnt.tpm.averages.df)
colnames(expres.wnt.tpm.averages.melt) = c("target_id","variable","average_tpm")
#here, I am separating out my 24hr and 48hr data, then merging my logfc data with my average tpm data
expres.wnt.averages.melt.24 = expres.wnt.averages.melt[expres.wnt.averages.melt$variable=="average24hrbft2", ]
expres.wnt.averages.melt.48 = expres.wnt.averages.melt[expres.wnt.averages.melt$variable=="average48hrbft2", ]
expres.wnt.tpm.averages.melt.24 = expres.wnt.tpm.averages.melt[expres.wnt.tpm.averages.melt$variable=="average24hrbft2", ]
expres.wnt.tpm.averages.melt.48 = expres.wnt.tpm.averages.melt[expres.wnt.tpm.averages.melt$variable=="average48hrbft2", ]
expres.wnt.tpm.logfc.24 = merge(expres.wnt.tpm.averages.melt.24, expres.wnt.averages.melt.24, by.x= "target_id", by.y= "target_id")
expres.wnt.tpm.logfc.48 = merge(expres.wnt.tpm.averages.melt.48, expres.wnt.averages.melt.48, by.x= "target_id", by.y= "target_id")
#here, I am making a new logtpm column
expres.wnt.tpm.logfc.24$logtpm = log2(expres.wnt.tpm.logfc.24$average_tpm +1)
expres.wnt.tpm.logfc.48$logtpm = log2(expres.wnt.tpm.logfc.48$average_tpm +1)
#Add chromatin state data to all of my samples
#first, I will read in data with info on uniquepeak locations
upeaks.24hrbft2= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique24hrbft2peaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
upeaks.24hrblank= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique24hrblankpeaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
upeaks.48hrbft2= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique48hrbft2peaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
upeaks.48hrblank= read.csv("~/Data/atac/final.analysis/chippeakanno_peaks/unique48hrblankpeaks.promoters.1000.1.chippeakanno.csv") #use promoters only or promoters.andgenebodies file
#genes in wnt pathway: annotate each gene with its chromatin state (opened/closed/nochange) from the unique peak lists
expres.wnt.tpm.logfc.24$bft48hr.chromatin="nochange"
expres.wnt.tpm.logfc.24$bft48hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.48hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.24$bft48hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.48hrblank$symbol]="closed"
expres.wnt.tpm.logfc.24$bft24hr.chromatin="nochange"
expres.wnt.tpm.logfc.24$bft24hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.24hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.24$bft24hr.chromatin[expres.wnt.tpm.logfc.24$target_id %in% upeaks.24hrblank$symbol]="closed"
expres.wnt.tpm.logfc.48$bft48hr.chromatin="nochange"
expres.wnt.tpm.logfc.48$bft48hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.48hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.48$bft48hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.48hrblank$symbol]="closed"
expres.wnt.tpm.logfc.48$bft24hr.chromatin="nochange"
expres.wnt.tpm.logfc.48$bft24hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.24hrbft2$symbol]="opened"
expres.wnt.tpm.logfc.48$bft24hr.chromatin[expres.wnt.tpm.logfc.48$target_id %in% upeaks.24hrblank$symbol]="closed"
table(expres.wnt.tpm.logfc.24$bft24hr.chromatin)
table(expres.wnt.tpm.logfc.24$bft48hr.chromatin)
# table(expres.wnt.tpm.logfc.24$bftboth.chromatin) # bftboth.chromatin is not created above, so this call would fail
table(expres.wnt.tpm.logfc.48$bft24hr.chromatin)
table(expres.wnt.tpm.logfc.48$bft48hr.chromatin)
# table(expres.wnt.tpm.logfc.48$bftboth.chromatin) # bftboth.chromatin is not created above, so this call would fail
expres.wnt.tpm.logfc.24.nono = expres.wnt.tpm.logfc.24[ expres.wnt.tpm.logfc.24$bft24hr.chromatin !="nochange",] #writes only genes that are opened or closed
expres.wnt.tpm.logfc.48.nono = expres.wnt.tpm.logfc.48[ expres.wnt.tpm.logfc.48$bft48hr.chromatin !="nochange",] #writes only genes that are opened or closed
#perform stats
wilcox.test (expres.wnt.tpm.logfc.24[ expres.wnt.tpm.logfc.24$bft24hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.24[ expres.wnt.tpm.logfc.24$bft24hr.chromatin=="closed","average_logfc" ]) #testing whether logFC expression differs between opened and closed genes
mean(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="opened","average_logfc"])
mean(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="closed","average_logfc"])
median(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="opened","average_logfc"])
median(expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$bft24hr.chromatin=="closed","average_logfc"])
wilcox.test (expres.wnt.tpm.logfc.48[ expres.wnt.tpm.logfc.48$bft48hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.48[ expres.wnt.tpm.logfc.48$bft48hr.chromatin=="closed","average_logfc" ])
mean(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="opened","average_logfc"])
mean(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="closed","average_logfc"])
median(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="opened","average_logfc"])
median(expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$bft48hr.chromatin=="closed","average_logfc"])
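# The wilcox.test/mean/median pattern above is repeated for each time point; a
# small helper along these lines could replace the copy-paste (a sketch -- the
# function and argument names are not part of the original analysis):
compare_open_closed <- function(df, chromatin_col, value_col = "average_logfc") {
  opened <- df[df[[chromatin_col]] == "opened", value_col]
  closed <- df[df[[chromatin_col]] == "closed", value_col]
  list(wilcox  = wilcox.test(opened, closed),
       means   = c(opened = mean(opened), closed = mean(closed)),
       medians = c(opened = median(opened), closed = median(closed)))
}
# example: compare_open_closed(expres.wnt.tpm.logfc.24, "bft24hr.chromatin")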
#get rid of low expression genes
expres.wnt.tpm.logfc.24.nolow = expres.wnt.tpm.logfc.24[expres.wnt.tpm.logfc.24$logtpm>1,]
expres.wnt.tpm.logfc.48.nolow = expres.wnt.tpm.logfc.48[expres.wnt.tpm.logfc.48$logtpm>1,]
expres.wnt.tpm.logfc.24.nono.nolow = expres.wnt.tpm.logfc.24.nolow[ expres.wnt.tpm.logfc.24.nolow$bft24hr.chromatin !="nochange",] #writes only genes that are opened or closed
expres.wnt.tpm.logfc.48.nono.nolow = expres.wnt.tpm.logfc.48.nolow[ expres.wnt.tpm.logfc.48.nolow$bft48hr.chromatin !="nochange",] #writes only genes that are opened or closed
#stats with low expression genes removed
wilcox.test (expres.wnt.tpm.logfc.24.nolow[ expres.wnt.tpm.logfc.24.nolow$bft24hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.24.nolow[ expres.wnt.tpm.logfc.24.nolow$bft24hr.chromatin=="closed","average_logfc" ])
wilcox.test (expres.wnt.tpm.logfc.48.nolow[ expres.wnt.tpm.logfc.48.nolow$bft48hr.chromatin=="opened", "average_logfc"], expres.wnt.tpm.logfc.48.nolow[ expres.wnt.tpm.logfc.48.nolow$bft48hr.chromatin=="closed","average_logfc" ])
#plot
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.24hrs.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.24, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft24hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.015 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.24hrs.nonochange.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.24.nono, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft24hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.015 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.24hrs.nonochange.nolow.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.24.nono.nolow, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft24hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.009 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.48hrs.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.48, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft48hr.chromatin), alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2) + geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.456 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.48hrs.nonochange.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.48.nono, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft48hr.chromatin),alpha=0.75) + geom_abline(intercept=0.5, slope=0, linetype=2)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.456 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
pdf("~/Data/rnaseq_data/gene.level.analysis/rerun/maplot.48hrs.nonochange.nolow.rerun.pdf")
ggplot( expres.wnt.tpm.logfc.48.nono.nolow, aes(x=logtpm, y=average_logfc)) + geom_point(aes(colour=bft48hr.chromatin),alpha=0.75) + geom_abline(intercept=0.5, slope=0)+ geom_abline(slope=0, intercept = -0.5, linetype=2) +labs(x= "mean (logTPM +1) ", y="logFC expression") + annotate("text", x = 10,y=1.3, label = "pvalue [opened ~ closed] =0.383 \nwilcoxon rank sum test") + ylim(-1.5,1.5) + xlim(0,15)
dev.off()
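# The six MA-plots above differ only in the input data frame, the colour
# column and the annotated p-value; a helper of this shape keeps them
# consistent (a sketch -- the function name and labels are placeholders, and
# the .data pronoun needs a reasonably recent ggplot2):
plot_ma <- function(df, chromatin_col, pval_label) {
  ggplot(df, aes(x = logtpm, y = average_logfc)) +
    geom_point(aes(colour = .data[[chromatin_col]]), alpha = 0.75) +
    geom_abline(intercept = 0.5, slope = 0, linetype = 2) +
    geom_abline(intercept = -0.5, slope = 0, linetype = 2) +
    labs(x = "mean (logTPM +1)", y = "logFC expression") +
    annotate("text", x = 10, y = 1.3, label = pval_label) +
    ylim(-1.5, 1.5) + xlim(0, 15)
}
# example:
# print(plot_ma(expres.wnt.tpm.logfc.24, "bft24hr.chromatin",
#               "pvalue [opened ~ closed] =0.015 \nwilcoxon rank sum test"))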
##################
|
\name{tesseract}
\alias{tesseract}
\alias{TesseractBaseAPI-class}
\alias{SetImage}
\alias{Recognize}
\alias{SetRectangle}
\alias{SetSourceResolution}
\alias{GetInputName}
\alias{SetInputName}
\alias{GetDatapath}
\alias{GetInitLanguages}
\alias{ReadConfigFile}
\alias{GetSourceYResolution}
\alias{IsValidWord}
\alias{Init}
\alias{End}
\alias{Clear}
\alias{plot,TesseractBaseAPI-method}
\alias{coerce,TesseractBaseAPI,ResultIterator-method}
%\alias{SetVariables}
\title{Top-level Tesseract OCR API Functions and Classes}
\description{
  To perform OCR on an image, we create a Tesseract API object
  and then call its methods.
}
\usage{
tesseract(image = character(), ..., init = TRUE)
SetImage(api, pix)
Init(api, lang = "eng")
SetRectangle(api, ..., dims = sapply(list(...), as.integer))
SetSourceResolution(api, ppi)
Recognize(api)
SetInputName(api, name)
GetInputName(api)
GetDatapath(api)
GetInitLanguages(api)
ReadConfigFile(api, files)
GetSourceYResolution(api)
IsValidWord(api, word)
Clear(api)
End(api)
}
\arguments{
\item{image,pix}{either a \code{\link{Pix-class}} object, or a file name
from which to read the image. Specifying the name of a file also
arranges to call \code{\link{SetInputName}} and so the
\code{\link{TesseractBaseAPI-class}}
    instance knows where the image is located. This means we can query
    it later, e.g. via \code{GetInputName}.
    If specifying the name of a file, make certain to assign the result
to a variable that persists until \code{Recognize} is called for
this \code{\link{TesseractBaseAPI-class}} instance.
In the future, we will ensure that garbage collection protects the
image, but it is not the case now.}
\item{api}{the instance of the \code{\link{TesseractBaseAPI-class}} in which to perform the operations.}
\item{\dots}{\code{name = value} pairs that are passed to
\code{SetVariables} to configure the \code{\link{TesseractBaseAPI-class}} instance.}
\item{init}{a logical value controlling whether \code{Init} is called
by the \code{tesseract} function.}
\item{ppi}{the per-pixel resolution as an integer.}
\item{dims}{a vector of length 4 giving the location of the rectangle
as x1, y1, x2, y2.}
\item{files}{a character vector specifying the full or relative paths
to the configuration files.}
\item{word}{a character vector of words whose validity we want to
  check.}
\item{name}{the name of the file being processed by the OCR system.}
\item{lang}{a string specifying the language for the text to be recognized.}
}
\value{
The different functions return very different objects.
}
\references{
Tesseract \url{https://code.google.com/p/tesseract-ocr/},
specifically
\url{http://zdenop.github.io/tesseract-doc/classtesseract_1_1_tess_base_a_p_i.html}
}
\author{
Duncan Temple Lang
}
\seealso{
\code{\link{GetIterator}}, \code{\link{lapply}}.
\code{\link{SetVariables}}, \code{\link{PrintVariables}}
}
\examples{
f = system.file("images", "OCRSample2.png", package = "Rtesseract")
api = tesseract(f)
GetInputName(api)
Recognize(api)
ri = GetIterator(api)
bbox = lapply(ri, BoundingBox, "word")
if(require("png")) {
i = readPNG(f)
plot(api, level = "symbol", img = i, border = "red")
}
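
# A couple of further calls from the usage section above (a sketch; the
# results depend on the local Tesseract installation and its installed
# language data):
GetInitLanguages(api)
IsValidWord(api, "hello")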
}
\keyword{IO }
\keyword{programming}
|
/man/tesseract.Rd
|
no_license
|
aorimi/Rtesseract
|
R
| false | false | 3,256 |
rd
|
#' Tests of radiative transfer models
library(PEcAnRTM)
context("SAIL models")
data(model.list)
setkey(model.list, modname)
p <- defparam("pro4sail")
pout <- pro4sail(p)
test.dim <- c(2101, 4) # 2101 wavelengths (400-2500 nm) by 4 reflectance streams
test_that("Returns matrix", {
expect_is(pout, "matrix")
})
test_that("Correct dimensions", {
expect_equal(dim(pout), test.dim)
})
test_that("Don't return 0", {
expect_true(all(colSums(pout) > 0))
})
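# A further check in the same spirit (a sketch): the reflectance matrix
# should contain no missing or non-finite values.
test_that("No missing values", {
expect_true(all(is.finite(pout)))
})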
|
/modules/rtm/tests/testthat/test.sail.R
|
permissive
|
davidjpmoore/pecan
|
R
| false | false | 437 |
r
|
### XXX is the stuff in this file correct or should we be exporting *formatted* values to
### meet the needs of consumers of this? Do we need to support both?
#' Create Enriched flat value table with paths
#'
#'
#' This function creates a flat tabular file of cell values and
#' corresponding paths.
#'
#' List columns where at least one value has length > 1 are collapsed
#' to character vectors by collapsing the list element with \code{"|"}.
#'
#' @note There is currently no round-trip capability for this type of export.
#' You can read values exported this way back in via \code{import_from_tsv}
#' but you will receive only the data.frame version back, NOT a \code{TableTree}.
#'
#' @inheritParams gen_args
#' @param file character(1). The path of the file to written to or read from.
#' @param pathproc function. Internal detail, not intended for use by end users.
#' @return \code{NULL} silently for \code{export_as_tsv}, a data.frame with
#' re-constituted list values for \code{import_from_tsv}.
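#' @examples
#' # Export and re-import sketch (the import returns a data.frame, not a
#' # TableTree; mirrors the layout used for export_as_txt below and assumes
#' # the ex_adsl example data shipped with the package):
#' \dontrun{
#' lyt <- basic_table() %>%
#'   split_cols_by("ARM") %>%
#'   analyze("AGE")
#' tbl <- build_table(lyt, ex_adsl)
#' tf <- tempfile(fileext = ".tsv")
#' export_as_tsv(tbl, file = tf)
#' head(import_from_tsv(tf))
#' }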
#' @export
#' @rdname tsv_io
#' @importFrom utils write.table read.table
export_as_tsv <- function(tt, file = NULL, pathproc = collapse_path) {
df <- path_enriched_df(tt, pathproc = pathproc)
write.table(df, file, sep = "\t")
}
.collapse_char <- "|"
.collapse_char_esc <- "\\|"
##' @export
##' @rdname tsv_io
import_from_tsv <- function(file) {
rawdf <- read.table(file, header = TRUE, sep = "\t")
as.data.frame(lapply(rawdf,
function(col) {
if(!any(grepl(.collapse_char, col, fixed = TRUE)))
col
else
I(strsplit(col, split = .collapse_char_esc))
}))
}
collapse_path <- function(paths) {
if(is.list(paths))
return(vapply(paths, collapse_path, ""))
paste(paths, collapse = .collapse_char)
}
collapse_values <- function(colvals) {
if(!is.list(colvals) || all(vapply(colvals, length, 1L) == 1))
return(colvals)
vapply(colvals, paste, "", collapse = .collapse_char)
}
path_enriched_df <- function(tt, pathproc = collapse_path) {
rdf <- make_row_df(tt)
cdf <- make_col_df(tt)
cvs <- as.data.frame(do.call(rbind, cell_values(tt)))
cvs <- as.data.frame(lapply(cvs, collapse_values))
row.names(cvs) <- NULL
colnames(cvs) <- pathproc(cdf$path)
preppaths <- pathproc(rdf[rdf$node_class != "LabelRow",]$path)
cbind.data.frame(row_path = preppaths, cvs)
}
#' Export as plain text with page break symbol
#'
#' @inheritParams gen_args
#' @param file character(1). File to write.
#' @param paginate logical(1). Should \code{tt} be paginated before writing the file.
#' @param \dots Passed directly to \code{\link{paginate_table}}
#' @param page_break character(1). Page break symbol (defaults to \code{"\\s\\n"}).
#' @return \code{file} (this function is called for the side effect of writing the file).
#'
#'
#' @export
#'
#' @seealso `export_as_pdf`
#'
#' @examples
#'
#' lyt <- basic_table() %>%
#' split_cols_by("ARM") %>%
#' analyze(c("AGE", "BMRKR2", "COUNTRY"))
#'
#' tbl <- build_table(lyt, ex_adsl)
#'
#' cat(export_as_txt(tbl, file = NULL, paginate = TRUE, lpp = 8))
#'
#' \dontrun{
#' tf <- tempfile(fileext = ".txt")
#' export_as_txt(tbl, file = tf)
#' system2("cat", tf)
#' }
export_as_txt <- function(tt, file = NULL, paginate = FALSE, ..., page_break = "\\s\\n") {
colwidths <- propose_column_widths(tt)
if(paginate) {
tbls <- paginate_table(tt, ...)
} else {
tbls <- list(tt)
}
res <- paste(sapply(tbls, toString, widths = colwidths), collapse = page_break)
if(!is.null(file))
cat(res, file = file)
else
res
}
#' Export as PDF
#'
#' The PDF output is based on the ASCII output created with `toString`
#'
#' @inheritParams export_as_txt
#' @inheritParams grid::plotViewport
#' @inheritParams paginate_table
#' @param file file to write, must have `.pdf` extension
#' @param width the width and height of the graphics region in inches
#' @param height the width and height of the graphics region in inches
#' @param fontsize the size of text (in points)
#' @param ... arguments passed on to `paginate_table`
#'
#' @importFrom grDevices pdf
#' @importFrom grid textGrob grid.newpage gpar pushViewport plotViewport unit grid.draw
#' convertWidth convertHeight grobHeight grobWidth
#'
#' @seealso `export_as_txt`
#'
#'
#' @importFrom grid textGrob get.gpar
#' @importFrom grDevices dev.off
#' @export
#'
#' @examples
#' lyt <- basic_table() %>%
#' split_cols_by("ARM") %>%
#' analyze(c("AGE", "BMRKR2", "COUNTRY"))
#'
#' tbl <- build_table(lyt, ex_adsl)
#'
#' \dontrun{
#' tf <- tempfile(fileext = ".pdf")
#' export_as_pdf(tbl, file = tf, height = 4)
#' tf <- tempfile(fileext = ".pdf")
#' export_as_pdf(tbl, file = tf, lpp = 8)
#' }
#'
export_as_pdf <- function(tt,
file, width = 11.7, height = 8.3, # passed to pdf()
margins = c(4, 4, 4, 4), fontsize = 8, # grid parameters
paginate = TRUE, lpp = NULL, ... # passed to paginate_table
) {
    stopifnot(tools::file_ext(file) == "pdf") # file must have a .pdf extension; file_ext() returns it without the dot
gp_plot <- gpar(fontsize = fontsize, fontfamily = "mono")
pdf(file = file, width = width, height = height)
grid.newpage()
pushViewport(plotViewport(margins = margins, gp = gp_plot))
colwidths <- propose_column_widths(tt)
tbls <- if (paginate) {
if (is.null(lpp)) {
cur_gpar <- get.gpar()
lpp <- floor(convertHeight(unit(1, "npc"), "lines", valueOnly = TRUE) /
(cur_gpar$cex * cur_gpar$lineheight))
}
paginate_table(tt, lpp = lpp, ...)
} else {
list(tt)
}
stbls <- lapply(lapply(tbls, toString, widths = colwidths), function(xi) substr(xi, 1, nchar(xi) - nchar("\n")))
gtbls <- lapply(stbls, function(txt) {
textGrob(
label = txt,
x = unit(0, "npc"), y = unit(1, "npc"),
just = c("left", "top")
)
})
npages <- length(gtbls)
    exceeds_width <- rep(FALSE, npages)
    exceeds_height <- rep(FALSE, npages)
for (i in seq_along(gtbls)) {
g <- gtbls[[i]]
if (i > 1) {
grid.newpage()
pushViewport(plotViewport(margins = margins, gp = gp_plot))
}
if (convertHeight(grobHeight(g), "inches", valueOnly = TRUE) >
convertHeight(unit(1, "npc"), "inches", valueOnly = TRUE)) {
exceeds_height[i] <- TRUE
warning("height of page ", i, " exceeds the available space")
}
if (convertWidth(grobWidth(g), "inches", valueOnly = TRUE) >
convertWidth(unit(1, "npc"), "inches", valueOnly = TRUE)) {
exceeds_width[i] <- TRUE
warning("width of page ", i, " exceeds the available space")
}
grid.draw(g)
}
dev.off()
list(file = file, npages = npages, exceeds_width = exceeds_width, exceeds_height = exceeds_height, lpp = lpp)
}
|
/R/tt_export.R
|
permissive
|
jcheng5/rtables
|
R
| false | false | 6,986 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_table_pieces.R
\name{mod_table_piecesui}
\alias{mod_table_piecesui}
\alias{mod_table_pieces}
\title{mod_table_piecesui and mod_table_pieces}
\usage{
mod_table_piecesui(id)
mod_table_pieces(
input,
output,
session,
scale_obj,
steps_range,
step_choice,
width_table = "100\%"
)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module that ...
}
|
/man/mod_table_piecesui.Rd
|
permissive
|
federman/shinylego
|
R
| false | true | 523 |
rd
|
#
# This file is for plot 1 for Exploratory Data Analysis Project 1
#
###############################################################################
# Step 1 Set the file locations and read the file
# Note: This file assumes the data file exists in the "explore_p_1" subdirectory
# under the working directory
###############################################################################
readfile <- paste(getwd(),"/explore_p_1", "/", "household_power_consumption.txt",
sep="")
writefile <- paste(getwd(),"/explore_p_1", "/", "plot1.png", sep="")
# Read the file
power.df <- as.data.frame(read.table(readfile, header=TRUE, sep=";",
na.strings="?"))
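# If reading the full file is slow, colClasses can be supplied up front
# (a sketch; the 2 character + 7 numeric column layout is assumed from the
# dataset's documentation):
# power.df <- read.table(readfile, header = TRUE, sep = ";", na.strings = "?",
#                        colClasses = c(rep("character", 2), rep("numeric", 7)))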
# Step 2 Clean the data set
###############################################################################
#subset the data frame to include only the dates of interest
power.df <- subset(power.df, Date == "1/2/2007" | Date == "2/2/2007")
# Step 3 Generate the histogram
###############################################################################
png(filename = writefile, width = 480, height = 480, units = "px", bg = "white")
par(mar = c(6, 6, 5, 4))
hist(power.df$Global_active_power, col = "red", main = "Global Active Power",
xlab = "Global Active Power(kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
MGoodman10/ExData_Plotting1
|
R
| false | false | 1,310 |
r
|
context("Checking findVariance")
test_that("One cat column, one measure col, and no date columns give correct df", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5))
dfRes <- findVariation(df = df,
categoricalCols = "Gender",
measureCol = "LOS")
expected <- data.frame(DimensionalAttributes = c('Gender','Gender'),
CategoriesGrouped = c('M','F'),
MeasureCOV = c('LOS|0.66','LOS|0.49'),
MeasureVolumeRaw = c('LOS|3','LOS|4'),
MeasureVolumePercent = c('LOS|0.43','LOS|0.57'),
MeasureImpact = c('LOS|1.98','LOS|1.96'),
AboveMeanCOVFLG = c('LOS|Y','LOS|N'),
AboveMeanVolumeFLG = c('LOS|N','LOS|Y'),
# findVariation returns strings as characters
stringsAsFactors = FALSE)
# Drop row names, so the two dfs can match w/o specifying row names explicitly
rownames(dfRes) <- c()
rownames(expected) <- c()
testthat::expect_equal(dfRes,expected)
})
test_that("One cat column, one measure col, and no date col and NA in cat col
give correct df", {
df <- data.frame(Gender = c('F','M','M','M','M','F',NA,'F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5))
dfRes <- findVariation(df = df,
categoricalCols = "Gender",
measureCol = "LOS")
expected <- data.frame(DimensionalAttributes = c('Gender','Gender'),
CategoriesGrouped = c('M','F'),
MeasureCOV = c('LOS|0.66','LOS|0.22'),
MeasureVolumeRaw = c('LOS|3','LOS|3'),
MeasureVolumePercent = c('LOS|0.5','LOS|0.5'),
MeasureImpact = c('LOS|1.98','LOS|0.66'),
AboveMeanCOVFLG = c('LOS|Y','LOS|N'),
AboveMeanVolumeFLG = c('LOS|N','LOS|N'),
stringsAsFactors = FALSE)
rownames(dfRes) <- c()
rownames(expected) <- c()
testthat::expect_equal(dfRes,expected)
})
test_that("Two cat columns and no date columns give correct df", {
df <- data.frame(Dept = c('A','A','A','B','B','B','B'),
Gender = c('F','M','M','M','M','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9))
categoricalCols <- c("Dept","Gender")
dfRes <- findVariation(df = df,
categoricalCols = categoricalCols,
measureCol = "LOS")
expected <- data.frame(
DimensionalAttributes = c('Dept','Dept','Gender','Gender',
'Dept|Gender','Dept|Gender'),
CategoriesGrouped = c('B','A','M','F','B|F','B|M'),
MeasureCOV = c('LOS|0.81','LOS|0.31','LOS|0.66',
'LOS|0.58','LOS|0.54','LOS|0.42'),
MeasureVolumeRaw = c('LOS|4','LOS|2','LOS|3','LOS|3','LOS|2','LOS|2'),
MeasureVolumePercent = c('LOS|0.67','LOS|0.33','LOS|0.5','LOS|0.5',
'LOS|0.5','LOS|0.5'),
MeasureImpact = c('LOS|3.24','LOS|0.62','LOS|1.98','LOS|1.74',
'LOS|1.08','LOS|0.84'),
AboveMeanCOVFLG = c('LOS|Y','LOS|N','LOS|Y','LOS|N',
'LOS|Y','LOS|N'),
AboveMeanVolumeFLG = c('LOS|Y','LOS|N','LOS|N','LOS|N',
'LOS|Y','LOS|Y'),
stringsAsFactors = FALSE)
rownames(dfRes) <- c()
rownames(expected) <- c()
testthat::expect_equal(dfRes,expected)
})
test_that("One cat col, one measure col, and one date col give correct df", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
StartDTS = c('2012-01-01 10:04:23', '2012-01-01 10:04:23',
'2012-02-01 10:04:23', '2012-02-02 10:04:23',
'2012-01-01 10:04:23', '2012-03-01 10:04:23',
'2012-04-01 10:04:23', '2012-04-01 10:04:23'))
dfRes <- findVariation(df = df,
categoricalCols = "Gender",
measureCol = "LOS",
dateCol = 'StartDTS')
expected <- data.frame(
DimensionalAttributes = c('Gender','Gender',
'StartDTS','StartDTS','StartDTS',
'Gender|StartDTS','Gender|StartDTS'),
CategoriesGrouped = c('M','F',
'2012-02','2012-04','2012-01',
'M|2012-02','F|2012-04'),
MeasureCOV = c('LOS|0.66','LOS|0.49',
'LOS|0.83','LOS|0.4','LOS|0.2',
'LOS|0.83','LOS|0.4'),
MeasureVolumeRaw = c('LOS|3','LOS|4',
'LOS|2','LOS|2','LOS|2',
'LOS|2','LOS|2'),
MeasureVolumePercent = c('LOS|0.43','LOS|0.57',
'LOS|0.33','LOS|0.33','LOS|0.33',
'LOS|0.5','LOS|0.5'),
MeasureImpact = c('LOS|1.98','LOS|1.96',
'LOS|1.66','LOS|0.8','LOS|0.4',
'LOS|1.66','LOS|0.8'),
AboveMeanCOVFLG = c('LOS|Y','LOS|N',
'LOS|Y','LOS|N','LOS|N',
'LOS|Y','LOS|N'),
AboveMeanVolumeFLG = c('LOS|N','LOS|Y',
'LOS|Y','LOS|Y','LOS|Y',
'LOS|Y','LOS|Y'),
stringsAsFactors = FALSE)
rownames(dfRes) <- c()
rownames(expected) <- c()
testthat::expect_equal(dfRes,expected)
})
test_that("One cat column, two measure col, and no date columns give
correct df", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
BP = c(123,129,89,150,90,58,160,145))
dfRes <- findVariation(df = df,
categoricalCols = "Gender",
measureCol = c("LOS","BP"))
expected <- data.frame(DimensionalAttributes = c('Gender','Gender',
'Gender','Gender'),
CategoriesGrouped = c('M','F',
'F','M'),
MeasureCOV = c('LOS|0.66','LOS|0.49',
'BP|0.37','BP|0.26'),
MeasureVolumeRaw = c('LOS|3','LOS|4',
'BP|4','BP|4'),
MeasureVolumePercent = c('LOS|0.43','LOS|0.57',
'BP|0.5','BP|0.5'),
MeasureImpact = c('LOS|1.98','LOS|1.96',
'BP|1.48','BP|1.04'),
AboveMeanCOVFLG = c('LOS|Y','LOS|N',
'BP|Y','BP|N'),
AboveMeanVolumeFLG = c('LOS|N','LOS|Y',
'BP|N','BP|N'),
stringsAsFactors = FALSE)
rownames(dfRes) <- c()
rownames(expected) <- c()
  # stringsAsFactors = FALSE above, so there are no factor levels to reconcile
testthat::expect_equal(dfRes,expected)
})
test_that("One cat col, two measure cols, and one date col give correct df", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
BP = c(123,129,89,150,90,58,160,145),
StartDTS = c('2012-01-01 10:04:23', '2012-01-01 10:04:23',
'2012-02-01 10:04:23', '2012-02-02 10:04:23',
'2012-01-01 10:04:23', '2012-03-01 10:04:23',
'2012-04-01 10:04:23', '2012-04-01 10:04:23'))
dfRes <- findVariation(df = df,
categoricalCols = "Gender",
measureCol = c("LOS","BP"),
dateCol = 'StartDTS')
expected <- data.frame(
DimensionalAttributes = c('Gender','Gender','Gender','Gender',
'StartDTS','StartDTS','StartDTS',
'StartDTS','StartDTS','StartDTS',
'Gender|StartDTS','Gender|StartDTS',
'Gender|StartDTS','Gender|StartDTS',
'Gender|StartDTS'),
CategoriesGrouped = c('M','F','F','M',
'2012-02','2012-04','2012-02',
'2012-01','2012-01','2012-04',
'M|2012-02','F|2012-04','M|2012-02',
'M|2012-01','F|2012-04'),
MeasureCOV = c('LOS|0.66','LOS|0.49',
'BP|0.37','BP|0.26',
'LOS|0.83','LOS|0.4',
'BP|0.36','BP|0.18',
'LOS|0.2',
'BP|0.07',
'LOS|0.83','LOS|0.4',
'BP|0.36','BP|0.25','BP|0.07'),
MeasureVolumeRaw = c('LOS|3','LOS|4',
'BP|4','BP|4',
'LOS|2','LOS|2',
'BP|2','BP|3',
'LOS|2',
'BP|2',
'LOS|2','LOS|2',
'BP|2','BP|2','BP|2'),
MeasureVolumePercent = c('LOS|0.43','LOS|0.57',
'BP|0.5','BP|0.5',
'LOS|0.33','LOS|0.33',
'BP|0.29','BP|0.43',
'LOS|0.33',
'BP|0.29',
'LOS|0.5','LOS|0.5',
'BP|0.33','BP|0.33','BP|0.33'),
MeasureImpact = c('LOS|1.98','LOS|1.96',
'BP|1.48','BP|1.04',
'LOS|1.66','LOS|0.8',
'BP|0.72','BP|0.54',
'LOS|0.4',
'BP|0.14',
'LOS|1.66','LOS|0.8',
'BP|0.72','BP|0.5','BP|0.14'),
AboveMeanCOVFLG = c('LOS|Y','LOS|N',
'BP|Y','BP|N',
'LOS|Y','LOS|N',
'BP|Y','BP|N',
'LOS|N',
'BP|N',
'LOS|Y','LOS|N',
'BP|Y','BP|Y','BP|N'),
AboveMeanVolumeFLG = c('LOS|N','LOS|Y',
'BP|N','BP|N',
'LOS|Y','LOS|Y',
'BP|N','BP|Y',
'LOS|Y',
'BP|N',
'LOS|Y','LOS|Y',
'BP|Y','BP|Y','BP|Y'),
stringsAsFactors = FALSE)
rownames(dfRes) <- c()
rownames(expected) <- c()
testthat::expect_equal(dfRes,expected)
})
test_that("One cat col, two measure cols,one date col, & threshold give correct
df", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
BP = c(123,129,89,150,90,58,160,145),
StartDTS = c('2012-01-01 10:04:23', '2012-01-01 10:04:23',
'2012-02-01 10:04:23', '2012-02-02 10:04:23',
'2012-01-01 10:04:23', '2012-03-01 10:04:23',
'2012-04-01 10:04:23', '2012-04-01 10:04:23'))
dfRes <- findVariation(df = df,
categoricalCols = "Gender",
measureCol = c("LOS","BP"),
dateCol = 'StartDTS',
threshold = 1.0)
expected <- data.frame(
DimensionalAttributes = c('Gender','Gender','Gender','Gender',
'StartDTS',
'Gender|StartDTS'),
CategoriesGrouped = c('M','F','F','M',
'2012-02',
'M|2012-02'),
MeasureCOV = c('LOS|0.66','LOS|0.49','BP|0.37','BP|0.26',
'LOS|0.83',
'LOS|0.83'),
MeasureVolumeRaw = c('LOS|3','LOS|4',
'BP|4','BP|4',
'LOS|2',
'LOS|2'),
MeasureVolumePercent = c('LOS|0.43','LOS|0.57','BP|0.5','BP|0.5',
'LOS|0.33',
'LOS|0.5'),
MeasureImpact = c('LOS|1.98','LOS|1.96','BP|1.48','BP|1.04',
'LOS|1.66',
'LOS|1.66'),
AboveMeanCOVFLG = c('LOS|Y','LOS|N','BP|Y','BP|N',
'LOS|Y','LOS|Y'),
AboveMeanVolumeFLG = c('LOS|N','LOS|Y','BP|N','BP|N',
'LOS|Y',
'LOS|Y'),
stringsAsFactors = FALSE)
rownames(dfRes) <- c()
rownames(expected) <- c()
testthat::expect_equal(dfRes,expected)
})
test_that("Measure col missing from df gives correct error (when one
specified)", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5))
testthat::expect_error(findVariation(df = df,
categoricalCols = "Gender",
measureCol = "LO"), # <-- error
"The measure column or one of the categorical cols is")
})
test_that("Measure col missing from df gives correct error (when two
specified)", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5))
testthat::expect_error(findVariation(df = df,
categoricalCols = "Gender",
measureCol = "LO"), # <-- error
"The measure column or one of the categorical cols is")
})
test_that("Categorical col missing from df gives correct error (when one
specified)", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5))
testthat::expect_error(findVariation(df = df,
categoricalCols = "Gendr", # <-- error
measureCol = "LOS"),
"The measure column or one of the categorical cols is")
})
test_that("Categorical col missing from df gives correct error (when two
specified)", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
Age = c(23,45,63,42,78,32,15,65))
testthat::expect_error(findVariation(df = df,
categoricalCols = c("Gender"),
measureCol = c("LOS","Ag")),
"The measure column or one of the categorical cols is")
})
test_that("Measure col as strings gives correct error", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
Married = c('Y','N','Y','N','N','Y','N','Y'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5))
testthat::expect_error(findVariation(df = df,
categoricalCols = "Gender",
measureCol = "Married"), # <-- error
"measureColumn needs to be of class numeric or int")
})
test_that("Categorical col as numbers gives correct error", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
Age = c(23,45,63,42,78,32,15,65))
testthat::expect_error(findVariation(df = df,
categoricalCols = c("Gender","Age"),
measureCol = "LOS"),
"categoricalCols cannot be of class numeric or int")
})
test_that("Date column in wrong format gives correct error", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
AdmitDTS = c('01-2012 12:34:12','2012-01-01 12:34:12',
'2012-01-01 12:34:12','2012-01-01 12:34:12',
'2012-01-01 12:34:12','2012-01-01 12:34:12',
'2012-01-01 12:34:12','2012-01-01 12:34:12'))
testthat::expect_error(findVariation(df = df,
categoricalCols = c("Gender"),
measureCol = "LOS",
dateCol = "AdmitDTS"), # <-- error
paste0("AdmitDTS may not be a datetime column, or the",
" column may not be in format YYYY-MM-DD"))
})
test_that("Date column passed as number gives correct error", {
df <- data.frame(Gender = c('F','M','M','M','M','F','F','F'),
LOS = c(3.2,NA,5,1.3,2.4,4,9,5),
Age = c(23,45,63,42,78,32,15,65))
testthat::expect_error(findVariation(df = df,
categoricalCols = c("Gender"),
measureCol = "LOS",
dateCol = "Age"), # <-- error
paste0("Age may not be a datetime column, or the",
" column may not be in format YYYY-MM-DD"))
})
|
/tests/testthat/test-find-variance.R
|
permissive
|
Quantitative72/healthcareai-r
|
R
| false | false | 17,564 |
r
|
# Apriori
# Data Preprocessing
# install.packages('arules')
library(arules)
dataset = read.csv('~/Dropbox/github/machine_learning_udemy/machine-learning-udemy/part-5-association-rule-learning/Apriori/Market_Basket_Optimisation.csv', header = FALSE) # read once as a plain data frame for inspection; replaced by the sparse transactions object below
dataset = read.transactions('~/Dropbox/github/machine_learning_udemy/machine-learning-udemy/part-5-association-rule-learning/Apriori/Market_Basket_Optimisation.csv', sep = ',', rm.duplicates = TRUE)
summary(dataset)
itemFrequencyPlot(dataset, topN = 10)
# Training Apriori on the dataset
rules = apriori(data = dataset, parameter = list(support = 0.004, confidence = 0.2))
# Visualising the results
inspect(sort(rules, by = 'lift')[1:10])
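# A possible follow-up (assumes the arulesViz package is available): draw the
# top rules as a graph instead of printing them.
if (requireNamespace('arulesViz', quietly = TRUE)) {
  library(arulesViz)
  plot(sort(rules, by = 'lift')[1:10], method = 'graph')
}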
|
/part-5-association-rule-learning/Apriori/apriori.R
|
no_license
|
adamzolotarev/machine-learning-udemy
|
R
| false | false | 691 |
r
|
#capturing old digitized data
require(digitize) ## there is no angle correction here; if the image is rotated you will get wrong values
ca = ReadAndCal('~/Documents/Adam/Lobster/LFA38/Jan2020/CampbellandDugganTotalEffortTotalTraps.png') # click xlow, xhigh, ylow, yhigh; the calibration is now ca
dp = DigitData(col = 'red') #right click when done
#Landings first
dF = Calibrate(dp,ca,1877,1979, 0, 800)
names(dF) = c('Year','TLand')
dF$Year = round(dF$Year)
#Traps
ca1 = ReadAndCal('~/Documents/Adam/Lobster/LFA38/Jan2020/CampbellandDugganTotalEffortTotalTraps.png') # click xlow, xhigh, ylow, yhigh; the calibration is now ca1
dp1 = DigitData(col = 'red') #right click when done
dF1 = Calibrate(dp1,ca1,1877,1979, 0, 50000)
names(dF1) = c('Year','Traps')
dF1$Year = round(dF1$Year)
#Licenses
ca2 = ReadAndCal('~/Documents/Adam/Lobster/LFA38/Jan2020/CampbellandDugganTotalEffortTotalTraps.png') # click xlow, xhigh, ylow, yhigh; the calibration is now ca2
dp2 = DigitData(col = 'red') #right click when done
dF2 = Calibrate(dp2,ca2,1877,1979, 0, 300) # calibration values: x from 1877 to 1979, y from 0 to 300
names(dF2) = c('Year','Licenses')
dF2$Year = round(dF2$Year)
LFA38Data = merge(merge(dF,dF1,all=T),dF2,all=T)
write.csv(LFA38Data,file='~/Documents/Adam/Lobster/LFA38/Jan2020/CampbellandDugganFigureData.csv')
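# Quick visual check of the digitized landings series (illustrative; assumes the merge above succeeded)
plot(TLand ~ Year, data = LFA38Data, type = 'b')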
|
/inst/IP/DigitizingOldFigs.r
|
no_license
|
jfontestad/bio.lobster
|
R
| false | false | 1,281 |
r
|
hero <- function(bullets, dragons) {
ifelse(bullets / dragons >= 2, TRUE, FALSE)
}
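# Example (the hero survives with at least 2 bullets per dragon):
#   hero(10, 5)  # TRUE
#   hero(7, 4)   # FALSE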
|
/R/8_kyu/Is_he_gonna_survive.R
|
no_license
|
y0wel/Codewars-Kata
|
R
| false | false | 84 |
r
|
# plotmo.R: plot the model response when varying one or two predictors
#
# Stephen Milborrow Sep 2006 Cape Town
plotmo <- function(object = stop("no 'object' argument"),
type = NULL,
nresponse = NA,
pt.col = 0,
jitter = .5,
smooth.col = 0,
level = 0,
func = NULL,
inverse.func = NULL,
nrug = 0,
grid.col = 0,
type2 = "persp",
degree1 = TRUE,
all1 = FALSE,
degree2 = TRUE,
all2 = FALSE,
do.par = TRUE,
clip = TRUE,
ylim = NULL,
caption = NULL,
trace = 0,
grid.func = median,
grid.levels = NULL,
extend = 0,
ngrid1 = 50,
ngrid2 = 20,
ndiscrete = 5,
npoints = 3000,
center = FALSE,
xflip = FALSE,
yflip = FALSE,
swapxy = FALSE,
int.only.ok = TRUE,
...)
{
init.global.data()
on.exit(init.global.data()) # release memory on exit
object.name <- quote.deparse(substitute(object))
object # make sure object exists
trace <- as.numeric(check.integer.scalar(trace, logical.ok=TRUE))
# Associate the model environment with the object.
# (This is instead of passing it as an argument to plotmo's data access
# functions. It saves a few hundred references to model.env in the code.)
attr(object, ".Environment") <- get.model.env(object, object.name, trace)
temp <- plotmo_prolog(object, object.name, trace, ...)
object <- temp$object
my.call <- temp$my.call
# We will later make two passes through the plots if we need to
# automatically determine ylim (see get.ylim.by.dummy.plots).
# The trace2 variable is used for disabling tracing on the second pass.
trace2 <- trace
# trace=100 to 103 are special values used for development
# (they are for tracing just plotmo_x with no plotting)
special.trace <- FALSE
if(trace >= 100 && trace <= 103) {
special.trace <- TRUE
trace <- trace - 100
}
clip <- check.boolean(clip)
all1 <- check.boolean(all1)
all2 <- check.boolean(all2)
center <- check.boolean(center)
swapxy <- check.boolean(swapxy)
xflip <- check.boolean(xflip)
yflip <- check.boolean(yflip)
type2 <- match.choices(type2, c("persp", "contour", "image"), "type2")
level <- get.level(level, ...)
pt.col <- get.pt.col(pt.col, ...)
jitter <- get.jitter(jitter, ...)
ngrid1 <- get.ngrid1(ngrid1, ...)
ngrid2 <- get.ngrid2(ngrid2, ...)
smooth.col <- get.smooth.col(smooth.col, ...)
check.integer.scalar(ndiscrete, min=0)
nrug <- get.nrug(nrug, ...)
extend <- check.numeric.scalar(extend)
stopifnot(extend > -.3, extend <= 10) # .3 prevents shrinking to nothing, 10 is arb
# TODO revisit this, causes issues because the following for example produces
# the identical last two plots: for(i in 1:3) a <- earth(.., nfold=3); plot(a)
    rnorm(1) # touch the RNG so that .Random.seed exists (it is only created once the RNG has been used)
old.seed <- .Random.seed
on.exit(set.seed(old.seed), add=TRUE)
set.seed(2015)
if(!is.specified(degree1)) degree1 <- 0
if(!is.specified(degree2)) degree2 <- 0
if(!is.specified(nresponse)) nresponse <- NA
if(!is.specified(clip)) clip <- FALSE
if(center && clip) {
clip <- FALSE # otherwise incorrect clipping (TODO revisit)
warning0("forcing clip=FALSE because center=TRUE ",
"(a limitation of the current implementation)")
}
# get x so we can get the predictor names and ux.list
x <- plotmo_x(object, trace)
if(NCOL(x) == 0 || NROW(x) == 0)
stop("x is empty")
if(special.trace) # special value of trace was used?
return(invisible(x))
meta <- plotmo_meta(object, type, nresponse, trace,
msg.if.predictions.not.numeric=
if(level > 0) "the level argument is not allowed" else NULL,
...)
y <- meta$y.as.numeric.mat # y as a numeric mat, only the nresponse column
nresponse <- meta$nresponse # column index
resp.name <- meta$resp.name # used only in automatic caption, may be NULL
resp.levs <- meta$resp.levs # to convert predicted strings to factors, may be NULL
type <- meta$type # always a string (converted from NULL if necessary)
    # keep ngrid1 different from nrow(data) (prevents aliasing), so we still catch errors like:
    # "warning: predict(): 'newdata' had 31 rows but variable(s) found have 30 rows"
if(ngrid1 == length(y)) {
trace2(trace, "changed ngrid1 from %g to %g\n", ngrid1, ngrid1+1)
ngrid1 <- ngrid1 + 1
}
temp <- get.unique.xyvals(x, y, npoints, trace)
ux.list <- temp$ux.list # list, each elem is unique vals in a column of x
uy <- temp$uy # unique y vals
npoints <- temp$npoints
y <- apply.inverse.func(inverse.func, y, object, trace)
if(center)
y <- my.center(y, trace)
# get iresponse
ncases <- nrow(x)
iresponse <- NULL
if(is.specified(pt.col)) {
iresponse <- get.iresponse(npoints, ncases)
if(is.null(iresponse))
pt.col <- 0
}
# singles is a vector of indices of predictors for degree1 plots
temp <- plotmo_singles(object, x, nresponse, trace, degree1, all1)
some.singles <- temp$some.singles
singles <- temp$singles
# each row of pairs is the indices of two predictors for a degree2 plot
temp <- plotmo_pairs(object, x, nresponse, trace, all2, degree2)
some.pairs <- temp$some.pairs
pairs <- temp$pairs
nsingles <- length(singles)
npairs <- NROW(pairs)
temp <- get.pred.names(colnames(x), nsingles + npairs)
pred.names <- temp$pred.names
abbr.pred.names <- temp$abbr.pred.names
def.cex.main <- temp$def.cex.main
is.int.only <- !some.singles && !some.pairs
if(is.int.only && int.only.ok && !all(degree1 == 0)) {
singles <- 1 # plot the first predictor
nsingles <- 1
}
if(nsingles > 100) { # 100 is arb, 10 * 10
singles <- singles[1:100]
warning0("Will plot only the first 100 degree1 plots")
}
if(npairs > 100) {
pairs <- pairs[1:100,]
warning0("Will plot only the first 100 degree2 plots")
}
if(extend != 0 && npairs) {
warning0("extend=", extend, ": will not plot degree2 plots ",
"(extend is not yet implemented for degree2 plots)")
pairs <- NULL
npairs <- 0
}
nfigs <- nsingles + npairs
if(nfigs == 0) {
if(trace >= 0) {
if(is.int.only)
warning0("plotmo: nothing to plot (intercept-only model)")
else
warning0("plotmo: nothing to plot")
}
return(invisible())
}
do.par <- check.do.par(do.par, nfigs) # do.par is 0, 1, or 2
# Prepare caption --- we need it now for do.par() but
# can only display it later after at least one plot.
# nfigs=2 (any number greater than 1) because by default we do.par in plotmo.
caption <- get.caption(nfigs=2, do.par, caption, resp.name, type,
object$call, object.name, my.call)
if(do.par) {
# TODO document what happens here and in plotres if only one plot
oldpar <- par(no.readonly=TRUE)
        # need xlab etc. so we can figure out margin sizes in do.par
xlab <- dot("xlab", DEF="", ...)
ylab <- dot("ylab", DEF="", ...)
main <- dot("main", ...)
do.par(nfigs=nfigs, caption=caption, main1=main,
xlab1=xlab, ylab1=ylab, trace=trace, def.cex.main=def.cex.main, ...)
if(do.par == 1)
on.exit(par(oldpar), add=TRUE)
} else { # do.par=FALSE
oldpar <- do.par.dots(..., trace=trace)
if(length(oldpar))
on.exit(do.call(par, oldpar), add=TRUE)
}
trace2(trace, "\n----Figuring out ylim\n")
is.na.ylim <- !is.null(ylim) && anyNA(ylim)
jittered.y <- apply.jitter(as.numeric(y), jitter)
# get.ylim will do dummy plots if necessary
temp <- get.ylim(object=object,
type=type, nresponse=nresponse, pt.col=pt.col,
jitter=jitter, smooth.col=smooth.col, level=level,
func=func, inverse.func=inverse.func, nrug=nrug, grid.col=grid.col,
type2=type2, degree1=degree1, all1=all1, degree2=degree2, all2=all2,
do.par=do.par, clip=clip, ylim=ylim, caption=caption, trace=trace,
grid.func=grid.func, grid.levels=grid.levels, extend=extend,
ngrid1=ngrid1, ngrid2=ngrid2, npoints=npoints, ndiscrete=ndiscrete,
int.only.ok=int.only.ok, center=center, xflip=xflip, yflip=yflip,
swapxy=swapxy, def.cex.main=def.cex.main,
x=x, y=y, singles=singles, resp.levs=resp.levs,
ux.list=ux.list,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nsingles=nsingles, npairs=npairs, nfigs=nfigs, uy=uy,
is.na.ylim=is.na.ylim, is.int.only=is.int.only, trace2=trace2,
pairs=pairs, iresponse=iresponse, jittered.y=jittered.y, ...)
ylim <- temp$ylim
trace2 <- temp$trace2
if(nsingles)
plot.degree1(object=object, degree1=degree1, all1=all1, center=center,
ylim=if(is.na.ylim) NULL else ylim, # each graph has its own ylim?
nresponse=nresponse, type=type, trace=trace, trace2=trace2,
pt.col=pt.col, jitter=jitter, iresponse=iresponse,
smooth.col=smooth.col, grid.col=grid.col, inverse.func=inverse.func,
grid.func=grid.func, grid.levels=grid.levels, extend=extend,
ngrid1=ngrid1, is.int.only=is.int.only, level=level,
func=func, nrug=nrug,
draw.plot=TRUE, x=x, y=y, singles=singles, resp.levs=resp.levs,
ux.list=ux.list, ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs, uy=uy, xflip=xflip, jittered.y=jittered.y, ...)
if(npairs)
plot.degree2(object=object, degree2=degree2, all2=all2, center,
ylim=if(is.na.ylim) NULL else ylim, # each graph has its own ylim?
nresponse=nresponse, type=type, clip=clip, trace=trace, trace2=trace2,
pt.col=pt.col, jitter=jitter, iresponse=iresponse,
inverse.func=inverse.func,
grid.func=grid.func, grid.levels=grid.levels, extend=extend,
type2=type2, ngrid2=ngrid2, draw.plot=TRUE, do.par=do.par, x=x, y=y,
pairs=pairs, resp.levs=resp.levs, ux.list=ux.list,
ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs, nsingles=nsingles, npairs=npairs, xflip=xflip, yflip=yflip,
swapxy=swapxy, def.cex.main=def.cex.main, ...)
draw.caption(caption, ...)
invisible(x)
}
plotmo_prolog <- function(object, object.name, trace, ...)
{
object <- plotmo.prolog(object, object.name, trace, ...)
my.call <- call.as.char(n=2)
callers.name <- callers.name()
if(trace >= 2) {
printf.wrap("%s trace %g: %s\n", callers.name, trace, my.call)
if(is.null(object$call))
printf("object class is \"%s\" with no object$call\n", class(object)[1])
else
printf.wrap("object$call is %s\n", strip.deparse(object$call))
}
SHOWCALL <- dot("SHOWCALL", ...)
if(!is.specified(SHOWCALL))
my.call <- NULL
list(object=object, my.call=my.call)
}
get.pred.names <- function(colnames.x, nfigs)
{
# numbers below are somewhat arb
nrows <- ceiling(sqrt(nfigs)) # nrows in plot grid
minlength <- 20; def.cex.main <- 1.2
if (nrows >= 9) { minlength <- 6; def.cex.main <- .7 }
else if(nrows >= 8) { minlength <- 7; def.cex.main <- .8 }
else if(nrows >= 7) { minlength <- 7; def.cex.main <- .8 }
else if(nrows >= 6) { minlength <- 7; def.cex.main <- .8 }
else if(nrows >= 5) { minlength <- 8; def.cex.main <- 1 }
else if(nrows >= 4) { minlength <- 9; def.cex.main <- 1.1 }
stopifnot(!is.null(colnames.x)) # plotmo_x always returns colnames (unless no columns)
list(pred.names = colnames.x,
abbr.pred.names = abbreviate(strip.space(colnames.x),
minlength=minlength, method="both.sides"),
def.cex.main = def.cex.main)
}
# always returns a vector of 2 elems, could be c(-Inf, Inf)
get.ylim <- function(object,
type, nresponse, pt.col, jitter, smooth.col, level, func,
inverse.func, nrug, grid.col, type2, degree1, all1, degree2, all2,
do.par, clip, ylim, caption, trace,
grid.func, grid.levels, extend=extend, ngrid1, ngrid2,
npoints, ndiscrete, int.only.ok, center, xflip, yflip, swapxy, def.cex.main,
x, y, singles, resp.levs, ux.list, pred.names, abbr.pred.names,
nsingles, npairs, nfigs, uy,
is.na.ylim, is.int.only, trace2, pairs,
iresponse, jittered.y, ...)
{
get.ylim.by.dummy.plots <- function(..., trace)
{
# call the plotting functions with draw.plot=FALSE to get the ylim
trace2(trace, "--get.ylim.by.dummy.plots\n")
all.yhat <- NULL
if(nsingles) { # get all.yhat by calling with draw.plot=FALSE
# have to use explicit arg names to prevent alias probs
# with dots, because the user can pass in any name with dots
all.yhat <- c(all.yhat,
plot.degree1(object=object, degree1=degree1, all1=all1,
center=center, ylim=ylim, nresponse=nresponse, type=type,
trace=trace, trace2=trace2, pt.col=pt.col,
jitter=jitter, iresponse=iresponse,
smooth.col=smooth.col, grid.col=grid.col,
inverse.func=inverse.func, grid.func=grid.func,
grid.levels=grid.levels, extend=extend, ngrid1=ngrid1,
is.int.only=is.int.only,
level=level, func=func, nrug=nrug, draw.plot=FALSE, x=x, y=y,
singles=singles, resp.levs=resp.levs,
ux.list=ux.list, ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs, uy=uy, xflip=xflip, jittered.y=jittered.y, ...))
}
if(npairs) {
all.yhat <- c(all.yhat,
plot.degree2(object=object, degree2=degree2, all2=all2,
center=center, ylim=ylim, nresponse=nresponse, type=type,
clip=clip, trace=trace, trace2=trace2, pt.col=pt.col,
jitter=jitter, iresponse=iresponse,
inverse.func=inverse.func, grid.func=grid.func,
grid.levels=grid.levels, extend=extend,
type2=type2, ngrid2=ngrid2,
draw.plot=FALSE, do.par=do.par, x=x, y=y, pairs=pairs,
resp.levs=resp.levs, ux.list=ux.list,
ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs,
nsingles=nsingles, npairs=npairs, xflip=xflip, yflip=yflip,
swapxy=swapxy, def.cex.main=def.cex.main, ...))
} # 1 2 3 4 5
q <- quantile(all.yhat, probs=c(0, .25, .5, .75, 1))
ylim <- c(q[1], q[5]) # all the data
# iqr test to prevent clipping in some pathological cases
iqr <- q[4] - q[2] # middle 50% of the data (inter-quartile range)
if(clip && iqr > .05 * (max(y) - min(y))) {
median <- q[3]
ylim[1] <- max(ylim[1], median - 10 * iqr)
ylim[2] <- min(ylim[2], median + 10 * iqr)
}
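        # Worked example (illustrative): if the yhat quantiles q are c(0, 10, 12, 14, 1000),
        # then iqr = 4 and, provided the iqr test above passes, ylim is pulled in from
        # c(0, 1000) to c(max(0, 12 - 40), min(1000, 12 + 40)) = c(0, 52), discarding
        # the extreme prediction at 1000.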
if(is.specified(pt.col) || is.specified(smooth.col) || is.specified(level))
ylim <- range1(ylim, jittered.y) # ensure ylim big enough for resp points
else if(is.specified(smooth.col))
ylim <- range1(ylim, y)
        # binary or ternary response?
        # the range test is needed for binomial models specified using counts
        else if(length(uy) <= 3 || all(range(y) == c(0,1)))
ylim <- range1(ylim, y)
if(is.specified(nrug)) # space for rug
ylim[1] <- ylim[1] - .1 * (ylim[2] - ylim[1])
trace2(trace, "--done get.ylim.by.dummy.plots\n\n")
# have called the plot functions, minimize tracing in further calls to them
trace2 <<- 0 # note <<- not <-
ylim
}
#--- get.ylim starts here
if(!(is.null(ylim) || is.na(ylim[1]) || length(ylim) == 2))
stop0("ylim must be one of:\n",
" NULL all graphs have same vertical axes\n",
" NA each graph has its own vertical axis\n",
" c(min,max) ylim for all graphs")
if(length(ylim) == 2 && ylim[2] <= ylim[1])
stop0("ylim[2] ", ylim[2], " is not greater than ylim[1] ", ylim[1])
if(is.na.ylim)
ylim <- c(NA, NA) # won't be used
else if(is.null(ylim)) # auto ylim
ylim <- if(is.int.only) range(y, na.rm=TRUE)
else get.ylim.by.dummy.plots(trace=trace, ...)
if(!anyNA(ylim))
ylim <- fix.lim(ylim)
if(trace >= 2)
printf("ylim c(%.4g, %.4g) clip %s\n\n",
ylim[1], ylim[2], if(clip) "TRUE" else "FALSE")
list(ylim=ylim, trace2=trace2)
}
do.degree2.par <- function(type2, nfigs, detailed.ticktype)
{
nrows <- ceiling(sqrt(nfigs))
if(type2 == "persp") { # perspective plot
# note: persp ignores both the global mgp and any mgp passed directly to persp
mar <- c(if(detailed.ticktype) 1 else .2, .3, 1.7, 0.1)
par(mar=mar)
return(NULL)
} else { # contour or image plot
if(nrows >= 5)
mar <- c(2, 2, 1.2, .5) # space for bottom and left axis labels
else
mar <- c(3, 3, 2, .5)
par(mar=mar)
cex <- par("cex") # TODO would be better to use nfigs here?
mgp <- # compact title and axis annotations
if (cex < .7) c(1.2, 0.2, 0)
else if(cex < .8) c(1.3, 0.3, 0)
else c(1.5, 0.4, 0)
par(mgp=mgp)
}
}
plotmo_singles <- function(object, x, nresponse, trace, degree1, all1)
{
trace2(trace, "\n----plotmo_singles for %s object\n", class(object)[1])
singles <- plotmo.singles(object=object,
x=x, nresponse=nresponse, trace=trace, all1=all1)
some.singles <- FALSE
if(length(singles)) {
singles <- sort.unique(singles)
some.singles <- TRUE
}
nsingles <- length(singles)
if(nsingles) {
degree1 <- check.index(degree1, "degree1", singles, colnames=colnames(x),
allow.empty=TRUE, is.degree.spec=TRUE)
singles <- singles[degree1]
} else if(is.degree.specified(degree1) && degree1[1] != 0 && trace >= 0)
warning0("'degree1' specified but no degree1 plots")
if(trace >= 2) {
if(nsingles)
cat("singles:", paste0(singles, " ", colnames(x)[singles], collapse=", "), "\n")
else
cat("no singles\n")
}
list(some.singles=some.singles,
singles =singles) # a vector of indices of predictors for degree1 plots
}
plotmo_pairs <- function(object, x, nresponse, trace, all2, degree2)
{
trace2(trace, "\n----plotmo_pairs for %s object\n", class(object)[1])
pairs <- plotmo.pairs(object, x, nresponse, trace, all2)
if(!NROW(pairs) || !NCOL(pairs))
pairs <- NULL
npairs <- NROW(pairs)
some.pairs <- FALSE
if(npairs) {
some.pairs <- TRUE
# put lowest numbered predictor first and remove duplicate pairs
pairs <- unique(t(apply(pairs, 1, sort)))
# order the pairs on the predictor order
order <- order(pairs[,1], pairs[,2])
pairs <- pairs[order, , drop=FALSE]
degree2 <- check.index(degree2, "degree2", pairs, colnames=colnames(x),
allow.empty=TRUE, is.degree.spec=TRUE)
pairs <- pairs[degree2, , drop=FALSE]
}
if(trace >= 2) {
if(npairs) {
cat("pairs:\n")
print(matrix(paste(pairs, colnames(x)[pairs]), ncol=2))
} else
cat("no pairs\n")
}
if(npairs == 0 && is.degree.specified(degree2) && degree2[1] != 0 && trace >= 0)
warning0("'degree2' specified but no degree2 plots")
list(some.pairs=some.pairs,
pairs =pairs)
}
# pt.col is a formal arg, but for back compat we also support col.response
get.pt.col <- function(pt.col, ...)
{
pt.col <- pt.col
if(!is.specified(pt.col) && !is.dot("col", ...))
pt.col <- dot("col.response", EX=0, ...) # partial match, "col" excluded above
# if any other response argument is specified, set the response color
if(!is.specified(pt.col) &&
is.dot("pch cex.response pch.response pt.cex pt.pch",
EX=c(1,1,1,0,0), ...))
pt.col <- "slategray4"
if(!is.specified(pt.col))
pt.col <- 0
pt.col
}
get.jitter <- function(jitter, ...)
{
if(anyNA(jitter)) # allow jitter=NA
jitter <- 0
check.numeric.scalar(jitter, logical.ok=TRUE)
jitter <- as.numeric(jitter)
if(jitter < 0 || jitter > 100)
stop0("jitter=", jitter, " is illegal")
jitter
}
get.smooth.col <- function(smooth.col, ...)
{
smooth.col <- dot("col.smooth", DEF=smooth.col, ...) # back compat
# if any other smooth argument is specified, set the smooth color
if(!is.specified(smooth.col) &&
is.dot("lty.smooth lwd.smooth lwd.loess smooth.lty smooth.lwd",
EX=c(1,1,1,0,0), ...))
smooth.col <- 2
if(!is.specified(smooth.col))
smooth.col <- 0
smooth.col
}
get.ngrid1 <- function(ngrid1, ...)
{
check.integer.scalar(ngrid1)
if(ngrid1 < 2)
stop0("illegal ngrid1 ", ngrid1)
if(ngrid1 > 1000) {
warning0("clipped ngrid1=", ngrid1, " to 1000")
ngrid1 <- 1000
}
ngrid1
}
get.ngrid2 <- function(ngrid2, ...)
{
check.integer.scalar(ngrid2)
if(ngrid2 < 2)
stop0("illegal ngrid2 ", ngrid2)
if(ngrid2 > 500) {
warning0("clipped ngrid2=", ngrid2, " to 500")
ngrid2 <- 500
}
ngrid2
}
get.level <- function(level, ...)
{
if(anyNA(level) || is.null(level)) # treat NA and NULL as 0
level <- 0
check.numeric.scalar(level)
# some code for backward compatibility (se is now deprecated)
se <- 0
if(is.dot("se", ...))
se <- dot("se", ...)
check.numeric.scalar(se, logical.ok=TRUE)
if(se && level) # both specified?
stop0("plotmo's 'se' argument is deprecated, please use 'level' instead")
if(identical(se, TRUE)) {
level <- .95
warning0(
"plotmo's 'se' argument is deprecated, please use 'level=.95' instead")
} else if (se < 0 || se > 5) # 5 is arb
stop0("plotmo's 'se' argument is deprecated, please use 'level=.95' instead")
else if (se > 0 && se < 1) # e.g. se=.95
stop0("plotmo's 'se' argument is deprecated, please use 'level=.95' instead")
else if (se > 0) {
level <- 1 - 2 * (1 - pnorm(se)) # se=2 becomes level=.954
warning0(sprintf(
"plotmo's 'se' argument is deprecated, please use 'level=%.2f' instead",
level))
} else if(level != 0 && (level < .5 || level >= 1))
stop0("level=", level, " is out of range, try level=.95")
level
}
get.unique.xyvals <- function(x, y, npoints, trace)
{
# convert special values of npoints
ncases <- nrow(x)
check.integer.scalar(npoints, min=-1, null.ok=TRUE, logical.ok=TRUE)
npoints.was.neg <- FALSE
if(is.null(npoints))
npoints <- 0
else if(is.logical(npoints))
npoints <- if(npoints) ncases else 0
else if(npoints == -1) {
npoints.was.neg <- TRUE
npoints <- ncases
} else if(npoints > ncases)
npoints <- ncases
# Use a maximum of NMAX cases for calculating ux.list and uy
# (unless npoints is bigger or TRUE or negative).
# Allows plotmo to be fast even on models with millions of cases.
NMAX <- 1e4
nmax <- max(NMAX, npoints)
if(!npoints.was.neg && ncases > nmax) {
trace2(trace, "using %g of %g cases to calculate unique x and y values\n",
npoints, ncases)
isubset <- get.isubset(y, npoints)
y <- y[isubset]
x <- x[isubset, , drop=FALSE]
}
list(ux.list = get.ux.list(x, trace),
uy = unique(y),
npoints = npoints)
}
# return a list, each element is the unique levels for corresponding column of x
# TODO this is where we spend a lot of time in plotmo for big data
get.ux.list <- function(x, trace)
{
ux.list <- list(colnames(x))
for(i in seq_len(ncol(x)))
ux.list[[i]] <- if(is.factor(x[,i])) levels(x[,i])
else sort.unique(x[,i])
trace2(trace, "number of x values: %s\n",
paste.trunc(colnames(x), sapply(ux.list, length)))
ux.list
}
# Remove duplicates in x, then sort (smallest first).
# I had sort(unique(x)), but following is faster because it requires only one sort.
sort.unique <- function(x)
{
rle(sort(x))[["values"]] # rle() is in base
}
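# e.g. sort.unique(c(3, 1, 3, 2)) returns c(1, 2, 3), the same result as sort(unique(x))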
points.or.text <- function(..., x, y, pt.col, iresponse)
{
stopifnot(!is.na(pt.col))
cex <- dot("pt.cex cex.response", DEF=1, EX=c(0,1), NEW=1, ...)
cex <- cex * pt.cex(NROW(x))
pch <- dot("pt.pch pch.response pch", DEF=20, EX=c(0,1,1), NEW=1, ...)
# recycle then select only iresponse points
n <- length(y)
col <- repl(pt.col, n)[iresponse]
pch <- repl(pch, n)[iresponse]
cex <- repl(cex, n)[iresponse]
x <- x[iresponse]
y <- y[iresponse]
if(is.character(pch) && pch[1] != ".")
call.plot(graphics::text.default, PREFIX="pt.",
force.x = x,
force.y = y,
force.labels = pch,
force.col = col,
force.cex = pmax(.1, .9 * cex),
def.xpd = NA, # allow writing beyond plot area
...)
else
call.plot(graphics::points.default, PREFIX="pt.",
force.x = x,
force.y = y,
force.pch = pch,
force.col = col,
force.cex = cex,
# commented out because looks messy in image plots
# def.xpd = NA, # allow writing beyond plot area
...)
}
# The following global variables are for efficiency when we make two
# passes through the plot. We store the data from the first pass so we
# don't have to regenerate it.
degree1.xgrid.global <- NULL
degree2.xgrid.global <- NULL
# TODO Following is ugly. I would prefer to have two namespace level
# variables, degree1.data.global and degree2.data.global, similar to the
# above two variables. But CRAN check won't allow
# unlockBinding(degree1.data.global, asNamespace("plotmo")) so we can update
# those variables. Also, we can't directly use assignInMyNamespace for these
# variables because we need to update individual list elements.
make.static.list <- function() {
data <- list()
func <- function(i, newdata=NULL) {
if(is.null(i)) # init the data?
data <<- list()
else if(!missing(newdata)) # assign to the data?
data[[i]] <<- newdata
else if(i <= length(data)) # return the data element
data[[i]]
else # return the element, but it's NULL
NULL
}
func
}
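# Illustrative usage of the closure returned by make.static.list (this mirrors how
# degree1.data and degree2.data are used elsewhere in this file):
#   store <- make.static.list()
#   store(1, list(a=1))  # save data for element 1
#   store(1)             # retrieve element 1
#   store(2)             # nothing stored yet for element 2, returns NULL
#   store(NULL)          # reset all stored data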
degree1.data <- make.static.list()
degree2.data <- make.static.list()
trace.call.global <- 0 # nonzero to trace call to predict, residuals, etc
init.global.data <- function()
{
assignInMyNamespace("trace.call.global", 0)
assignInMyNamespace("degree1.xgrid.global", NULL)
assignInMyNamespace("degree2.xgrid.global", NULL)
degree1.data(NULL) # clear the degree1 data by passing NULL
degree2.data(NULL)
}
plot.degree1 <- function( # plot all degree1 graphs
# copy of args from plotmo, some have been tweaked slightly
object, degree1, all1, center,
ylim, nresponse, type,
trace, trace2,
pt.col, jitter, iresponse,
smooth.col, grid.col,
inverse.func, grid.func, grid.levels, extend,
ngrid1,
is.int.only, level,
func, nrug,
# the following args are generated in plotmo
draw.plot, # draw.plot=FALSE means get predictions but don't actually plot
x, y, singles, resp.levs, ux.list, ndiscrete,
pred.names, abbr.pred.names, nfigs, uy,
xflip, jittered.y,
...)
{
get.degree1.data <- function(isingle)
{
data <- degree1.data(isingle)
if(!is.null(data)) # data is already initialized?
return(data) # yes, use it
# create data.frame of x values to be plotted, by updating xgrid for this predictor
xframe <- get.degree1.xframe(xgrid, x, ipred, ngrid1,
ndiscrete, ux.list, extend)
trace2(trace, "degree1 plot %d %s\n",
isingle, pred.names[ipred])
yhat <- plotmo_predict(object, xframe, nresponse,
type, resp.levs, trace2, inverse.func, ...)$yhat
# prediction intervals, NULL if level argument not used
intervals <- NULL
if(level > 0)
intervals <- plotmo_pint(object, xframe, type, level, trace2,
ipred, inverse.func)
temp <- blockify.degree1.frame(xframe, yhat, intervals,
ipred, ux.list, ndiscrete)
xframe <- temp$xframe
yhat <- temp$yhat
intervals <- temp$intervals
if(center) {
yhat <- my.center(yhat, trace2)
intervals$fit <- my.center(intervals$fit, trace2)
intervals$lwr <- my.center(intervals$lwr, trace2)
intervals$upr <- my.center(intervals$upr, trace2)
intervals$cint.lwr <- my.center(intervals$cint.lwr, trace2)
intervals$cint.upr <- my.center(intervals$cint.upr, trace2)
}
all.yhat <- c(all.yhat, yhat,
intervals$lwr, intervals$upr,
intervals$cint.lwr, intervals$cint.upr)
data <- list(xframe=xframe, yhat=yhat, intervals=intervals, all.yhat=all.yhat)
if(!draw.plot) # save the data, if there is going to be a next time
degree1.data(isingle, data)
data
}
draw.degree1 <- function(...)
{
draw.degree1.fac <- function(...)
{
draw.grid(grid.col, nx=NA, ...) # nx=NA for horiz-only grid
draw.fac.intervals(xframe[,ipred], intervals, ...)
if(is.specified(pt.col))
points.or.text(x=jittered.x, y=jittered.y, pt.col=pt.col,
iresponse=iresponse, ...)
draw.smooth1(smooth.col, x, ipred, y, ux.list, ndiscrete, center, ...)
# formal args for plot.factor, needed because "CRAN check"
# doesn't allow ":::" and plot.factor isn't public
plot.factor.formals <- c("x", "y", "legend.text")
call.plot(graphics::plot, # calls plot.factor
PREFIX = "degree1.",
FORMALS = plot.factor.formals,
TRACE = if(isingle == 1 && trace >= 2) trace-1 else 0,
force.x = xframe[,ipred], force.y=yhat,
force.add = TRUE,
def.xaxt = if(xaxis.is.levs) "n" else "s",
def.yaxt = if(yaxis.is.levs) "n" else "s",
force.lty = 1, # else lty=2 say is printed weirdly
force.lwd = 1,
...)
if(xaxis.is.levs) # plot x level names along the x axis
mtext(xlevnames, side=1, at=1:length(xlevnames),
cex=par("cex") * cex.lab, line=.5, las=get.las(xlevnames))
if(yaxis.is.levs) # plot y level names along the y axis
mtext(ylevnames, side=2, at=1:length(ylevnames),
cex=par("cex") * cex.lab, line=.5, las=get.las(ylevnames))
}
draw.degree1.numeric <- function(...)
{
draw.grid(grid.col, ...)
draw.numeric.intervals(xframe[,ipred], intervals, ...)
draw.func(func, object, xframe, ipred, center, trace, ...)
if(is.specified(pt.col))
points.or.text(x=jittered.x, y=jittered.y, pt.col=pt.col,
iresponse=iresponse, ...)
draw.smooth1(smooth.col, x, ipred, y, ux.list, ndiscrete, center, ...)
call.plot(graphics::lines.default, PREFIX="degree1.",
force.x = xframe[,ipred], force.y = yhat,
force.col = dot("degree1.col col.degree1 col",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.lty = dot("degree1.lty lty.degree1 lty",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.lwd = dot("degree1.lwd lwd.degree1 lwd",
EX=c(0,1,1), DEF=1, NEW=1, ...),
...)
}
#--- draw.degree1 starts here
x1 <- x[,ipred]
numeric.x <- jittered.x <- as.numeric(x1)
jittered.x <- apply.jitter(numeric.x, jitter)
xlim <- get.degree1.xlim(ipred, xframe, ux.list, ndiscrete,
pt.col, jittered.x, xflip, ...)
# title of the current plot
main <- dot("main", ...)
main <- if(is.specified(main))
repl(main, isingle)[isingle]
else {
main <- ""
if(nfigs > 1 && !is.degree.specified(degree1))
main <- paste0(isingle, " ") # show plot number in headers
paste(main, abbr.pred.names[ipred])
}
xlevnames <- abbreviate(levels(xframe[,ipred]), minlength=6, strict=TRUE)
xaxis.is.levs <- is.factor(x1) && length(xlevnames) <= 12
yaxis.is.levs <- length(resp.levs) >= 1 && length(resp.levs) <= 12
if(yaxis.is.levs)
ylevnames <- abbreviate(resp.levs, minlength=6, strict=TRUE)
yaxis.is.levs <- FALSE # TODO should only do this if response is a string or a factor
xlab <- dot("xlab", ...)
xlab <- if(is.null(xlab)) abbr.pred.names[ipred]
else if(is.specified(xlab)) repl(xlab, isingle)[isingle]
else ""
ylab <- dot("ylab", DEF=NULL, ...)
ylab <- if(is.specified(ylab)) repl(ylab, isingle)[isingle]
else ""
call.plot(graphics::plot.default, PREFIX="degree1.",
TRACE = if(isingle == 1 && trace >= 2) trace-1 else 0,
force.x = xframe[,ipred],
force.y = yhat,
force.type = "n", # nothing in interior of plot yet
force.main = main,
force.xlab = xlab,
force.ylab = ylab,
force.xlim = xlim,
force.ylim = ylim,
def.xaxt = if(xaxis.is.levs) "n" else "s",
def.yaxt = if(yaxis.is.levs) "n" else "s",
...)
if(yaxis.is.levs) # plot y level names along the y axis
mtext(ylevnames, side=2, at=1:length(ylevnames),
cex=par("cex") * cex.lab, line=.5, las=get.las(ylevnames))
if(center &&
!is.specified(grid.col) &&
!is.specified(dot("col.grid", ...)))
abline(h=0, col="gray", lwd=.6) # gray line at y=0
if(is.int.only) # make it obvious that this is an intercept-only model
legend("topleft", "intercept-only model", bg="white")
if(is.factor(x1))
draw.degree1.fac(...)
else
draw.degree1.numeric(...)
if(is.character(nrug) || is.dot("density.col", EX=0, ...))
draw.density.along.the.bottom(numeric.x, ...)
else if(nrug)
call.plot(graphics::rug, force.x=jittered.x, def.quiet=TRUE, ...)
}
#--- plot.degree1 starts here
trace2(trace, "--plot.degree1(draw.plot=%s)\n", if(draw.plot) "TRUE" else "FALSE")
# get the x matrix we will plot, will be updated later for each predictor one by one
if(!is.null(degree1.xgrid.global)) # already have the data?
xgrid <- degree1.xgrid.global # yes, use it
else {
xgrid <- get.degree1.xgrid(x, grid.func, grid.levels, pred.names, ngrid1)
if(!draw.plot) # save the data, if there is going to be a next time
assignInMyNamespace("degree1.xgrid.global", xgrid)
}
# is.int.only test because we don't call get.ylim.by.dummy.plots for int only models
if((!draw.plot || is.int.only) && trace >= 0 && ncol(xgrid) > 1)
print.grid.values(xgrid, trace)
cex.lab <- dot("cex.lab", DEF=.8 * par("cex.main"), ...)
irug <- get.degree1.irug(nrug, x, draw.plot, ...) # get indices of rug points, if any
all.yhat <- NULL
for(isingle in seq_along(singles)) {
if(isingle == 2 && trace2 == 2) {
trace2 <- 1
printf("Reducing trace level for subsequent degree1 plots\n")
}
ipred <- singles[isingle] # ipred is the predictor index i.e. col in model mat
# following happens with lm if you do e.g. ozone1$doy <- NULL after using ozone1
# this won't catch all such errors
if(ipred > NCOL(x))
stop0("illegal index=", ipred, " (missing column in x?) NCOL(x)=", NCOL(x))
temp <- get.degree1.data(isingle)
xframe <- temp$xframe
yhat <- temp$yhat
intervals <- temp$intervals
all.yhat <- temp$all.yhat
if(draw.plot)
draw.degree1(...)
}
all.yhat # numeric vector of all predicted values
}
get.degree1.xlim <- function(ipred, xframe, ux.list, ndiscrete,
pt.col, jittered.x, xflip, ...)
{
xlim <- dot("xlim", ...)
if(is.specified(xlim))
stopifnot(is.numeric(xlim), length(xlim) == 2)
else {
x1 <- xframe[,ipred]
xlim <- range1(x1)
if(is.factor(x1)) {
xlim[1] <- xlim[1] - .4
xlim[2] <- xlim[2] + .4
} else if(length(ux.list[[ipred]]) <= ndiscrete)
xlim <- c(xlim[1] - .1, xlim[2] + .1)
if(is.specified(pt.col))
xlim <- range1(xlim, jittered.x)
}
xlim <- fix.lim(xlim)
if(xflip) {
temp <- xlim[1]
xlim[1] <- xlim[2]
xlim[2] <- temp
}
xlim
}
apply.jitter <- function(x, jitter, adjust=1)
{
if(jitter == 0)
return(x)
jitter(x, factor=adjust * jitter)
}
get.iresponse <- function(npoints, ncases) # get indices of xrows
{
check.integer.scalar(npoints)
if(npoints == 0)
return(NULL)
if(npoints == 1)
npoints <- -1
if(npoints <= 1 || npoints > ncases) # -1 or TRUE means all cases
npoints <- ncases
if(npoints == ncases)
seq_len(ncases)
else
sample(seq_len(ncases), size=npoints, replace=FALSE)
}
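# Examples (illustrative): get.iresponse(0, 100) returns NULL (plot no response points),
# get.iresponse(-1, 100) returns 1:100 (all cases), and get.iresponse(30, 100)
# returns a random sample of 30 case indices.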
draw.smooth1 <- function(smooth.col, x, ipred, y, ux.list, ndiscrete, center, ...)
{
if(!is.specified(smooth.col))
return(NULL)
x1 <- x[,ipred]
is.discrete.x <- FALSE
if(is.factor(x1)) {
is.discrete.x <- TRUE
levels <- sort.unique(as.numeric(x1))
} else if(length(ux.list[[ipred]]) <= ndiscrete) {
is.discrete.x <- TRUE
levels <- ux.list[[ipred]]
}
if(is.discrete.x) {
# x1 has discrete levels, display the mean y at each value of x1
smooth <- sapply(split(y, x1), mean)
        if(center)
            smooth <- my.center(smooth)
call.plot(graphics::lines.default, PREFIX="smooth.", drop.f=1,
force.x = levels,
force.y = smooth,
force.col = smooth.col,
force.lty = dot("smooth.lty lty.smooth",
EX=c(0,1), DEF=1, NEW=1, ...),
force.lwd = dot("smooth.lwd lwd.smooth lwd.loess",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.pch = dot("smooth.pch", DEF=20, EX=0, ...),
def.type = "b",
...)
} else {
# For less smoothing (so we can better judge earth inflection points),
# we use a default value for f lower than the default 2/3.
smooth.f <- dot("smooth.f loess.f", DEF=.5, NEW=1, ...)
check.numeric.scalar(smooth.f)
stopifnot(smooth.f > .01, smooth.f < 1)
smooth <- lowess(x1, y, f=smooth.f)
y <- if(center) my.center(smooth$y) else smooth$y
call.plot(graphics::lines.default, PREFIX="smooth.", drop.f=1,
force.x = smooth$x,
force.y = y,
force.col = smooth.col,
force.lty = dot("smooth.lty lty.smooth", EX=c(0,1), DEF=1, NEW=1, ...),
force.lwd = dot("smooth.lwd lwd.smooth lwd.loess",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.pch = dot("smooth.pch", DEF=20, EX=0, ...),
...)
}
}
get.nrug <- function(nrug, ...)
{
if(!is.specified(nrug))
nrug <- 0
else if(!is.character(nrug)) {
check.integer.scalar(nrug, logical.ok=TRUE)
if(nrug == TRUE)
nrug <- -1
else if(!is.specified(nrug) && is.dot("rug.col", ...))
nrug <- -1
}
nrug
}
get.degree1.irug <- function(nrug, x, draw.plot, ...) # indices of xrows for rug
{
if(!draw.plot || nrug == 0)
return(NULL)
if(is.character(nrug))
nrug <- -1
else
check.integer.scalar(nrug, logical.ok=TRUE)
if(nrug < 0 || nrug > nrow(x))
nrug <- nrow(x)
if(nrug == nrow(x))
seq_len(nrow(x))
else
sample(seq_len(nrow(x)), size=nrug, replace=FALSE)
}
draw.grid <- function(grid.col, nx=NULL, ...)
{
if(is.specified(grid.col) || is.specified(dot("col.grid", ...))) {
if(is.specified(grid.col) && is.logical(grid.col) && grid.col)
grid.col <- "lightgray"
grid.col <- if(is.specified(grid.col)) grid.col
else dot("col.grid", DEF="lightgray", ...)
# grid() doesn't have a dots arg so we invoke call.plot without dots
call.plot(graphics::grid,
force.nx = dot("grid.nx", DEF=nx, ...),
force.ny = dot("grid.ny", DEF=NULL, ...),
force.col = grid.col,
force.lty = dot("grid.lty", DEF=1, ...),
force.lwd = dot("grid.lwd", DEF=1, ...))
}
}
get.level.shades <- function(intervals, ...)
{
level.shade <- dot("level.shade shade.pints", DEF="mistyrose2", ...)
if(is.null(intervals$lwr) || is.null(intervals$cint.lwr))
c(level.shade, level.shade)
else { # use level.shade2 only if two kinds of intervals
# use exact match here because level.shade2 is also matched by level.shade
level.shade2 <- dot("level.shade2 shade2.pints", DEF="mistyrose4", ...)
c(level.shade, level.shade2)
}
}
# draw std err bars for a numeric predictor
draw.numeric.intervals <- function(x, intervals, ...)
{
if(!is.null(intervals)) {
level.shades <- get.level.shades(intervals, ...)
if(!is.null(intervals$lwr))
polygon1(x=x, lwr=intervals$lwr, upr=intervals$upr,
shade=level.shades[1], ...)
if(!is.null(intervals$cint.lwr))
polygon1(x=x, lwr=intervals$cint.lwr, upr=intervals$cint.upr,
shade=level.shades[2])
if(!is.null(intervals$lwr) || !is.null(intervals$cint.lwr))
box() # replot the box because intervals sometimes drawn over it
}
}
# TODO you can't get just the confidence lines with no shading, following looks not ok:
# plotmo(a, level=.8, level.lty=1, level.border=1, level.shade=2, level.density=0)
polygon1 <- function(x, lwr, upr, shade, ...)
{
call.plot(graphics::polygon, PREFIX="level.", drop.shade=1, drop.shade2=1,
force.x = c(x[1], x, rev(x)),
force.y = c(lwr[1], lwr, rev(upr)),
force.col = shade,
def.border = shade,
def.lty = 0,
...)
}
# draw std err bands for a factor predictor
draw.fac.intervals <- function(x, intervals, ...)
{
draw.intervals <- function(lwr, upr, shade)
{
for(ilev in seq_along(levels(x))) {
min <- min(lwr[[ilev]])
max <- max(upr[[ilev]])
polygon(c(ilev - .4, ilev - .4, ilev + .4, ilev + .4),
c(min, max, max, min), col=shade, border=shade, lty=0)
}
}
if(!is.null(intervals)) {
level.shades <- get.level.shades(intervals, ...)
if(!is.null(intervals$lwr))
draw.intervals(split(intervals$lwr, x),
split(intervals$upr, x), level.shades[1])
if(!is.null(intervals$cint.lwr))
draw.intervals(split(intervals$cint.lwr, x),
split(intervals$cint.upr, x), level.shades[2])
if(!is.null(intervals$lwr) || !is.null(intervals$cint.lwr))
box() # replot the box because intervals sometimes drawn over it
}
}
# draw the func arg, if specified
draw.func <- function(func, object, xframe, ipred, center, trace, ...)
{
if(!is.null(func)) {
print_summary(xframe, "Data for func", trace)
if(!is.function(func))
stop0("'func' is not a function");
y <- process.y(func(xframe), object, type="response", nresponse=1,
nrow(xframe), expected.levs=NULL, trace, "func returned")$y
if(center)
y <- my.center(y, trace)
call.plot(graphics::lines.default, PREFIX="func.",
force.x = xframe[,ipred],
force.y = y,
def.type = "l",
force.col = dot("func.col col.func",
EX=c(0,1), DEF="lightblue3", NEW=1, ...),
force.lty = dot("func.lty lty.func",
EX=c(0,1), DEF=1, NEW=1, ...),
force.lwd = dot("func.lwd lwd.func",
EX=c(0,1), DEF=2, NEW=1, ...),
...)
}
}
plot.degree2 <- function( # plot all degree2 graphs
# copy of args from plotmo, some have been tweaked slightly
object, degree2, all2, center, ylim, nresponse, type,
clip, trace, trace2, pt.col,
jitter, iresponse,
inverse.func, grid.func, grid.levels, extend,
type2, ngrid2,
# the following args are generated in plotmo
draw.plot, # draw.plot=FALSE means get and return all.yhat but don't actually plot
do.par,
x, y, pairs, resp.levs, ux.list, ndiscrete,
pred.names, abbr.pred.names, nfigs, nsingles, npairs,
xflip, yflip, swapxy, def.cex.main,
...)
{
get.degree2.data <- function(ipair)
{
data <- degree2.data(ipair)
if(!is.null(data)) # data is already initialized?
return(data) # yes, use it
# create data.frame of x values to be plotted, by updating xgrid for this pair
temp <- get.degree2.xframe(xgrid, x, ipred1, ipred2,
ngrid2, xranges, ux.list, ndiscrete)
xframe <- temp$xframe
grid1 <- temp$grid1
grid2 <- temp$grid2
trace2(trace, "degree2 plot %d %s:%s\n",
ipair, pred.names[ipred1], pred.names[ipred2])
yhat <- plotmo_predict(object, xframe, nresponse,
type, resp.levs, trace2, inverse.func, ...)$yhat
# image plots for factors look better if not blockified
if(type2 != "image") {
temp <- blockify.degree2.frame(x, yhat, grid1, grid2,
ipred1, ipred2, ux.list, ndiscrete)
yhat <- temp$yhat
grid1 <- temp$grid1
grid2 <- temp$grid2
}
if(center)
yhat <- my.center(yhat, trace2)
yhat <- matrix(yhat, nrow=length(grid1), ncol=length(grid2))
data <- list(xframe=xframe, grid1=grid1, grid2=grid2, yhat=yhat)
if(!draw.plot) # save the data, if there is going to be a next time
degree2.data(ipair, data)
data
}
draw.degree2 <- function(type2 = c("persp", "contour", "image"), ...)
{
name1 <- abbr.pred.names[ipred1]
name2 <- abbr.pred.names[ipred2]
# title of the current plot
main <- dot("main", ...)
main <- if(is.specified(main))
repl(main, nsingles+ipair)[nsingles+ipair]
else {
main <- ""
if(nfigs > 1 && !is.degree.specified(degree2))
main <- paste0(ipair, " ") # show plot number in headers
if(swapxy)
paste0(main, name2, ": ", name1)
else
paste0(main, name1, ": ", name2)
}
if(clip) {
yhat[yhat < ylim[1]] <- NA
# we don't clip upper values for persp plot because its own clipping is ok
# (whereas its own clipping for lower values tends to allow overwrite of axes).
if(type2 != "persp")
yhat[yhat > ylim[2]] <- NA
}
switch(type2,
persp=plot.persp(
x=x, grid1=grid1, grid2=grid2, yhat=yhat, name1=name1, name2=name2,
ipred1=ipred1, ipred2=ipred2, ipair=ipair, nsingles=nsingles,
trace=trace, ylim=ylim, xflip=xflip, yflip=yflip, swapxy=swapxy,
ngrid2=ngrid2, main2=main, ticktype2=ticktype, def.cex.main=def.cex.main,
...),
contour=plot.contour(
x=x, grid1=grid1, grid2=grid2, yhat=yhat, name1=name1, name2=name2,
ipred1=ipred1, ipred2=ipred2, xflip=xflip, yflip=yflip, swapxy=swapxy,
main2=main, pt.col=pt.col,
jitter=jitter,
ux.list=ux.list, ndiscrete=ndiscrete, iresponse=iresponse,
...),
image=plot.image(
x=x, grid1=grid1, grid2=grid2, yhat=yhat, name1=name1, name2=name2,
ipred1=ipred1, ipred2=ipred2, xflip=xflip, yflip=yflip, swapxy=swapxy,
main2=main, pt.col=pt.col,
jitter=jitter,
ux.list=ux.list, ndiscrete=ndiscrete, iresponse=iresponse,
...))
}
#--- plot.degree2 starts here
trace2(trace, "--plot.degree2(draw.plot=%s)\n", if(draw.plot) "TRUE" else "FALSE")
stopifnot(npairs > 0)
# need ticktype to determine degree2 margins
ticktype <- dot("persp.ticktype", DEF="simple", EX=0, ...)
ticktype <- match.choices(ticktype, c("simple", "detailed"), "ticktype")
if(draw.plot && do.par) {
opar=par("mar", "mgp")
on.exit(par(mar=opar$mar, mgp=opar$mgp))
do.degree2.par(type2, nfigs, substr(ticktype, 1, 1) == "d")
}
# get the x matrix we will plot, will be updated later for each pair of predictors
xranges <- get.degree2.xranges(x, extend, ux.list, ndiscrete)
if(!is.null(degree2.xgrid.global)) # already have the data?
xgrid <- degree2.xgrid.global # yes, use it
else {
xgrid <- get.degree2.xgrid(x, grid.func, grid.levels, pred.names, ngrid2)
if(!draw.plot) # save the data, if there is going to be a next time
assignInMyNamespace("degree2.xgrid.global", xgrid)
}
all.yhat <- NULL
for(ipair in seq_len(npairs)) {
ipred1 <- pairs[ipair,1] # index of first predictor
ipred2 <- pairs[ipair,2] # index of second predictor
if(ipair == 2 && trace2 == 2) {
trace2 <- 1
printf("Reducing trace level for subsequent degree2 plots\n")
}
temp <- get.degree2.data(ipair)
xframe <- temp$xframe
grid1 <- temp$grid1
grid2 <- temp$grid2
yhat <- temp$yhat
all.yhat <- c(all.yhat, yhat)
if(draw.plot)
draw.degree2(type2, ...)
}
all.yhat
}
get.degree2.xranges <- function(x, extend, ux.list, ndiscrete)
{
xranges <- matrix(NA, ncol=ncol(x), nrow=2)
colnames(xranges) <- colnames(x)
for(icol in seq_len(ncol(x))) {
x1 <- x[,icol]
xrange <- range1(x1, na.rm=TRUE)
nxvals <- length(ux.list[[icol]])
# TODO this extends xrange correctly but that doesn't suffice
# because get.degree2.xframe doesn't necessarily use xranges
if(extend != 0 && nxvals > ndiscrete && !is.factor(x1)) {
stopifnot(xrange[2] >= xrange[1])
ext <- extend * (xrange[2] - xrange[1])
xrange[1] <- xrange[1] - ext
xrange[2] <- xrange[2] + ext
}
xranges[,icol] <- xrange
}
xranges
}
draw.response.sites <- function(x, ipred1, ipred2, pt.col, jitter,
ux.list, ndiscrete, iresponse, swapxy, ...)
{
if(swapxy) {
x1 <- x[,ipred2]
x2 <- x[,ipred1]
} else {
x1 <- x[,ipred1]
x2 <- x[,ipred2]
}
points.or.text(
x=apply.jitter(as.numeric(x1), jitter, adjust=1.5),
y=apply.jitter(as.numeric(x2), jitter, adjust=1.5),
pt.col=pt.col, iresponse=iresponse, ...)
}
plot.persp <- function(x, grid1, grid2, yhat, name1, name2, ipred1, ipred2,
ipair, nsingles, trace, ylim, xflip, yflip, swapxy, ngrid2,
main2, ticktype2, def.cex.main, ...)
{
get.theta <- function(...) # theta arg for persp()
{
get.diag.val <- function(diag1, diag2) # return first non NA along diag
{
vals <- yhat[diag1, diag2]
(vals[!is.na(vals)])[1] # return first non NA in vals
}
theta <- dot("persp.theta theta", EX=c(0,1), ...)
if(is.na(theta)) { # no user specified theta?
# rotate graph so highest point is farthest (this could be improved)
theta <- -35
nr <- nrow(yhat)
nc <- ncol(yhat)
imax <- which.max(c(
get.diag.val(nr:1, nc:1),
get.diag.val(1:nr, nc:1),
get.diag.val(1:nr, 1:nc),
get.diag.val(nr:1, 1:nc)))
if(length(imax)) # length>0 unless entire diag is NA
theta <- theta + switch(imax, 0, 90, 180, 270)
}
theta
}
#--- plot.persp starts here
# following needed because persp() rejects a reversed xlim or ylim
if(xflip)
warning0("ignoring xflip=TRUE for persp plot")
if(yflip)
warning0("ignoring yflip=TRUE for persp plot")
theta <- get.theta(...)
cex1 <- par("cex") # persp needs an explicit cex arg, doesn't use par("cex")
trace2(trace, "persp(%s:%s) theta %.3g\n", name1, name2, theta)
if(swapxy) {
temp <- grid1; grid1 <- grid2; grid2 <- temp # swap grid1 and grid2
temp <- ipred1; ipred1 <- ipred2; ipred2 <- temp # swap ipred1 and ipred2
temp <- name1; name1 <- name2; name2 <- temp # swap name1 and name2
yhat <- t(yhat)
}
zlab <- dot("ylab", DEF="", ...) # use ylab as zlab if specified
zlab <- repl(zlab, nsingles+ipair)[nsingles+ipair]
cex.lab <- dot("persp.cex.lab",
# make the labels small if multiple figures
DEF=if(def.cex.main < 1) .8 * def.cex.main else 1, ...)
# persp ignores mgp so prefix a newline to space the axis label
# we also prepend spaces else bottom of label tends to get cut off
if(theta < 0) theta <- theta + 360
theta <- theta %% 360
if((0 < theta && theta <= 90) || (180 < theta && theta <= 270)) {
xlab <- paste0("\n", name1, " ")
ylab <- paste0("\n ", name2)
} else {
xlab <- paste0("\n ", name1)
ylab <- paste0("\n", name2, " ")
}
# We use deprefix directly (and not call.plot) because
# we have to do a bit of manipulation of the args for nticks.
# Also we cannot use graphics:::persp.default because CRAN check complains
# about ":::". Instead we explicitly pass the formal argnames with formals.
persp.def.formals <- c( # formal args for persp.default (R version 3.2.0)
"x", "y", "z", "xlim", "zlim", "xlab", "ylab", "zlab", "main", "sub",
"theta", "phi", "r", "d", "scale", "expand", "col", "border", "ltheta",
"lphi", "shade", "box", "axes", "nticks", "ticktype")
args <- deprefix(graphics::persp, # calls persp.default
FNAME = "persp",
KEEP = "PREFIX,PLOT.ARGS",
FORMALS = persp.def.formals,
TRACE = if(ipair == 1 && trace >= 2) trace-1 else 0,
force.x = grid1,
force.y = grid2,
force.z = yhat,
force.xlim = range(grid1), # prevent use of user specified xlim and ylim
force.ylim = range(grid2),
# persp won't accept zlim=NULL
force.zlim = if(is.null(ylim)) ylim <- range(yhat) else ylim,
force.xlab = xlab,
force.ylab = ylab,
force.theta = theta,
force.phi = dot("persp.phi phi", EX=c(0,1), DEF=30, ...),
force.d = dot("persp.d dvalue", EX=c(0,1), DEF=1, ...),
force.main = main2,
def.cex.lab = cex.lab,
def.cex.axis = cex.lab,
def.zlab = zlab,
def.ticktype = "simple",
def.nticks = 5,
def.cex = cex1,
force.col = dot("persp.col col.persp",
EX=c(0,1), DEF="lightblue", NEW=1, ...),
def.border = NULL,
def.shade = .5,
...)
# if ticktype="simple" we must call persp without the nticks arg
# else persp emits confusing error messages
if(substr(ticktype2, 1, 1) == "s")
args["nticks"] <- NULL
# We use suppressWarnings below to suppress the warning
# "surface extends beyond the box" that was introduced in R 2.13-1.
# This warning may be issued multiple times and may be annoying to the plotmo user.
    # (Unfortunately this also suppresses any other warnings in persp.)
# TODO Want to use lab=c(2,2,7) or similar in persp but persp ignores it
suppressWarnings(
do.call.trace(graphics::persp, args, fname="graphics::persp", trace=0))
}
plot.contour <- function(x, grid1, grid2, yhat, name1, name2, ipred1, ipred2,
xflip, yflip, swapxy, main2, pt.col,
jitter, ux.list, ndiscrete, iresponse, ...)
{
get.lim <- function(xflip, grid1, ipred)
{
# contour() automatically extends ylim, so we don't need to do it here
xrange <- range(grid1)
if(xflip)
c(xrange[2], xrange[1])
else
c(xrange[1], xrange[2])
}
#--- plot.contour starts here
x1 <- x[,ipred1]
x2 <- x[,ipred2]
levnames1 <- levels(x1)
levnames2 <- levels(x2)
is.fac1 <- is.factor(x1) && length(levnames1) <= 12
is.fac2 <- is.factor(x2) && length(levnames2) <= 12
xlab <- if(is.fac1) "" else name1 # no lab if fac else on top of lev name
ylab <- if(is.fac2) "" else name2
if(swapxy) {
temp <- levnames2; levnames2 <- levnames1; levnames1 <- temp
temp <- is.fac2; is.fac2 <- is.fac1; is.fac1 <- temp
temp <- ylab; ylab <- xlab; xlab <- temp
}
xlim <- get.lim(xflip, grid1, ipred1)
ylim <- get.lim(yflip, grid2, ipred2)
if(swapxy) {
temp <- xlim; xlim <- ylim; ylim <- temp
}
levels <- get.contour.levs(yhat)
labels <- signif(levels, 2) # else contour prints labels like 0.0157895
cex.lab <- par("cex") * dot("cex.lab", DEF=1, ...)
    # We use suppressWarnings below to suppress the warning "all z values are
    # equal". This warning may be issued multiple times and may be annoying to
    # the plotmo user. (Unfortunately this also suppresses any other warnings
    # in contour.)
suppressWarnings(
call.plot(graphics::contour.default,
force.x = if(swapxy) grid2 else grid1,
force.y = if(swapxy) grid1 else grid2,
force.z = if(swapxy) t(yhat) else yhat,
force.xlim = xlim,
force.ylim = ylim,
force.xlab = xlab,
force.ylab = ylab,
def.xaxt = if(is.fac1) "n" else "s",
def.yaxt = if(is.fac2) "n" else "s",
def.main = main2,
def.levels = levels,
def.labels = labels,
def.labcex = par("cex") * cex.lab,
...))
if(is.fac1) {
levnames1 <- abbreviate(levnames1, minlength=6, strict=TRUE)
mtext(levnames1, side=1, at=1:length(levnames1),
cex=cex.lab, line=.5, las=get.las(levnames1))
}
if(is.fac2)
mtext(abbreviate(levnames2, minlength=6, strict=TRUE),
side=2, at=1:length(levnames2),
cex=cex.lab, line=.5, las=2)
if(is.specified(pt.col))
draw.response.sites(x=x, ipred1=ipred1, ipred2=ipred2,
pt.col=pt.col, jitter=jitter, ux.list=ux.list,
ndiscrete=ndiscrete, iresponse=iresponse, swapxy=swapxy, ...)
}
get.contour.levs <- function(yhat)
{
# the default, as calculated internally by plot.contour
levs <- pretty(range(yhat, finite=TRUE), 10)
# reduce the default if the number of unique yhat values is less
# this is mainly for factors
unique.yhat <- sort.unique(yhat)
if(length(unique.yhat) > 1 && length(unique.yhat) < length(levs))
levs <- unique.yhat
levs
}
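# For example (illustrative): if yhat only takes the values 0 and 1, pretty() would
# propose a finer grid of about 11 levels, but because there are just two unique
# predicted values the contour levels collapse to c(0, 1).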
plot.image <- function(x, grid1, grid2, yhat, name1, name2, ipred1, ipred2,
xflip, yflip, swapxy, main2, pt.col,
jitter, ux.list, ndiscrete, iresponse, ...)
{
# like image but fill the plot area with lightblue first so NAs are obvious
image.with.lightblue.na <- function(grid1, grid2, yhat, ...)
{
if(anyNA(yhat)) {
image(grid1, grid2, matrix(0, nrow(yhat), ncol(yhat)),
col="lightblue",
xlab="", ylab="", xaxt="n", yaxt="n", bty="n", main="")
par(new=TRUE) # so next plot is on top of this plot
}
call.plot(graphics::image.default,
force.x=grid1, force.y=grid2, force.z=yhat, ...)
box() # image() tends to overwrite the borders of the box
}
get.lim <- function(xflip, grid1, is.discrete)
{
xrange <- range(grid1)
if(is.discrete) {
xrange[1] <- xrange[1] - .5
xrange[2] <- xrange[2] + .5
} else {
range <- xrange[2] - xrange[1]
# .025 seems the max we can use without getting unsightly
# gaps at the edges of the plot
xrange[1] <- xrange[1] - .025 * range
xrange[2] <- xrange[2] + .025 * range
}
if(xflip)
c(xrange[2], xrange[1])
else
c(xrange[1], xrange[2])
}
#--- plot.image starts here
x1 <- x[,ipred1]
x2 <- x[,ipred2]
levnames1 <- levels(x1)
levnames2 <- levels(x2)
use.fac.names1 <- is.factor(x1) && length(levnames1) <= 12
use.fac.names2 <- is.factor(x2) && length(levnames2) <= 12
xlab <- if(use.fac.names1) "" else name1 # no lab if fac else on top of lev name
ylab <- if(use.fac.names2) "" else name2
if(swapxy) {
temp <- levnames2; levnames2 <- levnames1; levnames1 <- temp
temp <- use.fac.names2; use.fac.names2 <- use.fac.names1; use.fac.names1 <- temp
temp <- ylab; ylab <- xlab; xlab <- temp
}
xlim <- get.lim(xflip, grid1,
use.fac.names1 || length(ux.list[[ipred1]]) <= ndiscrete)
ylim <- get.lim(yflip, grid2,
use.fac.names2 || length(ux.list[[ipred2]]) <= ndiscrete)
# default col: white high values (snowy mountain tops), dark low values (dark depths)
if(swapxy)
image.with.lightblue.na(grid1=grid2, grid2=grid1, yhat=t(yhat),
force.col = dot("image.col col.image", EX=c(0,1),
DEF=gray((0:10)/10), NEW=1, ...),
force.main = main2,
force.xlim = ylim,
force.ylim = xlim,
force.xaxt = if(use.fac.names1) "n" else "s",
force.yaxt = if(use.fac.names2) "n" else "s",
force.xlab = xlab,
force.ylab = ylab,
...)
else
image.with.lightblue.na(grid1=grid1, grid2=grid2, yhat=yhat,
force.col = dot("image.col col.image", EX=c(0,1),
DEF=gray((0:10)/10), NEW=1, ...),
force.main = main2,
force.xlim = xlim,
force.ylim = ylim,
force.xaxt = if(use.fac.names1) "n" else "s",
force.yaxt = if(use.fac.names2) "n" else "s",
force.xlab = xlab,
force.ylab = ylab,
...)
cex.lab <- par("cex") * dot("cex.lab", DEF=1, ...)
if(use.fac.names1) {
levnames1 <- abbreviate(levnames1, minlength=6, strict=TRUE)
mtext(levnames1, side=1, at=1:length(levnames1),
cex=cex.lab, line=.5, las=get.las(levnames1))
}
if(use.fac.names2)
mtext(abbreviate(levnames2, minlength=6, strict=TRUE),
side=2, at=1:length(levnames2),
cex=cex.lab, line=.5, las=2)
if(is.specified(pt.col))
draw.response.sites(x=x, ipred1=ipred1, ipred2=ipred2,
pt.col=pt.col, jitter=jitter, ux.list=ux.list,
ndiscrete=ndiscrete, iresponse=iresponse, swapxy=swapxy, ...)
}
apply.inverse.func <- function(inverse.func, y, object, trace)
{
if(!is.null(inverse.func)) {
if(!is.numeric(y[1]))
stopf("inverse.func cannot be used on \"%s\" values", class(y[1])[1])
y <- process.y(inverse.func(y), object, type="response", nresponse=1,
length(y), NULL, trace, "inverse.func")$y
}
y
}
# should the factor labels on the x axis be printed horizontally or vertically?
get.las <- function(labels)
{
if(length(labels) * max(nchar(labels)) <= 20) # 20 is arbitrary
0 # horizontal
else
2 # vertical
}
# true if a plot was selected by the user (excluding the default setting)
is.degree.specified <- function(degree)
{
!is.logical(degree) || length(degree) > 1
}
my.center <- function(x, trace=FALSE)
{
if(!is.null(x) && !is.factor(x)) {
x <- x - mean(x[is.finite(x)], na.rm=TRUE)
if(trace >= 2) {
name <- paste0("centered ", trunc.deparse(substitute(x)))
cat(name, "length ", length(x))
print.first.few.elements.of.vector(x, trace, name)
}
}
x
}
|
/plotmo/R/plotmo.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 67,472 |
r
|
# plotmo.R: plot the model response when varying one or two predictors
#
# Stephen Milborrow Sep 2006 Cape Town
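# Illustrative usage sketch (not part of the package source): a minimal call,
# assuming the plotmo package is installed and using the builtin trees data.
# The lm model and variable names here are hypothetical examples, not taken
# from this file.
if (FALSE) {
    library(plotmo)
    mod <- lm(Volume ~ Girth + Height, data = trees)
    plotmo(mod)                           # degree1 and degree2 plots, default settings
    plotmo(mod, pt.col = 2, level = .95)  # add response points and 95% intervals
}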
plotmo <- function(object = stop("no 'object' argument"),
type = NULL,
nresponse = NA,
pt.col = 0,
jitter = .5,
smooth.col = 0,
level = 0,
func = NULL,
inverse.func = NULL,
nrug = 0,
grid.col = 0,
type2 = "persp",
degree1 = TRUE,
all1 = FALSE,
degree2 = TRUE,
all2 = FALSE,
do.par = TRUE,
clip = TRUE,
ylim = NULL,
caption = NULL,
trace = 0,
grid.func = median,
grid.levels = NULL,
extend = 0,
ngrid1 = 50,
ngrid2 = 20,
ndiscrete = 5,
npoints = 3000,
center = FALSE,
xflip = FALSE,
yflip = FALSE,
swapxy = FALSE,
int.only.ok = TRUE,
...)
{
init.global.data()
on.exit(init.global.data()) # release memory on exit
object.name <- quote.deparse(substitute(object))
object # make sure object exists
trace <- as.numeric(check.integer.scalar(trace, logical.ok=TRUE))
# Associate the model environment with the object.
# (This is instead of passing it as an argument to plotmo's data access
# functions. It saves a few hundred references to model.env in the code.)
attr(object, ".Environment") <- get.model.env(object, object.name, trace)
temp <- plotmo_prolog(object, object.name, trace, ...)
object <- temp$object
my.call <- temp$my.call
# We will later make two passes through the plots if we need to
# automatically determine ylim (see get.ylim.by.dummy.plots).
# The trace2 variable is used for disabling tracing on the second pass.
trace2 <- trace
# trace=100 to 103 are special values used for development
# (they are for tracing just plotmo_x with no plotting)
special.trace <- FALSE
if(trace >= 100 && trace <= 103) {
special.trace <- TRUE
trace <- trace - 100
}
clip <- check.boolean(clip)
all1 <- check.boolean(all1)
all2 <- check.boolean(all2)
center <- check.boolean(center)
swapxy <- check.boolean(swapxy)
xflip <- check.boolean(xflip)
yflip <- check.boolean(yflip)
type2 <- match.choices(type2, c("persp", "contour", "image"), "type2")
level <- get.level(level, ...)
pt.col <- get.pt.col(pt.col, ...)
jitter <- get.jitter(jitter, ...)
ngrid1 <- get.ngrid1(ngrid1, ...)
ngrid2 <- get.ngrid2(ngrid2, ...)
smooth.col <- get.smooth.col(smooth.col, ...)
check.integer.scalar(ndiscrete, min=0)
nrug <- get.nrug(nrug, ...)
extend <- check.numeric.scalar(extend)
stopifnot(extend > -.3, extend <= 10) # .3 prevents shrinking to nothing, 10 is arb
# TODO revisit this, causes issues because the following for example produces
# the identical last two plots: for(i in 1:3) a <- earth(.., nfold=3); plot(a)
rnorm(1) # seems to be necessary to make .Random.seed available
old.seed <- .Random.seed
on.exit(set.seed(old.seed), add=TRUE)
set.seed(2015)
if(!is.specified(degree1)) degree1 <- 0
if(!is.specified(degree2)) degree2 <- 0
if(!is.specified(nresponse)) nresponse <- NA
if(!is.specified(clip)) clip <- FALSE
if(center && clip) {
clip <- FALSE # otherwise incorrect clipping (TODO revisit)
warning0("forcing clip=FALSE because center=TRUE ",
"(a limitation of the current implementation)")
}
# get x so we can get the predictor names and ux.list
x <- plotmo_x(object, trace)
if(NCOL(x) == 0 || NROW(x) == 0)
stop("x is empty")
if(special.trace) # special value of trace was used?
return(invisible(x))
meta <- plotmo_meta(object, type, nresponse, trace,
msg.if.predictions.not.numeric=
if(level > 0) "the level argument is not allowed" else NULL,
...)
y <- meta$y.as.numeric.mat # y as a numeric mat, only the nresponse column
nresponse <- meta$nresponse # column index
resp.name <- meta$resp.name # used only in automatic caption, may be NULL
resp.levs <- meta$resp.levs # to convert predicted strings to factors, may be NULL
type <- meta$type # always a string (converted from NULL if necessary)
# following prevents aliasing on nrow(data) to ensure we catch the following:
# "warning: predict(): newdata' had 31 rows but variable(s) found have 30 rows"
if(ngrid1 == length(y)) {
trace2(trace, "changed ngrid1 from %g to %g\n", ngrid1, ngrid1+1)
ngrid1 <- ngrid1 + 1
}
temp <- get.unique.xyvals(x, y, npoints, trace)
ux.list <- temp$ux.list # list, each elem is unique vals in a column of x
uy <- temp$uy # unique y vals
npoints <- temp$npoints
y <- apply.inverse.func(inverse.func, y, object, trace)
if(center)
y <- my.center(y, trace)
# get iresponse
ncases <- nrow(x)
iresponse <- NULL
if(is.specified(pt.col)) {
iresponse <- get.iresponse(npoints, ncases)
if(is.null(iresponse))
pt.col <- 0
}
# singles is a vector of indices of predictors for degree1 plots
temp <- plotmo_singles(object, x, nresponse, trace, degree1, all1)
some.singles <- temp$some.singles
singles <- temp$singles
# each row of pairs is the indices of two predictors for a degree2 plot
temp <- plotmo_pairs(object, x, nresponse, trace, all2, degree2)
some.pairs <- temp$some.pairs
pairs <- temp$pairs
nsingles <- length(singles)
npairs <- NROW(pairs)
temp <- get.pred.names(colnames(x), nsingles + npairs)
pred.names <- temp$pred.names
abbr.pred.names <- temp$abbr.pred.names
def.cex.main <- temp$def.cex.main
is.int.only <- !some.singles && !some.pairs
if(is.int.only && int.only.ok && !all(degree1 == 0)) {
singles <- 1 # plot the first predictor
nsingles <- 1
}
if(nsingles > 100) { # 100 is arb, 10 * 10
singles <- singles[1:100]
warning0("Will plot only the first 100 degree1 plots")
}
if(npairs > 100) {
pairs <- pairs[1:100,]
warning0("Will plot only the first 100 degree2 plots")
}
if(extend != 0 && npairs) {
warning0("extend=", extend, ": will not plot degree2 plots ",
"(extend is not yet implemented for degree2 plots)")
pairs <- NULL
npairs <- 0
}
nfigs <- nsingles + npairs
if(nfigs == 0) {
if(trace >= 0) {
if(is.int.only)
warning0("plotmo: nothing to plot (intercept-only model)")
else
warning0("plotmo: nothing to plot")
}
return(invisible())
}
do.par <- check.do.par(do.par, nfigs) # do.par is 0, 1, or 2
# Prepare caption --- we need it now for do.par() but
# can only display it later after at least one plot.
# nfigs=2 (any number greater than 1) because by default we do.par in plotmo.
caption <- get.caption(nfigs=2, do.par, caption, resp.name, type,
object$call, object.name, my.call)
if(do.par) {
# TODO document what happens here and in plotres if only one plot
oldpar <- par(no.readonly=TRUE)
# need xlab etc. so we can figure out margin sizes in do.par
xlab <- dot("xlab", DEF="", ...)
ylab <- dot("ylab", DEF="", ...)
main <- dot("main", ...)
do.par(nfigs=nfigs, caption=caption, main1=main,
xlab1=xlab, ylab1=ylab, trace=trace, def.cex.main=def.cex.main, ...)
if(do.par == 1)
on.exit(par(oldpar), add=TRUE)
} else { # do.par=FALSE
oldpar <- do.par.dots(..., trace=trace)
if(length(oldpar))
on.exit(do.call(par, oldpar), add=TRUE)
}
trace2(trace, "\n----Figuring out ylim\n")
is.na.ylim <- !is.null(ylim) && anyNA(ylim)
jittered.y <- apply.jitter(as.numeric(y), jitter)
# get.ylim will do dummy plots if necessary
temp <- get.ylim(object=object,
type=type, nresponse=nresponse, pt.col=pt.col,
jitter=jitter, smooth.col=smooth.col, level=level,
func=func, inverse.func=inverse.func, nrug=nrug, grid.col=grid.col,
type2=type2, degree1=degree1, all1=all1, degree2=degree2, all2=all2,
do.par=do.par, clip=clip, ylim=ylim, caption=caption, trace=trace,
grid.func=grid.func, grid.levels=grid.levels, extend=extend,
ngrid1=ngrid1, ngrid2=ngrid2, npoints=npoints, ndiscrete=ndiscrete,
int.only.ok=int.only.ok, center=center, xflip=xflip, yflip=yflip,
swapxy=swapxy, def.cex.main=def.cex.main,
x=x, y=y, singles=singles, resp.levs=resp.levs,
ux.list=ux.list,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nsingles=nsingles, npairs=npairs, nfigs=nfigs, uy=uy,
is.na.ylim=is.na.ylim, is.int.only=is.int.only, trace2=trace2,
pairs=pairs, iresponse=iresponse, jittered.y=jittered.y, ...)
ylim <- temp$ylim
trace2 <- temp$trace2
if(nsingles)
plot.degree1(object=object, degree1=degree1, all1=all1, center=center,
ylim=if(is.na.ylim) NULL else ylim, # each graph has its own ylim?
nresponse=nresponse, type=type, trace=trace, trace2=trace2,
pt.col=pt.col, jitter=jitter, iresponse=iresponse,
smooth.col=smooth.col, grid.col=grid.col, inverse.func=inverse.func,
grid.func=grid.func, grid.levels=grid.levels, extend=extend,
ngrid1=ngrid1, is.int.only=is.int.only, level=level,
func=func, nrug=nrug,
draw.plot=TRUE, x=x, y=y, singles=singles, resp.levs=resp.levs,
ux.list=ux.list, ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs, uy=uy, xflip=xflip, jittered.y=jittered.y, ...)
if(npairs)
plot.degree2(object=object, degree2=degree2, all2=all2, center,
ylim=if(is.na.ylim) NULL else ylim, # each graph has its own ylim?
nresponse=nresponse, type=type, clip=clip, trace=trace, trace2=trace2,
pt.col=pt.col, jitter=jitter, iresponse=iresponse,
inverse.func=inverse.func,
grid.func=grid.func, grid.levels=grid.levels, extend=extend,
type2=type2, ngrid2=ngrid2, draw.plot=TRUE, do.par=do.par, x=x, y=y,
pairs=pairs, resp.levs=resp.levs, ux.list=ux.list,
ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs, nsingles=nsingles, npairs=npairs, xflip=xflip, yflip=yflip,
swapxy=swapxy, def.cex.main=def.cex.main, ...)
draw.caption(caption, ...)
invisible(x)
}
plotmo_prolog <- function(object, object.name, trace, ...)
{
object <- plotmo.prolog(object, object.name, trace, ...)
my.call <- call.as.char(n=2)
callers.name <- callers.name()
if(trace >= 2) {
printf.wrap("%s trace %g: %s\n", callers.name, trace, my.call)
if(is.null(object$call))
printf("object class is \"%s\" with no object$call\n", class(object)[1])
else
printf.wrap("object$call is %s\n", strip.deparse(object$call))
}
SHOWCALL <- dot("SHOWCALL", ...)
if(!is.specified(SHOWCALL))
my.call <- NULL
list(object=object, my.call=my.call)
}
get.pred.names <- function(colnames.x, nfigs)
{
# numbers below are somewhat arb
nrows <- ceiling(sqrt(nfigs)) # nrows in plot grid
minlength <- 20; def.cex.main <- 1.2
if (nrows >= 9) { minlength <- 6; def.cex.main <- .7 }
else if(nrows >= 8) { minlength <- 7; def.cex.main <- .8 }
else if(nrows >= 7) { minlength <- 7; def.cex.main <- .8 }
else if(nrows >= 6) { minlength <- 7; def.cex.main <- .8 }
else if(nrows >= 5) { minlength <- 8; def.cex.main <- 1 }
else if(nrows >= 4) { minlength <- 9; def.cex.main <- 1.1 }
stopifnot(!is.null(colnames.x)) # plotmo_x always returns colnames (unless no columns)
list(pred.names = colnames.x,
abbr.pred.names = abbreviate(strip.space(colnames.x),
minlength=minlength, method="both.sides"),
def.cex.main = def.cex.main)
}
# always returns a vector of 2 elems, could be c(-Inf, Inf)
get.ylim <- function(object,
type, nresponse, pt.col, jitter, smooth.col, level, func,
inverse.func, nrug, grid.col, type2, degree1, all1, degree2, all2,
do.par, clip, ylim, caption, trace,
grid.func, grid.levels, extend=extend, ngrid1, ngrid2,
npoints, ndiscrete, int.only.ok, center, xflip, yflip, swapxy, def.cex.main,
x, y, singles, resp.levs, ux.list, pred.names, abbr.pred.names,
nsingles, npairs, nfigs, uy,
is.na.ylim, is.int.only, trace2, pairs,
iresponse, jittered.y, ...)
{
get.ylim.by.dummy.plots <- function(..., trace)
{
# call the plotting functions with draw.plot=FALSE to get the ylim
trace2(trace, "--get.ylim.by.dummy.plots\n")
all.yhat <- NULL
if(nsingles) { # get all.yhat by calling with draw.plot=FALSE
# have to use explicit arg names to prevent alias probs
# with dots, because the user can pass in any name with dots
all.yhat <- c(all.yhat,
plot.degree1(object=object, degree1=degree1, all1=all1,
center=center, ylim=ylim, nresponse=nresponse, type=type,
trace=trace, trace2=trace2, pt.col=pt.col,
jitter=jitter, iresponse=iresponse,
smooth.col=smooth.col, grid.col=grid.col,
inverse.func=inverse.func, grid.func=grid.func,
grid.levels=grid.levels, extend=extend, ngrid1=ngrid1,
is.int.only=is.int.only,
level=level, func=func, nrug=nrug, draw.plot=FALSE, x=x, y=y,
singles=singles, resp.levs=resp.levs,
ux.list=ux.list, ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs, uy=uy, xflip=xflip, jittered.y=jittered.y, ...))
}
if(npairs) {
all.yhat <- c(all.yhat,
plot.degree2(object=object, degree2=degree2, all2=all2,
center=center, ylim=ylim, nresponse=nresponse, type=type,
clip=clip, trace=trace, trace2=trace2, pt.col=pt.col,
jitter=jitter, iresponse=iresponse,
inverse.func=inverse.func, grid.func=grid.func,
grid.levels=grid.levels, extend=extend,
type2=type2, ngrid2=ngrid2,
draw.plot=FALSE, do.par=do.par, x=x, y=y, pairs=pairs,
resp.levs=resp.levs, ux.list=ux.list,
ndiscrete=ndiscrete,
pred.names=pred.names, abbr.pred.names=abbr.pred.names,
nfigs=nfigs,
nsingles=nsingles, npairs=npairs, xflip=xflip, yflip=yflip,
swapxy=swapxy, def.cex.main=def.cex.main, ...))
} # 1 2 3 4 5
q <- quantile(all.yhat, probs=c(0, .25, .5, .75, 1))
ylim <- c(q[1], q[5]) # all the data
# iqr test to prevent clipping in some pathological cases
iqr <- q[4] - q[2] # middle 50% of the data (inter-quartile range)
if(clip && iqr > .05 * (max(y) - min(y))) {
median <- q[3]
ylim[1] <- max(ylim[1], median - 10 * iqr)
ylim[2] <- min(ylim[2], median + 10 * iqr)
}
if(is.specified(pt.col) || is.specified(smooth.col) || is.specified(level))
ylim <- range1(ylim, jittered.y) # ensure ylim big enough for resp points
else if(is.specified(smooth.col))
ylim <- range1(ylim, y)
# binary or ternary response?
# the range(uy) test is needed for binomial models specified using counts
else if(length(uy) <= 3 || all(range(y) == c(0,1)))
ylim <- range1(ylim, y)
if(is.specified(nrug)) # space for rug
ylim[1] <- ylim[1] - .1 * (ylim[2] - ylim[1])
trace2(trace, "--done get.ylim.by.dummy.plots\n\n")
# have called the plot functions, minimize tracing in further calls to them
trace2 <<- 0 # note <<- not <-
ylim
}
#--- get.ylim starts here
if(!(is.null(ylim) || is.na(ylim[1]) || length(ylim) == 2))
stop0("ylim must be one of:\n",
" NULL all graphs have same vertical axes\n",
" NA each graph has its own vertical axis\n",
" c(min,max) ylim for all graphs")
if(length(ylim) == 2 && ylim[2] <= ylim[1])
stop0("ylim[2] ", ylim[2], " is not greater than ylim[1] ", ylim[1])
if(is.na.ylim)
ylim <- c(NA, NA) # won't be used
else if(is.null(ylim)) # auto ylim
ylim <- if(is.int.only) range(y, na.rm=TRUE)
else get.ylim.by.dummy.plots(trace=trace, ...)
if(!anyNA(ylim))
ylim <- fix.lim(ylim)
if(trace >= 2)
printf("ylim c(%.4g, %.4g) clip %s\n\n",
ylim[1], ylim[2], if(clip) "TRUE" else "FALSE")
list(ylim=ylim, trace2=trace2)
}
do.degree2.par <- function(type2, nfigs, detailed.ticktype)
{
nrows <- ceiling(sqrt(nfigs))
if(type2 == "persp") { # perspective plot
# note: persp ignores both the global mgp and any mgp passed directly to persp
mar <- c(if(detailed.ticktype) 1 else .2, .3, 1.7, 0.1)
par(mar=mar)
return(NULL)
} else { # contour or image plot
if(nrows >= 5)
mar <- c(2, 2, 1.2, .5) # space for bottom and left axis labels
else
mar <- c(3, 3, 2, .5)
par(mar=mar)
cex <- par("cex") # TODO would be better to use nfigs here?
mgp <- # compact title and axis annotations
if (cex < .7) c(1.2, 0.2, 0)
else if(cex < .8) c(1.3, 0.3, 0)
else c(1.5, 0.4, 0)
par(mgp=mgp)
}
}
plotmo_singles <- function(object, x, nresponse, trace, degree1, all1)
{
trace2(trace, "\n----plotmo_singles for %s object\n", class(object)[1])
singles <- plotmo.singles(object=object,
x=x, nresponse=nresponse, trace=trace, all1=all1)
some.singles <- FALSE
if(length(singles)) {
singles <- sort.unique(singles)
some.singles <- TRUE
}
nsingles <- length(singles)
if(nsingles) {
degree1 <- check.index(degree1, "degree1", singles, colnames=colnames(x),
allow.empty=TRUE, is.degree.spec=TRUE)
singles <- singles[degree1]
} else if(is.degree.specified(degree1) && degree1[1] != 0 && trace >= 0)
warning0("'degree1' specified but no degree1 plots")
if(trace >= 2) {
if(nsingles)
cat("singles:", paste0(singles, " ", colnames(x)[singles], collapse=", "), "\n")
else
cat("no singles\n")
}
list(some.singles=some.singles,
singles =singles) # a vector of indices of predictors for degree1 plots
}
plotmo_pairs <- function(object, x, nresponse, trace, all2, degree2)
{
trace2(trace, "\n----plotmo_pairs for %s object\n", class(object)[1])
pairs <- plotmo.pairs(object, x, nresponse, trace, all2)
if(!NROW(pairs) || !NCOL(pairs))
pairs <- NULL
npairs <- NROW(pairs)
some.pairs <- FALSE
if(npairs) {
some.pairs <- TRUE
# put lowest numbered predictor first and remove duplicate pairs
pairs <- unique(t(apply(pairs, 1, sort)))
# order the pairs on the predictor order
order <- order(pairs[,1], pairs[,2])
pairs <- pairs[order, , drop=FALSE]
degree2 <- check.index(degree2, "degree2", pairs, colnames=colnames(x),
allow.empty=TRUE, is.degree.spec=TRUE)
pairs <- pairs[degree2, , drop=FALSE]
}
if(trace >= 2) {
if(npairs) {
cat("pairs:\n")
print(matrix(paste(pairs, colnames(x)[pairs]), ncol=2))
} else
cat("no pairs\n")
}
if(npairs == 0 && is.degree.specified(degree2) && degree2[1] != 0 && trace >= 0)
warning0("'degree2' specified but no degree2 plots")
list(some.pairs=some.pairs,
pairs =pairs)
}
# pt.col is a formal arg, but for back compat we also support col.response
get.pt.col <- function(pt.col, ...)
{
pt.col <- pt.col
if(!is.specified(pt.col) && !is.dot("col", ...))
pt.col <- dot("col.response", EX=0, ...) # partial match, "col" excluded above
# if any other response argument is specified, set the response color
if(!is.specified(pt.col) &&
is.dot("pch cex.response pch.response pt.cex pt.pch",
EX=c(1,1,1,0,0), ...))
pt.col <- "slategray4"
if(!is.specified(pt.col))
pt.col <- 0
pt.col
}
get.jitter <- function(jitter, ...)
{
if(anyNA(jitter)) # allow jitter=NA
jitter <- 0
check.numeric.scalar(jitter, logical.ok=TRUE)
jitter <- as.numeric(jitter)
if(jitter < 0 || jitter > 100)
stop0("jitter=", jitter, " is illegal")
jitter
}
get.smooth.col <- function(smooth.col, ...)
{
smooth.col <- dot("col.smooth", DEF=smooth.col, ...) # back compat
# if any other smooth argument is specified, set the smooth color
if(!is.specified(smooth.col) &&
is.dot("lty.smooth lwd.smooth lwd.loess smooth.lty smooth.lwd",
EX=c(1,1,1,0,0), ...))
smooth.col <- 2
if(!is.specified(smooth.col))
smooth.col <- 0
smooth.col
}
get.ngrid1 <- function(ngrid1, ...)
{
check.integer.scalar(ngrid1)
if(ngrid1 < 2)
stop0("illegal ngrid1 ", ngrid1)
if(ngrid1 > 1000) {
warning0("clipped ngrid1=", ngrid1, " to 1000")
ngrid1 <- 1000
}
ngrid1
}
get.ngrid2 <- function(ngrid2, ...)
{
check.integer.scalar(ngrid2)
if(ngrid2 < 2)
stop0("illegal ngrid2 ", ngrid2)
if(ngrid2 > 500) {
warning0("clipped ngrid2=", ngrid2, " to 500")
ngrid2 <- 500
}
ngrid2
}
get.level <- function(level, ...)
{
if(anyNA(level) || is.null(level)) # treat NA and NULL as 0
level <- 0
check.numeric.scalar(level)
# some code for backward compatibility (se is now deprecated)
se <- 0
if(is.dot("se", ...))
se <- dot("se", ...)
check.numeric.scalar(se, logical.ok=TRUE)
if(se && level) # both specified?
stop0("plotmo's 'se' argument is deprecated, please use 'level' instead")
if(identical(se, TRUE)) {
level <- .95
warning0(
"plotmo's 'se' argument is deprecated, please use 'level=.95' instead")
} else if (se < 0 || se > 5) # 5 is arb
stop0("plotmo's 'se' argument is deprecated, please use 'level=.95' instead")
else if (se > 0 && se < 1) # e.g. se=.95
stop0("plotmo's 'se' argument is deprecated, please use 'level=.95' instead")
else if (se > 0) {
level <- 1 - 2 * (1 - pnorm(se)) # se=2 becomes level=.954
warning0(sprintf(
"plotmo's 'se' argument is deprecated, please use 'level=%.2f' instead",
level))
} else if(level != 0 && (level < .5 || level >= 1))
stop0("level=", level, " is out of range, try level=.95")
level
}
get.unique.xyvals <- function(x, y, npoints, trace)
{
# convert special values of npoints
ncases <- nrow(x)
check.integer.scalar(npoints, min=-1, null.ok=TRUE, logical.ok=TRUE)
npoints.was.neg <- FALSE
if(is.null(npoints))
npoints <- 0
else if(is.logical(npoints))
npoints <- if(npoints) ncases else 0
else if(npoints == -1) {
npoints.was.neg <- TRUE
npoints <- ncases
} else if(npoints > ncases)
npoints <- ncases
# Use a maximum of NMAX cases for calculating ux.list and uy
# (unless npoints is bigger or TRUE or negative).
# Allows plotmo to be fast even on models with millions of cases.
NMAX <- 1e4
nmax <- max(NMAX, npoints)
if(!npoints.was.neg && ncases > nmax) {
trace2(trace, "using %g of %g cases to calculate unique x and y values\n",
npoints, ncases)
isubset <- get.isubset(y, npoints)
y <- y[isubset]
x <- x[isubset, , drop=FALSE]
}
list(ux.list = get.ux.list(x, trace),
uy = unique(y),
npoints = npoints)
}
# return a list, each element is the unique levels for corresponding column of x
# TODO this is where we spend a lot of time in plotmo for big data
get.ux.list <- function(x, trace)
{
ux.list <- list(colnames(x))
for(i in seq_len(ncol(x)))
ux.list[[i]] <- if(is.factor(x[,i])) levels(x[,i])
else sort.unique(x[,i])
trace2(trace, "number of x values: %s\n",
paste.trunc(colnames(x), sapply(ux.list, length)))
ux.list
}
# Remove duplicates in x, then sort (smallest first).
# I had sort(unique(x)), but following is faster because it requires only one sort.
sort.unique <- function(x)
{
rle(sort(x))[["values"]] # rle() is in base
}
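# Quick sanity sketch (illustrative only, base R): rle() on a sorted vector
# returns each distinct value exactly once, so the result matches
# sort(unique(x)) while doing only a single sort.
if (FALSE) {
    x <- c(3, 1, 2, 3, 1, 2)
    sort.unique(x)                             # 1 2 3
    identical(sort.unique(x), sort(unique(x))) # TRUE
}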
points.or.text <- function(..., x, y, pt.col, iresponse)
{
stopifnot(!is.na(pt.col))
cex <- dot("pt.cex cex.response", DEF=1, EX=c(0,1), NEW=1, ...)
cex <- cex * pt.cex(NROW(x))
pch <- dot("pt.pch pch.response pch", DEF=20, EX=c(0,1,1), NEW=1, ...)
# recycle then select only iresponse points
n <- length(y)
col <- repl(pt.col, n)[iresponse]
pch <- repl(pch, n)[iresponse]
cex <- repl(cex, n)[iresponse]
x <- x[iresponse]
y <- y[iresponse]
if(is.character(pch) && pch[1] != ".")
call.plot(graphics::text.default, PREFIX="pt.",
force.x = x,
force.y = y,
force.labels = pch,
force.col = col,
force.cex = pmax(.1, .9 * cex),
def.xpd = NA, # allow writing beyond plot area
...)
else
call.plot(graphics::points.default, PREFIX="pt.",
force.x = x,
force.y = y,
force.pch = pch,
force.col = col,
force.cex = cex,
# commented out because looks messy in image plots
# def.xpd = NA, # allow writing beyond plot area
...)
}
# The following global variables are for efficiency when we make two
# passes through the plot. We store the data from the first pass so we
# don't have to regenerate it.
degree1.xgrid.global <- NULL
degree2.xgrid.global <- NULL
# TODO Following is ugly. I would prefer to have two namespace level
# variables, degree1.data.global and degree2.data.global, similar to the
# above two variables. But CRAN check won't allow
# unlockBinding(degree1.data.global, asNamespace("plotmo")) so we can update
# those variables. Also, we can't directly use assignInMyNamespace for these
# variables because we need to update individual list elements.
make.static.list <- function() {
data <- list()
func <- function(i, newdata=NULL) {
if(is.null(i)) # init the data?
data <<- list()
else if(!missing(newdata)) # assign to the data?
data[[i]] <<- newdata
else if(i <= length(data)) # return the data element
data[[i]]
else # return the element, but it's NULL
NULL
}
func
}
degree1.data <- make.static.list()
degree2.data <- make.static.list()
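# Illustrative sketch of how these closure-based stores behave (guarded so it
# is never executed as part of the package; the list contents are hypothetical):
if (FALSE) {
    store <- make.static.list()
    store(NULL)                 # init: clears the store
    store(1, list(a = 1:3))     # assign element 1
    store(1)                    # retrieve element 1
    store(5)                    # never assigned, returns NULL
}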
trace.call.global <- 0 # nonzero to trace call to predict, residuals, etc
init.global.data <- function()
{
assignInMyNamespace("trace.call.global", 0)
assignInMyNamespace("degree1.xgrid.global", NULL)
assignInMyNamespace("degree2.xgrid.global", NULL)
degree1.data(NULL) # clear the degree1 data by passing NULL
degree2.data(NULL)
}
plot.degree1 <- function( # plot all degree1 graphs
# copy of args from plotmo, some have been tweaked slightly
object, degree1, all1, center,
ylim, nresponse, type,
trace, trace2,
pt.col, jitter, iresponse,
smooth.col, grid.col,
inverse.func, grid.func, grid.levels, extend,
ngrid1,
is.int.only, level,
func, nrug,
# the following args are generated in plotmo
draw.plot, # draw.plot=FALSE means get predictions but don't actually plot
x, y, singles, resp.levs, ux.list, ndiscrete,
pred.names, abbr.pred.names, nfigs, uy,
xflip, jittered.y,
...)
{
get.degree1.data <- function(isingle)
{
data <- degree1.data(isingle)
if(!is.null(data)) # data is already initialized?
return(data) # yes, use it
# create data.frame of x values to be plotted, by updating xgrid for this predictor
xframe <- get.degree1.xframe(xgrid, x, ipred, ngrid1,
ndiscrete, ux.list, extend)
trace2(trace, "degree1 plot %d %s\n",
isingle, pred.names[ipred])
yhat <- plotmo_predict(object, xframe, nresponse,
type, resp.levs, trace2, inverse.func, ...)$yhat
# prediction intervals, NULL if level argument not used
intervals <- NULL
if(level > 0)
intervals <- plotmo_pint(object, xframe, type, level, trace2,
ipred, inverse.func)
temp <- blockify.degree1.frame(xframe, yhat, intervals,
ipred, ux.list, ndiscrete)
xframe <- temp$xframe
yhat <- temp$yhat
intervals <- temp$intervals
if(center) {
yhat <- my.center(yhat, trace2)
intervals$fit <- my.center(intervals$fit, trace2)
intervals$lwr <- my.center(intervals$lwr, trace2)
intervals$upr <- my.center(intervals$upr, trace2)
intervals$cint.lwr <- my.center(intervals$cint.lwr, trace2)
intervals$cint.upr <- my.center(intervals$cint.upr, trace2)
}
all.yhat <- c(all.yhat, yhat,
intervals$lwr, intervals$upr,
intervals$cint.lwr, intervals$cint.upr)
data <- list(xframe=xframe, yhat=yhat, intervals=intervals, all.yhat=all.yhat)
if(!draw.plot) # save the data, if there is going to be a next time
degree1.data(isingle, data)
data
}
draw.degree1 <- function(...)
{
draw.degree1.fac <- function(...)
{
draw.grid(grid.col, nx=NA, ...) # nx=NA for horiz-only grid
draw.fac.intervals(xframe[,ipred], intervals, ...)
if(is.specified(pt.col))
points.or.text(x=jittered.x, y=jittered.y, pt.col=pt.col,
iresponse=iresponse, ...)
draw.smooth1(smooth.col, x, ipred, y, ux.list, ndiscrete, center, ...)
# formal args for plot.factor, needed because "CRAN check"
# doesn't allow ":::" and plot.factor isn't public
plot.factor.formals <- c("x", "y", "legend.text")
call.plot(graphics::plot, # calls plot.factor
PREFIX = "degree1.",
FORMALS = plot.factor.formals,
TRACE = if(isingle == 1 && trace >= 2) trace-1 else 0,
force.x = xframe[,ipred], force.y=yhat,
force.add = TRUE,
def.xaxt = if(xaxis.is.levs) "n" else "s",
def.yaxt = if(yaxis.is.levs) "n" else "s",
force.lty = 1, # else lty=2 say is printed weirdly
force.lwd = 1,
...)
if(xaxis.is.levs) # plot x level names along the x axis
mtext(xlevnames, side=1, at=1:length(xlevnames),
cex=par("cex") * cex.lab, line=.5, las=get.las(xlevnames))
if(yaxis.is.levs) # plot y level names along the y axis
mtext(ylevnames, side=2, at=1:length(ylevnames),
cex=par("cex") * cex.lab, line=.5, las=get.las(ylevnames))
}
draw.degree1.numeric <- function(...)
{
draw.grid(grid.col, ...)
draw.numeric.intervals(xframe[,ipred], intervals, ...)
draw.func(func, object, xframe, ipred, center, trace, ...)
if(is.specified(pt.col))
points.or.text(x=jittered.x, y=jittered.y, pt.col=pt.col,
iresponse=iresponse, ...)
draw.smooth1(smooth.col, x, ipred, y, ux.list, ndiscrete, center, ...)
call.plot(graphics::lines.default, PREFIX="degree1.",
force.x = xframe[,ipred], force.y = yhat,
force.col = dot("degree1.col col.degree1 col",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.lty = dot("degree1.lty lty.degree1 lty",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.lwd = dot("degree1.lwd lwd.degree1 lwd",
EX=c(0,1,1), DEF=1, NEW=1, ...),
...)
}
#--- draw.degree1 starts here
x1 <- x[,ipred]
numeric.x <- jittered.x <- as.numeric(x1)
jittered.x <- apply.jitter(numeric.x, jitter)
xlim <- get.degree1.xlim(ipred, xframe, ux.list, ndiscrete,
pt.col, jittered.x, xflip, ...)
# title of the current plot
main <- dot("main", ...)
main <- if(is.specified(main))
repl(main, isingle)[isingle]
else {
main <- ""
if(nfigs > 1 && !is.degree.specified(degree1))
main <- paste0(isingle, " ") # show plot number in headers
paste(main, abbr.pred.names[ipred])
}
xlevnames <- abbreviate(levels(xframe[,ipred]), minlength=6, strict=TRUE)
xaxis.is.levs <- is.factor(x1) && length(xlevnames) <= 12
yaxis.is.levs <- length(resp.levs) >= 1 && length(resp.levs) <= 12
if(yaxis.is.levs)
ylevnames <- abbreviate(resp.levs, minlength=6, strict=TRUE)
yaxis.is.levs <- FALSE # TODO should only do this if response is a string or a factor
xlab <- dot("xlab", ...)
xlab <- if(is.null(xlab)) abbr.pred.names[ipred]
else if(is.specified(xlab)) repl(xlab, isingle)[isingle]
else ""
ylab <- dot("ylab", DEF=NULL, ...)
ylab <- if(is.specified(ylab)) repl(ylab, isingle)[isingle]
else ""
call.plot(graphics::plot.default, PREFIX="degree1.",
TRACE = if(isingle == 1 && trace >= 2) trace-1 else 0,
force.x = xframe[,ipred],
force.y = yhat,
force.type = "n", # nothing in interior of plot yet
force.main = main,
force.xlab = xlab,
force.ylab = ylab,
force.xlim = xlim,
force.ylim = ylim,
def.xaxt = if(xaxis.is.levs) "n" else "s",
def.yaxt = if(yaxis.is.levs) "n" else "s",
...)
if(yaxis.is.levs) # plot y level names along the y axis
mtext(ylevnames, side=2, at=1:length(ylevnames),
cex=par("cex") * cex.lab, line=.5, las=get.las(ylevnames))
if(center &&
!is.specified(grid.col) &&
!is.specified(dot("col.grid", ...)))
abline(h=0, col="gray", lwd=.6) # gray line at y=0
if(is.int.only) # make it obvious that this is an intercept-only model
legend("topleft", "intercept-only model", bg="white")
if(is.factor(x1))
draw.degree1.fac(...)
else
draw.degree1.numeric(...)
if(is.character(nrug) || is.dot("density.col", EX=0, ...))
draw.density.along.the.bottom(numeric.x, ...)
else if(nrug)
call.plot(graphics::rug, force.x=jittered.x, def.quiet=TRUE, ...)
}
#--- plot.degree1 starts here
trace2(trace, "--plot.degree1(draw.plot=%s)\n", if(draw.plot) "TRUE" else "FALSE")
# get the x matrix we will plot, will be updated later for each predictor one by one
if(!is.null(degree1.xgrid.global)) # already have the data?
xgrid <- degree1.xgrid.global # yes, use it
else {
xgrid <- get.degree1.xgrid(x, grid.func, grid.levels, pred.names, ngrid1)
if(!draw.plot) # save the data, if there is going to be a next time
assignInMyNamespace("degree1.xgrid.global", xgrid)
}
# is.int.only test because we don't call get.ylim.by.dummy.plots for int only models
if((!draw.plot || is.int.only) && trace >= 0 && ncol(xgrid) > 1)
print.grid.values(xgrid, trace)
cex.lab <- dot("cex.lab", DEF=.8 * par("cex.main"), ...)
irug <- get.degree1.irug(nrug, x, draw.plot, ...) # get indices of rug points, if any
all.yhat <- NULL
for(isingle in seq_along(singles)) {
if(isingle == 2 && trace2 == 2) {
trace2 <- 1
printf("Reducing trace level for subsequent degree1 plots\n")
}
ipred <- singles[isingle] # ipred is the predictor index i.e. col in model mat
# following happens with lm if you do e.g. ozone1$doy <- NULL after using ozone1
# this won't catch all such errors
if(ipred > NCOL(x))
stop0("illegal index=", ipred, " (missing column in x?) NCOL(x)=", NCOL(x))
temp <- get.degree1.data(isingle)
xframe <- temp$xframe
yhat <- temp$yhat
intervals <- temp$intervals
all.yhat <- temp$all.yhat
if(draw.plot)
draw.degree1(...)
}
all.yhat # numeric vector of all predicted values
}
get.degree1.xlim <- function(ipred, xframe, ux.list, ndiscrete,
pt.col, jittered.x, xflip, ...)
{
xlim <- dot("xlim", ...)
if(is.specified(xlim))
stopifnot(is.numeric(xlim), length(xlim) == 2)
else {
x1 <- xframe[,ipred]
xlim <- range1(x1)
if(is.factor(x1)) {
xlim[1] <- xlim[1] - .4
xlim[2] <- xlim[2] + .4
} else if(length(ux.list[[ipred]]) <= ndiscrete)
xlim <- c(xlim[1] - .1, xlim[2] + .1)
if(is.specified(pt.col))
xlim <- range1(xlim, jittered.x)
}
xlim <- fix.lim(xlim)
if(xflip) {
temp <- xlim[1]
xlim[1] <- xlim[2]
xlim[2] <- temp
}
xlim
}
apply.jitter <- function(x, jitter, adjust=1)
{
if(jitter == 0)
return(x)
jitter(x, factor=adjust * jitter)
}
get.iresponse <- function(npoints, ncases) # get indices of xrows
{
check.integer.scalar(npoints)
if(npoints == 0)
return(NULL)
if(npoints == 1)
npoints <- -1
if(npoints <= 1 || npoints > ncases) # -1 or TRUE means all cases
npoints <- ncases
if(npoints == ncases)
seq_len(ncases)
else
sample(seq_len(ncases), size=npoints, replace=FALSE)
}
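# Illustrative behavior sketch (guarded, not executed): npoints == 0 returns
# NULL (no response points are drawn); -1 or a value exceeding ncases selects
# all cases; otherwise a random subset of row indices of that size is drawn.
if (FALSE) {
    get.iresponse(npoints = 0,  ncases = 100)  # NULL, no response points
    get.iresponse(npoints = -1, ncases = 100)  # 1:100, all cases
    get.iresponse(npoints = 10, ncases = 100)  # 10 randomly sampled indices
}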
draw.smooth1 <- function(smooth.col, x, ipred, y, ux.list, ndiscrete, center, ...)
{
if(!is.specified(smooth.col))
return(NULL)
x1 <- x[,ipred]
is.discrete.x <- FALSE
if(is.factor(x1)) {
is.discrete.x <- TRUE
levels <- sort.unique(as.numeric(x1))
} else if(length(ux.list[[ipred]]) <= ndiscrete) {
is.discrete.x <- TRUE
levels <- ux.list[[ipred]]
}
if(is.discrete.x) {
# x1 has discrete levels, display the mean y at each value of x1
smooth <- sapply(split(y, x1), mean)
if(center)
    smooth <- my.center(smooth)
call.plot(graphics::lines.default, PREFIX="smooth.", drop.f=1,
force.x = levels,
force.y = smooth,
force.col = smooth.col,
force.lty = dot("smooth.lty lty.smooth",
EX=c(0,1), DEF=1, NEW=1, ...),
force.lwd = dot("smooth.lwd lwd.smooth lwd.loess",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.pch = dot("smooth.pch", DEF=20, EX=0, ...),
def.type = "b",
...)
} else {
# For less smoothing (so we can better judge earth inflection points),
# we use a default value for f lower than the default 2/3.
smooth.f <- dot("smooth.f loess.f", DEF=.5, NEW=1, ...)
check.numeric.scalar(smooth.f)
stopifnot(smooth.f > .01, smooth.f < 1)
smooth <- lowess(x1, y, f=smooth.f)
y <- if(center) my.center(smooth$y) else smooth$y
call.plot(graphics::lines.default, PREFIX="smooth.", drop.f=1,
force.x = smooth$x,
force.y = y,
force.col = smooth.col,
force.lty = dot("smooth.lty lty.smooth", EX=c(0,1), DEF=1, NEW=1, ...),
force.lwd = dot("smooth.lwd lwd.smooth lwd.loess",
EX=c(0,1,1), DEF=1, NEW=1, ...),
force.pch = dot("smooth.pch", DEF=20, EX=0, ...),
...)
}
}
get.nrug <- function(nrug, ...)
{
if(!is.specified(nrug))
nrug <- 0
else if(!is.character(nrug)) {
check.integer.scalar(nrug, logical.ok=TRUE)
if(nrug == TRUE)
nrug <- -1
else if(!is.specified(nrug) && is.dot("rug.col", ...))
nrug <- -1
}
nrug
}
get.degree1.irug <- function(nrug, x, draw.plot, ...) # indices of xrows for rug
{
if(!draw.plot || nrug == 0)
return(NULL)
if(is.character(nrug))
nrug <- -1
else
check.integer.scalar(nrug, logical.ok=TRUE)
if(nrug < 0 || nrug > nrow(x))
nrug <- nrow(x)
if(nrug == nrow(x))
seq_len(nrow(x))
else
sample(seq_len(nrow(x)), size=nrug, replace=FALSE)
}
draw.grid <- function(grid.col, nx=NULL, ...)
{
if(is.specified(grid.col) || is.specified(dot("col.grid", ...))) {
if(is.specified(grid.col) && is.logical(grid.col) && grid.col)
grid.col <- "lightgray"
grid.col <- if(is.specified(grid.col)) grid.col
else dot("col.grid", DEF="lightgray", ...)
# grid() doesn't have a dots arg so we invoke call.plot without dots
call.plot(graphics::grid,
force.nx = dot("grid.nx", DEF=nx, ...),
force.ny = dot("grid.ny", DEF=NULL, ...),
force.col = grid.col,
force.lty = dot("grid.lty", DEF=1, ...),
force.lwd = dot("grid.lwd", DEF=1, ...))
}
}
get.level.shades <- function(intervals, ...)
{
level.shade <- dot("level.shade shade.pints", DEF="mistyrose2", ...)
if(is.null(intervals$lwr) || is.null(intervals$cint.lwr))
c(level.shade, level.shade)
else { # use level.shade2 only if two kinds of intervals
# use exact match here because level.shade2 is also matched by level.shade
level.shade2 <- dot("level.shade2 shade2.pints", DEF="mistyrose4", ...)
c(level.shade, level.shade2)
}
}
# draw std err bars for a numeric predictor
draw.numeric.intervals <- function(x, intervals, ...)
{
if(!is.null(intervals)) {
level.shades <- get.level.shades(intervals, ...)
if(!is.null(intervals$lwr))
polygon1(x=x, lwr=intervals$lwr, upr=intervals$upr,
shade=level.shades[1], ...)
if(!is.null(intervals$cint.lwr))
polygon1(x=x, lwr=intervals$cint.lwr, upr=intervals$cint.upr,
shade=level.shades[2])
if(!is.null(intervals$lwr) || !is.null(intervals$cint.lwr))
box() # replot the box because intervals sometimes drawn over it
}
}
# TODO you can't get just the confidence lines with no shading; the following does not look right:
# plotmo(a, level=.8, level.lty=1, level.border=1, level.shade=2, level.density=0)
polygon1 <- function(x, lwr, upr, shade, ...)
{
call.plot(graphics::polygon, PREFIX="level.", drop.shade=1, drop.shade2=1,
force.x = c(x[1], x, rev(x)),
force.y = c(lwr[1], lwr, rev(upr)),
force.col = shade,
def.border = shade,
def.lty = 0,
...)
}
# draw std err bands for a factor predictor
draw.fac.intervals <- function(x, intervals, ...)
{
draw.intervals <- function(lwr, upr, shade)
{
for(ilev in seq_along(levels(x))) {
min <- min(lwr[[ilev]])
max <- max(upr[[ilev]])
polygon(c(ilev - .4, ilev - .4, ilev + .4, ilev + .4),
c(min, max, max, min), col=shade, border=shade, lty=0)
}
}
if(!is.null(intervals)) {
level.shades <- get.level.shades(intervals, ...)
if(!is.null(intervals$lwr))
draw.intervals(split(intervals$lwr, x),
split(intervals$upr, x), level.shades[1])
if(!is.null(intervals$cint.lwr))
draw.intervals(split(intervals$cint.lwr, x),
split(intervals$cint.upr, x), level.shades[2])
if(!is.null(intervals$lwr) || !is.null(intervals$cint.lwr))
box() # replot the box because intervals sometimes drawn over it
}
}
# draw the func arg, if specified
draw.func <- function(func, object, xframe, ipred, center, trace, ...)
{
if(!is.null(func)) {
print_summary(xframe, "Data for func", trace)
if(!is.function(func))
stop0("'func' is not a function");
y <- process.y(func(xframe), object, type="response", nresponse=1,
nrow(xframe), expected.levs=NULL, trace, "func returned")$y
if(center)
y <- my.center(y, trace)
call.plot(graphics::lines.default, PREFIX="func.",
force.x = xframe[,ipred],
force.y = y,
def.type = "l",
force.col = dot("func.col col.func",
EX=c(0,1), DEF="lightblue3", NEW=1, ...),
force.lty = dot("func.lty lty.func",
EX=c(0,1), DEF=1, NEW=1, ...),
force.lwd = dot("func.lwd lwd.func",
EX=c(0,1), DEF=2, NEW=1, ...),
...)
}
}
plot.degree2 <- function( # plot all degree2 graphs
# copy of args from plotmo, some have been tweaked slightly
object, degree2, all2, center, ylim, nresponse, type,
clip, trace, trace2, pt.col,
jitter, iresponse,
inverse.func, grid.func, grid.levels, extend,
type2, ngrid2,
# the following args are generated in plotmo
draw.plot, # draw.plot=FALSE means get and return all.yhat but don't actually plot
do.par,
x, y, pairs, resp.levs, ux.list, ndiscrete,
pred.names, abbr.pred.names, nfigs, nsingles, npairs,
xflip, yflip, swapxy, def.cex.main,
...)
{
get.degree2.data <- function(ipair)
{
data <- degree2.data(ipair)
if(!is.null(data)) # data is already initialized?
return(data) # yes, use it
# create data.frame of x values to be plotted, by updating xgrid for this pair
temp <- get.degree2.xframe(xgrid, x, ipred1, ipred2,
ngrid2, xranges, ux.list, ndiscrete)
xframe <- temp$xframe
grid1 <- temp$grid1
grid2 <- temp$grid2
trace2(trace, "degree2 plot %d %s:%s\n",
ipair, pred.names[ipred1], pred.names[ipred2])
yhat <- plotmo_predict(object, xframe, nresponse,
type, resp.levs, trace2, inverse.func, ...)$yhat
# image plots for factors look better if not blockified
if(type2 != "image") {
temp <- blockify.degree2.frame(x, yhat, grid1, grid2,
ipred1, ipred2, ux.list, ndiscrete)
yhat <- temp$yhat
grid1 <- temp$grid1
grid2 <- temp$grid2
}
if(center)
yhat <- my.center(yhat, trace2)
yhat <- matrix(yhat, nrow=length(grid1), ncol=length(grid2))
data <- list(xframe=xframe, grid1=grid1, grid2=grid2, yhat=yhat)
if(!draw.plot) # save the data, if there is going to be a next time
degree2.data(ipair, data)
data
}
draw.degree2 <- function(type2 = c("persp", "contour", "image"), ...)
{
name1 <- abbr.pred.names[ipred1]
name2 <- abbr.pred.names[ipred2]
# title of the current plot
main <- dot("main", ...)
main <- if(is.specified(main))
repl(main, nsingles+ipair)[nsingles+ipair]
else {
main <- ""
if(nfigs > 1 && !is.degree.specified(degree2))
main <- paste0(ipair, " ") # show plot number in headers
if(swapxy)
paste0(main, name2, ": ", name1)
else
paste0(main, name1, ": ", name2)
}
if(clip) {
yhat[yhat < ylim[1]] <- NA
# we don't clip upper values for persp plot because its own clipping is ok
# (whereas its own clipping for lower values tends to allow overwrite of axes).
if(type2 != "persp")
yhat[yhat > ylim[2]] <- NA
}
switch(type2,
persp=plot.persp(
x=x, grid1=grid1, grid2=grid2, yhat=yhat, name1=name1, name2=name2,
ipred1=ipred1, ipred2=ipred2, ipair=ipair, nsingles=nsingles,
trace=trace, ylim=ylim, xflip=xflip, yflip=yflip, swapxy=swapxy,
ngrid2=ngrid2, main2=main, ticktype2=ticktype, def.cex.main=def.cex.main,
...),
contour=plot.contour(
x=x, grid1=grid1, grid2=grid2, yhat=yhat, name1=name1, name2=name2,
ipred1=ipred1, ipred2=ipred2, xflip=xflip, yflip=yflip, swapxy=swapxy,
main2=main, pt.col=pt.col,
jitter=jitter,
ux.list=ux.list, ndiscrete=ndiscrete, iresponse=iresponse,
...),
image=plot.image(
x=x, grid1=grid1, grid2=grid2, yhat=yhat, name1=name1, name2=name2,
ipred1=ipred1, ipred2=ipred2, xflip=xflip, yflip=yflip, swapxy=swapxy,
main2=main, pt.col=pt.col,
jitter=jitter,
ux.list=ux.list, ndiscrete=ndiscrete, iresponse=iresponse,
...))
}
#--- plot.degree2 starts here
trace2(trace, "--plot.degree2(draw.plot=%s)\n", if(draw.plot) "TRUE" else "FALSE")
stopifnot(npairs > 0)
# need ticktype to determine degree2 margins
ticktype <- dot("persp.ticktype", DEF="simple", EX=0, ...)
ticktype <- match.choices(ticktype, c("simple", "detailed"), "ticktype")
if(draw.plot && do.par) {
opar=par("mar", "mgp")
on.exit(par(mar=opar$mar, mgp=opar$mgp))
do.degree2.par(type2, nfigs, substr(ticktype, 1, 1) == "d")
}
# get the x matrix we will plot, will be updated later for each pair of predictors
xranges <- get.degree2.xranges(x, extend, ux.list, ndiscrete)
if(!is.null(degree2.xgrid.global)) # already have the data?
xgrid <- degree2.xgrid.global # yes, use it
else {
xgrid <- get.degree2.xgrid(x, grid.func, grid.levels, pred.names, ngrid2)
if(!draw.plot) # save the data, if there is going to be a next time
assignInMyNamespace("degree2.xgrid.global", xgrid)
}
all.yhat <- NULL
for(ipair in seq_len(npairs)) {
ipred1 <- pairs[ipair,1] # index of first predictor
ipred2 <- pairs[ipair,2] # index of second predictor
if(ipair == 2 && trace2 == 2) {
trace2 <- 1
printf("Reducing trace level for subsequent degree2 plots\n")
}
temp <- get.degree2.data(ipair)
xframe <- temp$xframe
grid1 <- temp$grid1
grid2 <- temp$grid2
yhat <- temp$yhat
all.yhat <- c(all.yhat, yhat)
if(draw.plot)
draw.degree2(type2, ...)
}
all.yhat
}
get.degree2.xranges <- function(x, extend, ux.list, ndiscrete)
{
xranges <- matrix(NA, ncol=ncol(x), nrow=2)
colnames(xranges) <- colnames(x)
for(icol in seq_len(ncol(x))) {
x1 <- x[,icol]
xrange <- range1(x1, na.rm=TRUE)
nxvals <- length(ux.list[[icol]])
# TODO this extends xrange correctly but that doesn't suffice
# because get.degree2.xframe doesn't necessarily use xranges
if(extend != 0 && nxvals > ndiscrete && !is.factor(x1)) {
stopifnot(xrange[2] >= xrange[1])
ext <- extend * (xrange[2] - xrange[1])
xrange[1] <- xrange[1] - ext
xrange[2] <- xrange[2] + ext
}
xranges[,icol] <- xrange
}
xranges
}
draw.response.sites <- function(x, ipred1, ipred2, pt.col, jitter,
ux.list, ndiscrete, iresponse, swapxy, ...)
{
if(swapxy) {
x1 <- x[,ipred2]
x2 <- x[,ipred1]
} else {
x1 <- x[,ipred1]
x2 <- x[,ipred2]
}
points.or.text(
x=apply.jitter(as.numeric(x1), jitter, adjust=1.5),
y=apply.jitter(as.numeric(x2), jitter, adjust=1.5),
pt.col=pt.col, iresponse=iresponse, ...)
}
plot.persp <- function(x, grid1, grid2, yhat, name1, name2, ipred1, ipred2,
ipair, nsingles, trace, ylim, xflip, yflip, swapxy, ngrid2,
main2, ticktype2, def.cex.main, ...)
{
get.theta <- function(...) # theta arg for persp()
{
get.diag.val <- function(diag1, diag2) # return first non NA along diag
{
vals <- yhat[diag1, diag2]
(vals[!is.na(vals)])[1] # return first non NA in vals
}
theta <- dot("persp.theta theta", EX=c(0,1), ...)
if(is.na(theta)) { # no user specified theta?
# rotate graph so highest point is farthest (this could be improved)
theta <- -35
nr <- nrow(yhat)
nc <- ncol(yhat)
imax <- which.max(c(
get.diag.val(nr:1, nc:1),
get.diag.val(1:nr, nc:1),
get.diag.val(1:nr, 1:nc),
get.diag.val(nr:1, 1:nc)))
if(length(imax)) # length>0 unless entire diag is NA
theta <- theta + switch(imax, 0, 90, 180, 270)
}
theta
}
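# Note on the automatic rotation above (illustrative explanation, not code):
# the four get.diag.val() calls probe the corners of the yhat surface along
# its two diagonals.  which.max picks the corner with the largest (non-NA)
# response, and the switch adds 0, 90, 180, or 270 degrees to the base theta
# of -35 so that the highest corner ends up farthest from the viewer.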
#--- plot.persp starts here
# following needed because persp() rejects a reversed xlim or ylim
if(xflip)
warning0("ignoring xflip=TRUE for persp plot")
if(yflip)
warning0("ignoring yflip=TRUE for persp plot")
theta <- get.theta(...)
cex1 <- par("cex") # persp needs an explicit cex arg, doesn't use par("cex")
trace2(trace, "persp(%s:%s) theta %.3g\n", name1, name2, theta)
if(swapxy) {
temp <- grid1; grid1 <- grid2; grid2 <- temp # swap grid1 and grid2
temp <- ipred1; ipred1 <- ipred2; ipred2 <- temp # swap ipred1 and ipred2
temp <- name1; name1 <- name2; name2 <- temp # swap name1 and name2
yhat <- t(yhat)
}
zlab <- dot("ylab", DEF="", ...) # use ylab as zlab if specified
zlab <- repl(zlab, nsingles+ipair)[nsingles+ipair]
cex.lab <- dot("persp.cex.lab",
# make the labels small if multiple figures
DEF=if(def.cex.main < 1) .8 * def.cex.main else 1, ...)
# persp ignores mgp so prefix a newline to space the axis label
# we also prepend spaces else bottom of label tends to get cut off
if(theta < 0) theta <- theta + 360
theta <- theta %% 360
if((0 < theta && theta <= 90) || (180 < theta && theta <= 270)) {
xlab <- paste0("\n", name1, " ")
ylab <- paste0("\n ", name2)
} else {
xlab <- paste0("\n ", name1)
ylab <- paste0("\n", name2, " ")
}
# We use deprefix directly (and not call.plot) because
# we have to do a bit of manipulation of the args for nticks.
# Also we cannot use graphics:::persp.default because CRAN check complains
# about ":::". Instead we explicitly pass the formal argnames with formals.
persp.def.formals <- c( # formal args for persp.default (R version 3.2.0)
"x", "y", "z", "xlim", "zlim", "xlab", "ylab", "zlab", "main", "sub",
"theta", "phi", "r", "d", "scale", "expand", "col", "border", "ltheta",
"lphi", "shade", "box", "axes", "nticks", "ticktype")
args <- deprefix(graphics::persp, # calls persp.default
FNAME = "persp",
KEEP = "PREFIX,PLOT.ARGS",
FORMALS = persp.def.formals,
TRACE = if(ipair == 1 && trace >= 2) trace-1 else 0,
force.x = grid1,
force.y = grid2,
force.z = yhat,
force.xlim = range(grid1), # prevent use of user specified xlim and ylim
force.ylim = range(grid2),
# persp won't accept zlim=NULL
force.zlim = if(is.null(ylim)) ylim <- range(yhat) else ylim,
force.xlab = xlab,
force.ylab = ylab,
force.theta = theta,
force.phi = dot("persp.phi phi", EX=c(0,1), DEF=30, ...),
force.d = dot("persp.d dvalue", EX=c(0,1), DEF=1, ...),
force.main = main2,
def.cex.lab = cex.lab,
def.cex.axis = cex.lab,
def.zlab = zlab,
def.ticktype = "simple",
def.nticks = 5,
def.cex = cex1,
force.col = dot("persp.col col.persp",
EX=c(0,1), DEF="lightblue", NEW=1, ...),
def.border = NULL,
def.shade = .5,
...)
# if ticktype="simple" we must call persp without the nticks arg
# else persp emits confusing error messages
if(substr(ticktype2, 1, 1) == "s")
args["nticks"] <- NULL
# We use suppressWarnings below to suppress the warning
# "surface extends beyond the box" that was introduced in R 2.13-1.
# This warning may be issued multiple times and may be annoying to the plotmo user.
# (Unfortunately this also suppresses any other warnings in persp.)
# TODO Want to use lab=c(2,2,7) or similar in persp but persp ignores it
suppressWarnings(
do.call.trace(graphics::persp, args, fname="graphics::persp", trace=0))
}
plot.contour <- function(x, grid1, grid2, yhat, name1, name2, ipred1, ipred2,
xflip, yflip, swapxy, main2, pt.col,
jitter, ux.list, ndiscrete, iresponse, ...)
{
get.lim <- function(xflip, grid1, ipred)
{
# contour() automatically extends ylim, so we don't need to do it here
xrange <- range(grid1)
if(xflip)
c(xrange[2], xrange[1])
else
c(xrange[1], xrange[2])
}
#--- plot.contour starts here
x1 <- x[,ipred1]
x2 <- x[,ipred2]
levnames1 <- levels(x1)
levnames2 <- levels(x2)
is.fac1 <- is.factor(x1) && length(levnames1) <= 12
is.fac2 <- is.factor(x2) && length(levnames2) <= 12
xlab <- if(is.fac1) "" else name1 # no lab if fac else on top of lev name
ylab <- if(is.fac2) "" else name2
if(swapxy) {
temp <- levnames2; levnames2 <- levnames1; levnames1 <- temp
temp <- is.fac2; is.fac2 <- is.fac1; is.fac1 <- temp
temp <- ylab; ylab <- xlab; xlab <- temp
}
xlim <- get.lim(xflip, grid1, ipred1)
ylim <- get.lim(yflip, grid2, ipred2)
if(swapxy) {
temp <- xlim; xlim <- ylim; ylim <- temp
}
levels <- get.contour.levs(yhat)
labels <- signif(levels, 2) # else contour prints labels like 0.0157895
cex.lab <- par("cex") * dot("cex.lab", DEF=1, ...)
# We use suppressWarnings below to suppress the warning "all z values are
# equal".  This warning may be issued multiple times and may be annoying to
# the plotmo user.  (Unfortunately this also suppresses any other warnings
# in contour.)
suppressWarnings(
call.plot(graphics::contour.default,
force.x = if(swapxy) grid2 else grid1,
force.y = if(swapxy) grid1 else grid2,
force.z = if(swapxy) t(yhat) else yhat,
force.xlim = xlim,
force.ylim = ylim,
force.xlab = xlab,
force.ylab = ylab,
def.xaxt = if(is.fac1) "n" else "s",
def.yaxt = if(is.fac2) "n" else "s",
def.main = main2,
def.levels = levels,
def.labels = labels,
def.labcex = par("cex") * cex.lab,
...))
if(is.fac1) {
levnames1 <- abbreviate(levnames1, minlength=6, strict=TRUE)
mtext(levnames1, side=1, at=1:length(levnames1),
cex=cex.lab, line=.5, las=get.las(levnames1))
}
if(is.fac2)
mtext(abbreviate(levnames2, minlength=6, strict=TRUE),
side=2, at=1:length(levnames2),
cex=cex.lab, line=.5, las=2)
if(is.specified(pt.col))
draw.response.sites(x=x, ipred1=ipred1, ipred2=ipred2,
pt.col=pt.col, jitter=jitter, ux.list=ux.list,
ndiscrete=ndiscrete, iresponse=iresponse, swapxy=swapxy, ...)
}
get.contour.levs <- function(yhat)
{
# the default, as calculated internally by plot.contour
levs <- pretty(range(yhat, finite=TRUE), 10)
# reduce the default if the number of unique yhat values is less
# this is mainly for factors
unique.yhat <- sort.unique(yhat)
if(length(unique.yhat) > 1 && length(unique.yhat) < length(levs))
levs <- unique.yhat
levs
}
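# Illustrative sketch (guarded, not executed): pretty() gives roughly 10
# contour levels, but when yhat has only a few distinct values (e.g.
# predictions driven by a factor) those unique values are used directly.
if (FALSE) {
    get.contour.levs(seq(0, 1, length.out = 100))  # pretty levels 0, .1, ..., 1
    get.contour.levs(rep(c(1, 2, 3), 10))          # just 1 2 3
}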
plot.image <- function(x, grid1, grid2, yhat, name1, name2, ipred1, ipred2,
xflip, yflip, swapxy, main2, pt.col,
jitter, ux.list, ndiscrete, iresponse, ...)
{
# like image but fill the plot area with lightblue first so NAs are obvious
image.with.lightblue.na <- function(grid1, grid2, yhat, ...)
{
if(anyNA(yhat)) {
image(grid1, grid2, matrix(0, nrow(yhat), ncol(yhat)),
col="lightblue",
xlab="", ylab="", xaxt="n", yaxt="n", bty="n", main="")
par(new=TRUE) # so next plot is on top of this plot
}
call.plot(graphics::image.default,
force.x=grid1, force.y=grid2, force.z=yhat, ...)
box() # image() tends to overwrite the borders of the box
}
get.lim <- function(xflip, grid1, is.discrete)
{
xrange <- range(grid1)
if(is.discrete) {
xrange[1] <- xrange[1] - .5
xrange[2] <- xrange[2] + .5
} else {
range <- xrange[2] - xrange[1]
# .025 seems the max we can use without getting unsightly
# gaps at the edges of the plot
xrange[1] <- xrange[1] - .025 * range
xrange[2] <- xrange[2] + .025 * range
}
if(xflip)
c(xrange[2], xrange[1])
else
c(xrange[1], xrange[2])
}
#--- plot.image starts here
x1 <- x[,ipred1]
x2 <- x[,ipred2]
levnames1 <- levels(x1)
levnames2 <- levels(x2)
use.fac.names1 <- is.factor(x1) && length(levnames1) <= 12
use.fac.names2 <- is.factor(x2) && length(levnames2) <= 12
xlab <- if(use.fac.names1) "" else name1 # no lab if fac else on top of lev name
ylab <- if(use.fac.names2) "" else name2
if(swapxy) {
temp <- levnames2; levnames2 <- levnames1; levnames1 <- temp
temp <- use.fac.names2; use.fac.names2 <- use.fac.names1; use.fac.names1 <- temp
temp <- ylab; ylab <- xlab; xlab <- temp
}
xlim <- get.lim(xflip, grid1,
use.fac.names1 || length(ux.list[[ipred1]]) <= ndiscrete)
ylim <- get.lim(yflip, grid2,
use.fac.names2 || length(ux.list[[ipred2]]) <= ndiscrete)
# default col: white high values (snowy mountain tops), dark low values (dark depths)
if(swapxy)
image.with.lightblue.na(grid1=grid2, grid2=grid1, yhat=t(yhat),
force.col = dot("image.col col.image", EX=c(0,1),
DEF=gray((0:10)/10), NEW=1, ...),
force.main = main2,
force.xlim = ylim,
force.ylim = xlim,
force.xaxt = if(use.fac.names1) "n" else "s",
force.yaxt = if(use.fac.names2) "n" else "s",
force.xlab = xlab,
force.ylab = ylab,
...)
else
image.with.lightblue.na(grid1=grid1, grid2=grid2, yhat=yhat,
force.col = dot("image.col col.image", EX=c(0,1),
DEF=gray((0:10)/10), NEW=1, ...),
force.main = main2,
force.xlim = xlim,
force.ylim = ylim,
force.xaxt = if(use.fac.names1) "n" else "s",
force.yaxt = if(use.fac.names2) "n" else "s",
force.xlab = xlab,
force.ylab = ylab,
...)
cex.lab <- par("cex") * dot("cex.lab", DEF=1, ...)
if(use.fac.names1) {
levnames1 <- abbreviate(levnames1, minlength=6, strict=TRUE)
mtext(levnames1, side=1, at=1:length(levnames1),
cex=cex.lab, line=.5, las=get.las(levnames1))
}
if(use.fac.names2)
mtext(abbreviate(levnames2, minlength=6, strict=TRUE),
side=2, at=1:length(levnames2),
cex=cex.lab, line=.5, las=2)
if(is.specified(pt.col))
draw.response.sites(x=x, ipred1=ipred1, ipred2=ipred2,
pt.col=pt.col, jitter=jitter, ux.list=ux.list,
ndiscrete=ndiscrete, iresponse=iresponse, swapxy=swapxy, ...)
}
apply.inverse.func <- function(inverse.func, y, object, trace)
{
if(!is.null(inverse.func)) {
if(!is.numeric(y[1]))
stopf("inverse.func cannot be used on \"%s\" values", class(y[1])[1])
y <- process.y(inverse.func(y), object, type="response", nresponse=1,
length(y), NULL, trace, "inverse.func")$y
}
y
}
# should the factor labels on the x axis be printed horizontally or vertically?
get.las <- function(labels)
{
if(length(labels) * max(nchar(labels)) <= 20) # 20 is arbitrary
0 # horizontal
else
2 # vertical
}
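# Illustrative sketch (guarded, not executed): three 5-character labels give
# 3 * 5 = 15 <= 20, so they are drawn horizontally; longer label sets are rotated.
if (FALSE) {
    get.las(c("alpha", "beta!", "gamma"))            # 0, horizontal
    get.las(c("setosa", "versicolor", "virginica"))  # 2, vertical
}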
# true if a plot was selected by the user (excluding the default setting)
is.degree.specified <- function(degree)
{
!is.logical(degree) || length(degree) > 1
}
my.center <- function(x, trace=FALSE)
{
if(!is.null(x) && !is.factor(x)) {
x <- x - mean(x[is.finite(x)], na.rm=TRUE)
if(trace >= 2) {
name <- paste0("centered ", trunc.deparse(substitute(x)))
cat(name, "length ", length(x))
print.first.few.elements.of.vector(x, trace, name)
}
}
x
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31638858795802e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615833592-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 270 |
r
|
data <- read.table("./data/household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
data1 <- subset(data,as.Date(data$Date,"%d/%m/%Y")=="2007-02-01")
data2 <- subset(data,as.Date(data$Date,"%d/%m/%Y")=="2007-02-02")
data3 <- rbind(data1,data2)
data4 <- strptime(paste(data3$Date,data3$Time,sep=" "), "%d/%m/%Y %H:%M:%S")
# extract the three sub-metering series from the filtered data (data3)
data5 <- as.numeric(data3$Sub_metering_1)
data6 <- as.numeric(data3$Sub_metering_2)
data7 <- as.numeric(data3$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(data4,data5,type="l",ylab="Energy Submetering",xlab="")
lines(data4,data6,type="l",col="red")
lines(data4,data7,type="l",col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
|
/plot3.R
|
no_license
|
weizuo/ExData_Plotting1
|
R
| false | false | 805 |
r
|
# set up parallelized portion from qsub
jobnum <- as.numeric(commandArgs()[3])
root_path <- commandArgs()[4]
outpath <- commandArgs()[5]
if (Sys.info()[1] == "Linux"){
j <- "FILEPATH"
h <- paste0("FILEPATH",Sys.info()[6])
package_lib <- paste0(j,'FILEPATH')
} else {
j <- "FILEPATH"
h <- "FILEPATH"
package_lib <- paste0(j,'FILEPATH')
}
.libPaths(package_lib)
#load necessary packages
library(dplyr)
library(maptools)
library(raster)
library(rgeos)
library(rgdal)
library(data.table)
library(seegSDM)
library(stringr)
library(sp)
repo <- paste0(j, 'FILEPATH')
source(paste0(repo, 'FILEPATH/functions.R'))
source(paste0(repo, 'FILEPATH/econiche_qsub.R'))
source(paste0(repo, 'FILEPATH/check_loc_results.R'))
# set i
i <- jobnum
# set up filepaths and directories
##############
## pull in mean map
nichemap_mansoni <- raster(paste0(root_path, 'FILEPATH/preds_', i, '.tif'), band=1)
nichemap_haematobium <- raster(paste0(root_path, 'FILEPATH/preds_', i, '.tif'), band=1)
urbanicity <- raster(paste0(j, 'FILEPATH/ghslurbanicity_mean_1y_2005_00_00.tif'))
urbanicity[urbanicity>0 & urbanicity<=100000] <- 3
urbanicity[is.na(urbanicity) | urbanicity == 0] <- 1
urbanicity[urbanicity == 3] <- 0
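# Note: the three recodes above build a binary non-urban mask (urban cells -> 0,
# non-urban or missing cells -> 1); multiplying the population raster by it below
# removes the urban population from the population-at-risk calculation.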
# # get world shapefile
# worldshp = readOGR(paste0(j, 'FILEPATH'), 'GBD2016_analysis_final')
# # get rid of the factor class
# worldshp$loc_id = as.numeric(levels(worldshp$loc_id))[as.integer(worldshp$loc_id)]
# # pull in geographic restrictions
# excl_list = data.table(read.csv(paste0(j, "FILEPATH/species_specific_exclusions.csv"), stringsAsFactors = F))
# # keep if a location is endemic
# excl_list = excl_list[japonicum == "" | mansoni == "" | haematobium =="" | other == "",]
# excl_list_mansoni = excl_list[mansoni == "" | location_id == 172,]
# excl_list_haematobium = excl_list[haematobium =="" | location_id == 215,]
# excl_list_japonicum = excl_list[japonicum == "" | location_id == 10 | location_id == 12,]
# #subset world shp to be only locations with da shishto
# worldshp = worldshp[worldshp$loc_id %in% excl_list[,location_id],]
# worldshp_mansoni = worldshp[worldshp$loc_id %in% excl_list_mansoni[,location_id],]
# worldshp_haematobium = worldshp[worldshp$loc_id %in% excl_list_haematobium[,location_id],]
# worldshp_japonicum = worldshp[worldshp$loc_id %in% excl_list_japonicum[,location_id],]
#load world pop
pop = raster(paste0(j, 'FILEPATH/worldpop_total_5y_2005_00_00.tif'))
# mask out urban areas
pop <- pop * urbanicity
ras_shp <- raster(paste0(j, "FILEPATH/ras_shp.tif"))
ras_shp_mansoni <- raster(paste0(j, "FILEPATH/ras_shp_mansoni.tif"))
ras_shp_haematobium <- raster(paste0(j, "FILEPATH/ras_shp_haematobium.tif"))
pop <- setExtent(pop, ras_shp)
#load in occurrence reference csv
occ_raw_mansoni<-read.csv(paste0(root_path, 'FILEPATH/dat_all.csv'))
occ_raw_haematobium<-read.csv(paste0(root_path, 'FILEPATH/dat_all.csv'))
#define threshold values
threshold<-seq(0,1,0.001)
threshold<-threshold[order(threshold, decreasing = TRUE)]
#generate independent subset of the data using a subset of the occurrence probabilities
sensitivity<-rep(NA, length(threshold))
ROC_mega_table<-list(NA)
optimal_thresholds<-NA
optimal_thresholds_m<-NA
optimal_thresholds_h<-NA
#for each map extract values for all occurrences and pseudo 0s
occ_mansoni <- occ_raw_mansoni[occ_raw_mansoni$PA == 1,]
occ_mansoni$X <- seq.int(nrow(occ_mansoni))
occ_haematobium <- occ_raw_haematobium[occ_raw_haematobium$PA == 1,]
occ_haematobium$X <- seq.int(nrow(occ_haematobium))
occ_coords_mansoni <- occ_mansoni[c('longitude', 'latitude')]
occ_coords_haematobium <- occ_haematobium[c('longitude', 'latitude')]
pseudoz_mansoni <- occ_raw_mansoni[occ_raw_mansoni$PA == 0,]
pseudoz_haematobium <- occ_raw_haematobium[occ_raw_haematobium$PA == 0,]
# now do this for pseudo 0s
pseudoz_coords_mansoni <- pseudoz_mansoni[c('longitude', 'latitude')]
pseudoz_coords_haematobium <- pseudoz_haematobium[c('longitude', 'latitude')]
##############################
##### TEST WITH MEAN MAP #####
##############################
# extract these values from the raster
occ_preds_mansoni <- raster::extract(nichemap_mansoni, occ_coords_mansoni, df = T, method = 'simple')
occ_preds_haematobium <- raster::extract(nichemap_haematobium, occ_coords_haematobium, df = T, method = 'simple')
# setup for merge
colnames(occ_preds_mansoni) <- c('X', 'prob')
colnames(occ_preds_haematobium) <- c('X', 'prob')
occ_mansoni <- merge(occ_mansoni, occ_preds_mansoni, by = "X")
occ_haematobium <- merge(occ_haematobium, occ_preds_haematobium, by = "X")
#generate independent subset of the data using a subset of the occurrence probabilities
sensitivity_mansoni<-rep(NA, length(threshold))
sensitivity_haematobium<-rep(NA, length(threshold))
# extract raster values for true occurrence data only
# set sensitivities
for (k in 1:length(threshold)){
sensitivity_mansoni[k]<-length(occ_mansoni$prob[occ_mansoni$prob>threshold[k]])/length(occ_mansoni$prob)
sensitivity_haematobium[k]<-length(occ_haematobium$prob[occ_haematobium$prob>threshold[k]])/length(occ_haematobium$prob)
}
## now for pseudo 0s
## extract these values from the raster
pseudoz_preds_mansoni <- raster::extract(nichemap_mansoni, pseudoz_coords_mansoni, df = T, method = 'simple')
pseudoz_preds_haematobium <- raster::extract(nichemap_haematobium, pseudoz_coords_haematobium, df = T, method = 'simple')
# setup for merge
colnames(pseudoz_preds_mansoni) <- c('X', 'prob')
colnames(pseudoz_preds_haematobium) <- c('X', 'prob')
# setup pseudoz
pseudoz_mansoni$X <- (1:nrow(pseudoz_mansoni))
pseudoz_haematobium$X <- (1:nrow(pseudoz_haematobium))
# merge together
pseudoz_mansoni <- merge(pseudoz_mansoni, pseudoz_preds_mansoni, by = "X")
pseudoz_haematobium <- merge(pseudoz_haematobium, pseudoz_preds_haematobium, by = "X")
fpr_mansoni<-rep(NA, length(threshold))
fpr_haematobium<-rep(NA, length(threshold))
for (a in 1:length(threshold)){
fpr_mansoni[a]<-length(pseudoz_mansoni$prob[pseudoz_mansoni$prob>threshold[a]])/length(pseudoz_mansoni$prob)
fpr_haematobium[a]<-length(pseudoz_haematobium$prob[pseudoz_haematobium$prob>threshold[a]])/length(pseudoz_haematobium$prob)
}
ROC_table<-data.frame(threshold=threshold, sensitivity_m=sensitivity_mansoni, sensitivity_h=sensitivity_haematobium, fp_m=fpr_mansoni, fp_h=fpr_haematobium)
# plot(x=ROC_table$false_positive, y=ROC_table$sensitivity, type='l', main=paste(i))
for(a in 1:nrow(ROC_table)){
ROC_table$hypotenuse_m[a]<-sqrt(((1-ROC_table$sensitivity_m[a])^2)+(ROC_table$fp_m[a]^2))
ROC_table$hypotenuse_h[a]<-sqrt(((1-ROC_table$sensitivity_h[a])^2)+(ROC_table$fp_h[a]^2))
}
ROC_mega_table[[1]]<-ROC_table
optimal_thresholds_m[1]<-ROC_table$threshold[which.min(ROC_table$hypotenuse_m)] # which.min() takes the first threshold if there are ties
optimal_thresholds_h[1]<-ROC_table$threshold[which.min(ROC_table$hypotenuse_h)]
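# Note: hypotenuse_* is the Euclidean distance of each ROC point from the perfect
# classifier (sensitivity = 1, false-positive rate = 0), so the optimal threshold
# is the one whose ROC point lies closest to that corner.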
###############
## generate PAR per district per map
###############
zonal_PAR<-list()
PAR<-list()
# setup raster shapes for stamping out areas
ras_shp_mansoni[ras_shp_mansoni>0 & ras_shp_mansoni<=100000]<- 1
ras_shp_mansoni[is.na(ras_shp_mansoni)]<-0
ras_shp_haematobium[ras_shp_haematobium>0 & ras_shp_haematobium<=100000]<- 1
ras_shp_haematobium[is.na(ras_shp_haematobium)]<-0
threshold_map_m<-nichemap_mansoni>optimal_thresholds_m[1]
threshold_map_m<-threshold_map_m*ras_shp_mansoni
threshold_map_m[is.na(threshold_map_m)]<- 0
threshold_map_h<-nichemap_haematobium>optimal_thresholds_h[1]
threshold_map_h<-threshold_map_h*ras_shp_haematobium
threshold_map_h[is.na(threshold_map_h)]<- 0
threshold_map <- do.call("sum", list(threshold_map_m, threshold_map_h))
threshold_map[threshold_map>=1 & threshold_map <= 2]<-1
PAR[[1]]<-threshold_map*pop
zonal_PAR[[1]]<-zonal(PAR[[1]],
ras_shp,
fun='sum',
na.rm=TRUE)
zonal_PAR_clean<-zonal_PAR[[1]]
for (i in 1:nrow(zonal_PAR_clean)){
if (zonal_PAR_clean[i,2]<1){
zonal_PAR_clean[i,2]<-0
}
}
zonal_PAR[[1]]<-zonal_PAR_clean
colnames(zonal_PAR[[1]]) <- c('zone', 'par')
#calculate zonal totals
total_sums<-zonal(pop,
ras_shp,
fun = 'sum',
na.rm = TRUE)
for (i in 1:nrow(total_sums)){
if (total_sums[i,2]<=0){
total_sums[i,2]<-NA
}
}
# generate the total population to get proportion
prop_par <- cbind(total_sums, zonal_PAR[[1]])
prop_par <- as.data.table(prop_par)
prop_par[, prop:=par/sum]
prop_par <- prop_par[, c("zone", "prop")]
colnames(prop_par)[2] <- c(paste0("prop_", jobnum))
write.csv(prop_par, file = paste0(outpath, "FILEPATH/prop_", jobnum, ".csv"))
# save optimal thresholds
optimal_thresholds[1] <- optimal_thresholds_m[1]
optimal_thresholds[2] <- optimal_thresholds_h[1]
write.csv(optimal_thresholds, file = paste0(outpath, "FILEPATH/threshold_", jobnum, ".csv"))
|
/gbd_2017/nonfatal_code/ntd_schisto/extract_par_species_parallel.r
|
no_license
|
Nermin-Ghith/ihme-modeling
|
R
| false | false | 8,805 |
r
|
## ----setup,echo=FALSE,include=FALSE--------------------------------------
library(knitr)
library(bodenmiller)
library(ggplot2)
library(dplyr)
library(reshape2)
library(RColorBrewer)
knitr::opts_chunk$set(warning=FALSE,
fig.keep='high',
fig.align='center')
do.fan <- function(x,step=0.01) {
data.frame(ymin=quantile(x,probs=seq(0,1,step))[-length(seq(0,1,step))],
ymax=quantile(x,probs=seq(0,1,step))[-1],
id=seq(1,length(seq(step,1,step))),
percent=abs(seq(step,1,step)-0.5))
}
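# do.fan() returns, for a single numeric vector, stacked quantile bands
# (ymin/ymax per percentile step) that geom_ribbon() draws as a fan plot below;
# 'percent' measures each band's distance from the median and drives the fill.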
scale_fill_fan <- function(...) scale_fill_gradientn(colours=rev(brewer.pal(9,'Oranges')) )
## ----ref_pheno_boxplot---------------------------------------------------
data(refPhenoMat)
refPhenoFrame <- melt(refPhenoMat)
names(refPhenoFrame) <- c('cell_id','channel','value')
ggplot(data=refPhenoFrame,aes(x=channel,y=value))+
geom_boxplot()+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
## ----annots--------------------------------------------------------------
data('refAnnots')
refPhenoFrame$Cells <- rep(refAnnots$Cells,ncol(refPhenoMat))
cell.colors <- setNames(c('#9CA5D5','#0015C5','#5B6CB4','#BFC5E8','#C79ED0','#850094',
'#A567B1','#DBBCE2','#D3C6A1','#5E4500','#BBDEB1','#8A1923',
'#B35E62','#CEA191'),
c('cd14-hladr-','cd14-hladrhigh','cd14-hladrmid','cd14-surf-',
'cd14+hladr-','cd14+hladrhigh','cd14+hladrmid','cd14+surf-',
'cd4+','cd8+','dendritic','igm-','igm+','nk'))
## ----ref_pheno_pop_boxplot,fig.width=6,fig.height=4----------------------
cd7.pops <- refPhenoFrame %>% filter(channel=='CD7')
ggplot(data=cd7.pops,
aes(x=Cells,y=value,fill=Cells))+
geom_boxplot()+
scale_fill_manual(values=cell.colors)+
guides(fill=FALSE)+
theme(axis.text.x = element_text(angle = 45, hjust = 1))
## ----ref_pheno_sub_fan---------------------------------------------------
ggplot(refPhenoFrame %>% filter(Cells=='cd4+') %>% group_by(Cells,channel) %>% do(do.fan(.$value)),
aes(x=channel,fill=percent,group=id))+
geom_ribbon(aes(ymin=ymin,ymax=ymax))+
guides(fill=F)+
facet_wrap(~Cells)+
theme(axis.text.x = element_text(angle = 45, hjust = 1))+
scale_fill_fan()
## ----ref_func_sub_fan,fig.width=5,fig.height=3---------------------------
data(refFuncMat)
refFuncFrame <- melt(refFuncMat)
names(refFuncFrame) <- c('cell_id','channel','value')
refFuncFrame$Cells <- rep(refAnnots$Cells,ncol(refFuncMat))
ggplot(refFuncFrame %>% filter(Cells=='cd4+') %>% group_by(Cells,channel) %>% do(do.fan(.$value)),
aes(x=channel,fill=percent,group=id))+
geom_ribbon(aes(ymin=ymin,ymax=ymax))+
facet_wrap(~Cells)+
guides(fill=FALSE)+
theme(axis.text.x = element_text(angle = 45, hjust = 1))+
scale_fill_fan()
## ----untreated_func------------------------------------------------------
data('untreatedFuncMat')
data('untreatedAnnots')
untreatedFuncFrame <- melt(untreatedFuncMat)
names(untreatedFuncFrame) <- c('cell_id','channel','value')
untreatedFuncFrame$Cells <- rep(untreatedAnnots$Cells,ncol(untreatedFuncMat))
untreatedFuncFrame$Treatment <- rep(untreatedAnnots$Treatment,ncol(untreatedFuncMat))
## ----un_func_sub_fan,fig.width=6,fig.height=6----------------------------
refFuncLine <- refFuncFrame %>% filter(Cells=='cd4+') %>% group_by(Cells,channel) %>% summarise(value=median(value))
refFuncLine <- do.call(rbind,lapply(seq(1,3),function(x) refFuncLine))
refFuncLine$Treatment <- rep(levels(untreatedFuncFrame$Treatment),each=nlevels(refFuncLine$channel))
refFuncLine$percent <- 0
refFuncLine$id <- 'ref'
ggplot(untreatedFuncFrame %>% filter(Cells=='cd4+') %>% group_by(Treatment,Cells,channel) %>% do(do.fan(.$value)),
aes(x=channel,fill=percent,group=id))+
geom_ribbon(aes(ymin=ymin,ymax=ymax))+
geom_line(data=refFuncLine,aes(y=value),
col='black',linetype=4)+
guides(fill=F)+
  facet_wrap(~Cells*Treatment,ncol=2,scales='free_x')+
theme(axis.text.x = element_text(angle = 45, hjust = 1))+
scale_fill_fan()
|
/data/genthat_extracted_code/bodenmiller/vignettes/bodenmiller.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 4,239 |
r
|
# load the required packages
install.packages("caret")
install.packages("rattle")
install.packages("rpart")
install.packages("rpart.plot")
install.packages("randomForest")
install.packages("repmis")
install.packages("e1071")
library(caret)
library(rattle)
library(rpart)
library(rpart.plot)
library(randomForest)
library(repmis)
library(e1071)
training <- read.csv("pml-training.csv", na.strings = c("NA", ""))
testing <- read.csv("pml-testing.csv", na.strings = c("NA", ""))
training <- training[, colSums(is.na(training)) == 0]
testing <- testing[, colSums(is.na(testing)) == 0]
trainData <- training[, -c(1:7)]
testData <- testing[, -c(1:7)]
set.seed(7826)
inTrain <- createDataPartition(trainData$classe, p = 0.7, list = FALSE)
train <- trainData[inTrain, ]
valid <- trainData[-inTrain, ]
control <- trainControl(method = "cv", number = 5)
fitRpart <- train(classe ~ ., data = train, method = "rpart",
trControl = control)
print(fitRpart, digits = 4)
# predict outcomes using validation set
predictRpart <- predict(fitRpart, valid)
fancyRpartPlot(fitRpart$finalModel)
# Show prediction result
confRpart <- confusionMatrix(valid$classe, predictRpart)
## Confusion Matrix and Statistics
accuracyRpart <- confRpart$overall[1]
##### Using Random Forest... #####
fitRf <- train(classe ~ ., data = train, method = "rf", trControl = control)
print(fitRf, digits = 4)
# predict outcomes using validation set
predictRf <- predict(fitRf, valid)
# Show prediction result
confRf <- confusionMatrix(valid$classe, predictRf)
accuracyRf <- confRf$overall[1]
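# A small addition (not in the original script): the expected out-of-sample error
# can be read directly off the validation-set accuracy of the random forest.
oosErrorRf <- 1 - as.numeric(accuracyRf)
oosErrorRf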
predict(fitRf, testData)
## Result: B A B A A E D B A A B C B A E E A B B B
|
/predict.R
|
no_license
|
CherryPon/predictionAssignment
|
R
| false | false | 1,664 |
r
|
library(raster)
library(rgdal)
library(ggplot2)
library(reshape2)
library(plyr)
# --------------------------------------------------------------------------------
# Note: This first part I found in a tutorial online:
# https://downwithtime.wordpress.com/2013/12/04/naturalearthdata-and-r-in-ggplot2/
# --------------------------------------------------------------------------------
# Assuming you have a path 'Maps' that you store your spatial files in. This
# is all downloaded from http://www.naturalearthdata.com/downloads/ using the
# 1:50m "Medium" scale data.
nat.earth <- stack('./Maps/NE2_50M_SR_W/NE2_50M_SR_W.tif')
ne_lakes <- readOGR('./Maps/NaturalEarth/ne_50m_lakes.shp',
'ne_50m_lakes')
ne_rivers <- readOGR('./Maps/NaturalEarth/ne_50m_rivers_lake_centerlines.shp',
'ne_50m_rivers_lake_centerlines')
ne_coast <- readOGR('./Maps/NaturalEarth/ne_50m_coastline.shp',
'ne_50m_coastline')
# I have a domain I'm interested in, but there's no reason you can't define something else:
quick.subset <- function(x, longlat){
# longlat should be a vector of four values: c(xmin, xmax, ymin, ymax)
x@data$id <- rownames(x@data)
x.f = fortify(x, region="id")
x.join = join(x.f, x@data, by="id")
x.subset <- subset(x.join, x.join$long > longlat[1] & x.join$long < longlat[2] &
x.join$lat > longlat[3] & x.join$lat < longlat[4])
x.subset
}
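# quick.subset() fortifies a Spatial*DataFrame into a ggplot-ready data frame,
# re-attaches the attribute table, and keeps only vertices inside the bounding box.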
domain <- c(-98.6, -66.1, 36.5, 49.7)
lakes.subset <- quick.subset(ne_lakes, domain)
river.subset <- quick.subset(ne_rivers, domain)
coast.subset <- quick.subset(ne_coast, domain)
nat.crop <- crop(nat.earth, y=extent(domain))
rast.table <- data.frame(xyFromCell(nat.crop, 1:ncell(nat.crop)),
getValues(nat.crop/255))
rast.table$rgb <- with(rast.table, rgb(NE2_50M_SR_W.1,
NE2_50M_SR_W.2,
NE2_50M_SR_W.3,
1))
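# Note: rast.table$rgb collapses the three Natural Earth bands (scaled to 0-1
# above) into a single hex colour string per cell, which geom_tile() below uses
# directly as the fill.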
# et voila!
ggplot(data = rast.table, aes(x = x, y = y)) +
geom_tile(fill = rast.table$rgb) +
geom_polygon(data=lakes.subset, aes(x = long, y = lat, group = group), fill = '#ADD8E6') +
scale_alpha_discrete(range=c(1,0)) +
geom_path(data=lakes.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=river.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=coast.subset, aes(x = long, y = lat, group = group), color = 'blue') +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
xlab('') + ylab('')
ggplot(data = lakes.subset) +
geom_polygon(aes(x = long, y = lat, group = group), fill = '#ADD8E6') +
scale_alpha_discrete(range=c(1,0)) +
geom_path(data=lakes.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=river.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=coast.subset, aes(x = long, y = lat, group = group), color = 'blue') +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
xlab('') + ylab('')
# __________________________________________________________________________
# This is me borrowing from above to try with india (for my Ptolemy project..)
#
domain <- c(40, 120, 0, 45)
lakes.subset <- quick.subset(ne_lakes, domain)
river.subset <- quick.subset(ne_rivers, domain)
coast.subset <- quick.subset(ne_coast, domain)
nat.crop <- crop(nat.earth, y=extent(domain))
rast.table <- data.frame(xyFromCell(nat.crop, 1:ncell(nat.crop)),
getValues(nat.crop/255))
rast.table$rgb <- with(rast.table, rgb(NE2_50M_SR_W.1,
NE2_50M_SR_W.2,
NE2_50M_SR_W.3,
1))
# et voila!
ggplot(data = rast.table, aes(x = x, y = y)) +
geom_tile(fill = rast.table$rgb) +
geom_polygon(data=lakes.subset, aes(x = long, y = lat, group = group), fill = '#ADD8E6') +
scale_alpha_discrete(range=c(1,0)) +
geom_path(data=lakes.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=river.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=coast.subset, aes(x = long, y = lat, group = group), color = 'blue') +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
xlab('') + ylab('')
ggplot(data = lakes.subset) +
geom_polygon(aes(x = long, y = lat, group = group), fill = '#ADD8E6') +
scale_alpha_discrete(range=c(1,0)) +
geom_path(data=lakes.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=river.subset, aes(x = long, y = lat, group = group), color = 'blue') +
geom_path(data=coast.subset, aes(x = long, y = lat, group = group), color = 'blue') +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
xlab('') + ylab('')
# __________________________________________________________________________
# This is the one I used for the GTAP data (and thus the poster).
#
admin0 <- readOGR('../Maps/NaturalEarth/ne_50m_admin_0_countries.shp',
'ne_50m_admin_0_countries')
domain <- c(-180, 180, -90, 90)
admin0.subset <- quick.subset(admin0, domain)
admin0.subset$id <- as.integer(admin0.subset$id)
nat.crop <- crop(nat.earth, y=extent(domain))
rast.table <- data.frame(xyFromCell(nat.crop, 1:ncell(nat.crop)),
getValues(nat.crop/255))
ggplot(data = admin0.subset) +
geom_polygon(aes(x = long, y = lat, group = group, color = pop_est, fill = pop_est)) +
scale_alpha_discrete(range=c(1,0)) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
xlab('') + ylab('')
admin0.merge <- merge(x=admin0.subset, y=trade.exp1995, by.x="iso_a3", by.y="iso3code", all.x=T)
admin0.merge$id <- as.integer(admin0.merge$id)
admin0.merge <- admin0.merge[order(admin0.merge$id, admin0.merge$order),]
admin0.iso3 <- ddply(admin0.merge, .(iso_a3), "nrow")
pdf("geo1995.pdf", width=6, height=3)
ggplot(data = admin0.merge) +
geom_polygon(aes(x = long, y = lat, group = group, fill = expval), color = "#000000") +
scale_alpha_discrete(range=c(1,0)) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
xlab('') + ylab('')
dev.off()
admin0.merge <- merge(x=admin0.subset, y=trade.exp2009, by.x="iso_a3", by.y="iso3code", all.x=T)
admin0.merge$id <- as.integer(admin0.merge$id)
admin0.merge <- admin0.merge[order(admin0.merge$id, admin0.merge$order),]
admin0.iso3 <- ddply(admin0.merge, .(iso_a3), "nrow")
pdf("geo2009.pdf", width=6, height=3)
ggplot(data = admin0.merge) +
geom_polygon(aes(x = long, y = lat, group = group, fill = expval), color = "#000000") +
scale_alpha_discrete(range=c(1,0)) +
scale_x_continuous(expand=c(0,0)) +
scale_y_continuous(expand=c(0,0)) +
xlab('') + ylab('')
dev.off()
admin0.iso3$iso_a3
trade.exp2000[!(trade.exp2000$iso3code %in% admin0.iso3$iso_a3), ]
admin0.iso3[!(admin0.iso3$iso_a3 %in% trade.exp2000$iso3code), ]
|
/natearthtest.R
|
permissive
|
coreyabshire/ivmooc-gtap
|
R
| false | false | 7,272 |
r
|
library(Signac)
library(Seurat)
library(ggplot2)
library(harmony)
library(dplyr)
library(EnsDb.Hsapiens.v86) # annotation database used later by GetGRangesFromEnsDb()
options(future.globals.maxSize = 50000 * 1024^2)
load('../data_processed/obj_all.rda')
######## Quality control to filter cells ########
## Filter cells
# RNA-seq: nCount_RNA, percent.mt
# ATAC-seq: nCount_ATAC, nucleosome_signal, TSS.enrichment
pbmc <- subset(x = obj_all, subset = nCount_RNA > 2e2 &
nCount_RNA < 5e4 &
nCount_ATAC > 2e2 &
nCount_ATAC < 1e5 &
percent.mt < 5 &
nucleosome_signal < 3 &
TSS.enrichment > 1)
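# Note: a cell is kept only if it passes all of the RNA (depth, mitochondrial
# fraction) and ATAC (depth, nucleosome signal, TSS enrichment) thresholds above.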
######## Processing and Clustering ########
preprocess <- function(object, n.pc = 30, n.lsi = 10){
## Filter peaks/genes which are detected in < 10 cells
tmp <- Matrix::rowSums(object[['RNA']]@counts > 0)
object[['RNA']] <- subset(object[['RNA']], features = names(which(tmp >= 10)))
tmp <- Matrix::rowSums(object[['ATAC']]@counts > 0)
object[['ATAC']] <- subset(object[['ATAC']], features = names(which(tmp >= 10)))
## Normalization, dimensional reduction, and clustering on RNA-seq and ATAC-seq separately
# RNA-seq
DefaultAssay(object) <- 'RNA'
object <- SCTransform(object)
object <- RunPCA(object)
object <- RunUMAP(object, reduction = 'pca', dims = 1:n.pc, assay = 'SCT',
reduction.name = 'umap.rna', reduction.key = 'rnaUMAP_')
object <- FindNeighbors(object, reduction = 'pca', dims = 1:n.pc, assay = 'SCT')
object <- FindClusters(object, graph.name = 'SCT_snn', algorithm = 3, resolution = 0.2)
# ATAC-seq
DefaultAssay(object) <- "ATAC"
object <- RunTFIDF(object, method = 3)
object <- FindTopFeatures(object, min.cutoff = 'q75')
object <- RunSVD(object)
object <- RunUMAP(object, reduction = 'lsi', dims = 2:n.lsi, assay = 'ATAC',
reduction.name = "umap.atac", reduction.key = "atacUMAP_")
object <- FindNeighbors(object, reduction = 'lsi', dims = 2:n.lsi, assay = 'ATAC')
object <- FindClusters(object, graph.name = 'ATAC_snn', algorithm = 3, resolution = 0.2)
# Weighted nearest neighbor (WNN) analysis using both modalities
object <- FindMultiModalNeighbors(object,
reduction.list = list("pca", "lsi"),
dims.list = list(1:n.pc, 2:n.lsi),
modality.weight.name = 'RNA.weight')
object <- RunUMAP(object, nn.name = "weighted.nn",
reduction.name = "wnn.umap", reduction.key = "wnnUMAP_")
object <- FindClusters(object, graph.name = "wsnn", algorithm = 3, verbose = FALSE, resolution = 0.2)
object <- FindMultiModalNeighbors(object,
reduction.list = list("harmony.pca", "harmony.lsi"),
dims.list = list(1:n.pc, 2:n.lsi),
modality.weight.name = 'RNA.weight.harmony',
knn.graph.name = 'wknn.harmony',
snn.graph.name = 'wsnn.harmony',
weighted.nn.name = 'weighted.nn.harmony')
DefaultAssay(object) <- 'SCT'
return(object)
}
pbmc = preprocess(obj_all)
rm(obj_all); gc()
######## Cell types annotation ########
## WNN-derived
celltype <- rep(NA, length = ncol(pbmc))
Idents(pbmc) <- pbmc$wsnn_res.0.2
celltype[which(Idents(pbmc) %in% c(11))] <- 'RG'
celltype[which(Idents(pbmc) %in% c('5_3'))] <- 'IPC'
celltype[which(Idents(pbmc) %in% c(8))] <- 'IN-MGE'
celltype[which(Idents(pbmc) %in% c(10))] <- 'IN-CGE'
celltype[which(Idents(pbmc) %in% c(3))] <- 'IN-fetal'
celltype[which(Idents(pbmc) %in% c(0,14))] <- 'EN-fetal-late'
celltype[which(Idents(pbmc) %in% c(6,12))] <- 'EN'
celltype[which(Idents(pbmc) %in% c('5_0','5_1','5_2','5_4'))] <- 'EN-fetal-early'
celltype[which(Idents(pbmc) %in% c(4,9,15))] <- 'Astrocytes'
celltype[which(Idents(pbmc) %in% c(1,21,22))] <- 'Oligodendrocytes'
celltype[which(Idents(pbmc) %in% c(2,19))] <- 'OPC'
celltype[which(Idents(pbmc) %in% c(7,13,17,23))] <- 'Microglia'
celltype[which(Idents(pbmc) %in% c(16))] <- 'Endothelial'
celltype[which(Idents(pbmc) %in% c(18))] <- 'Pericytes'
celltype[which(Idents(pbmc) %in% c(20))] <- 'VSMC'
pbmc$celltype = celltype
## RNA-derived
celltype <- rep(NA, length = ncol(pbmc))
Idents(pbmc) <- pbmc$SCT_snn_res.0.2.subcluster
celltype[which(Idents(pbmc) %in% c('12_0','12_1','12_3'))] <- 'RG'
celltype[which(Idents(pbmc) %in% c('12_2'))] <- 'IPC'
celltype[which(Idents(pbmc) %in% c(9))] <- 'IN-MGE'
celltype[which(Idents(pbmc) %in% c(10))] <- 'IN-CGE'
celltype[which(Idents(pbmc) %in% c(6))] <- 'IN-fetal'
celltype[which(Idents(pbmc) %in% c(8))] <- 'EN-fetal-early'
celltype[which(Idents(pbmc) %in% c(0,11))] <- 'EN-fetal-late'
celltype[which(Idents(pbmc) %in% c(4))] <- 'EN'
celltype[which(Idents(pbmc) %in% c(5,7,16))] <- 'Astrocytes'
celltype[which(Idents(pbmc) %in% c(1,17))] <- 'Oligodendrocytes'
celltype[which(Idents(pbmc) %in% c(2,15))] <- 'OPC'
celltype[which(Idents(pbmc) %in% c(3,18))] <- 'Microglia'
celltype[which(Idents(pbmc) %in% c(14))] <- 'Endothelial'
celltype[which(Idents(pbmc) %in% c('13_0','13_1','13_4'))] <- 'Pericytes'
celltype[which(Idents(pbmc) %in% c('13_2', '13_3'))] <- 'VSMC'
celltype <- factor(celltype,
levels = c('RG', 'IPC', 'EN-fetal-early', 'EN-fetal-late', 'EN',
'IN-fetal', 'IN-MGE', 'IN-CGE',
'OPC', 'Astrocytes', 'Oligodendrocytes','Microglia',
'Endothelial', 'Pericytes', 'VSMC'),
ordered = T)
pbmc$celltype.rna <- celltype
## ATAC-derived
celltype <- rep(NA, length = ncol(pbmc))
Idents(pbmc) <- pbmc$ATAC_snn_res.0.2.subcluster
celltype[which(Idents(pbmc) %in% c('5_1'))] <- 'RG'
celltype[which(Idents(pbmc) %in% c('5_3'))] <- 'IPC'
celltype[which(Idents(pbmc) %in% c(4))] <- 'IN'
celltype[which(Idents(pbmc) %in% c(7))] <- 'IN-fetal'
celltype[which(Idents(pbmc) %in% c(0))] <- 'EN-fetal-late'
celltype[which(Idents(pbmc) %in% c(8,10))] <- 'EN'
celltype[which(Idents(pbmc) %in% c('5_0','5_2','5_4'))] <- 'EN-fetal-early'
celltype[which(Idents(pbmc) %in% c(1,11))] <- 'Astrocytes'
celltype[which(Idents(pbmc) %in% c(2,12))] <- 'Oligodendrocytes'
celltype[which(Idents(pbmc) %in% c(3))] <- 'OPC'
celltype[which(Idents(pbmc) %in% c(6))] <- 'Microglia'
celltype[which(Idents(pbmc) %in% c(9))] <- 'Endothelial'
celltype <- factor(celltype,
levels = c('RG', 'IPC', 'EN-fetal-early', 'EN-fetal-late', 'EN',
'IN-fetal', 'IN',
'OPC', 'Astrocytes', 'Oligodendrocytes','Microglia',
'Endothelial'),
ordered = T)
pbmc$celltype.atac <- celltype
######## Additional processing ########
## Re-call peaks for each annotated cell type using MACS2
peaks <- CallPeaks(pbmc, assay = 'ATAC',
macs2.path = '/home/kaiyi/anaconda3/bin/macs2',
group.by = 'celltype',
outdir = 'MACS2_output',
fragment.tempdir = 'MACS2_output',
cleanup = FALSE)
# Remove peaks on nonstandard chromosomes and in genomic blacklist regions
peaks <- keepStandardChromosomes(peaks, pruning.mode = 'coarse')
peaks <- subsetByOverlaps(x = peaks,
ranges = blacklist_hg38_unified,
invert = TRUE)
# Quantify counts in each peak
DefaultAssay(pbmc) <- 'ATAC'
frags <- Fragments(pbmc)
macs_count <- FeatureMatrix(
fragments = frags,
features = peaks,
cells = colnames(pbmc)
)
# Create a new assay using the MACS2 peak set and add it to the Seurat object
annotations <- GetGRangesFromEnsDb(ensdb = EnsDb.Hsapiens.v86)
seqlevelsStyle(annotations) <- "UCSC"
genome(annotations) <- "hg38"
pbmc[['peaks']] <- CreateChromatinAssay(
counts = macs_count,
sep = c(":", "-"),
genome = 'hg38',
fragments = frags,
annotation = annotations
)
DefaultAssay(pbmc) <- 'peaks'
pbmc <- RunTFIDF(pbmc, method = 3)
pbmc <- FindTopFeatures(pbmc, min.cutoff = 'q75')
## Create a gene activity matrix
gene.activities <- GeneActivity(pbmc)
pbmc[['GeneActivity']] <- CreateAssayObject(counts = gene.activities)
pbmc <- NormalizeData(pbmc, assay = 'GeneActivity')
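# Note: GeneActivity() approximates per-gene expression from the ATAC data by
# counting fragments over each gene body plus its upstream promoter region
# (2 kb by default), giving an RNA-comparable 'GeneActivity' assay.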
## Add developmental stages information
ageGroup <- rep('early fetal', length = ncol(pbmc))
sampleID <- pbmc$orig.ident
ageGroup[which(sampleID %in% c('4', '8'))] <- 'late fetal'
ageGroup[which(sampleID %in% c('4413', '4422'))] <- 'infancy'
ageGroup[which(sampleID %in% c('6032', '5977'))] <- 'childhood'
ageGroup[which(sampleID %in% c('6007', '5936'))] <- 'adolescence'
ageGroup[which(sampleID %in% c('150666', '150656'))] <- 'adulthood'
ageGroup <- factor(ageGroup,
levels = c('early fetal', 'late fetal', 'infancy', 'childhood', 'adolescence', 'adulthood'),
ordered = T)
pbmc$age.group = ageGroup
obj_all_processed <- pbmc
save(obj_all_processed, file = '../data_processed/obj_all_processed_v3.rda')
|
/preprocessing.R
|
permissive
|
xbendl/singlecell-multiomics-developmental-human-brain
|
R
| false | false | 9,098 |
r
|
library(data.table)
library(ggplot2)
library(fts)
library(Rmisc)
library(lubridate)
library(gtools)
library(dplyr)
library(zoo)
setwd("D://codes//Rfile//investment theory and practice//")
files <- list.files(".//factor_daily//return//")
factor.data <- NULL
for (file in files){
path <- paste0(".//factor_daily//return//",file)
factors <- fread(path)
factor.data <- rbind(factor.data,factors)
}
factor.data[,month:= as.numeric(substr(gsub("-","",as.character(trade_date)),1,6))]
# Multi-factor portfolio backtest (in-sample) -----------------------------------------------------------------
# Check whether the vol-managed strategy produces a significantly positive alpha
# 127 out of the 255 combinations produce positive alpha
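# Approach (volatility management in the spirit of Moreira & Muir, 2017):
# scale each month's exposure by the inverse of recently realized variance,
# then regress the managed returns on the raw returns; a positive, significant
# intercept (alpha) indicates that volatility timing adds value.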
#factor.list <- unique(factor.data$name)
total.result <- NULL
factor.list <- c("turnover","pb","dv_ttm","tol_mv","tol_skew","ivol","dROA","dROE","ROE","ROA")
for (n.comb in 1:10){
factor.group <- combinations(length(factor.list),n.comb,factor.list)
result <- NULL
for (i in 1:nrow(factor.group)){
factor_to_test <- as.vector(factor.group[i,])
long_short_dt <- factor.data[(decile == 1)&(month>200301)]
test_dt <- dcast(long_short_dt[name%in%factor_to_test,.(trade_date,name,ret)],trade_date~name) %>% na.omit()
test_dt$mv <- rowMeans(test_dt[,factor_to_test,with=F])
test_dt <- test_dt[,c("trade_date","mv")]
colnames(test_dt) <- c("trade_date","ret")
test_dt[,vol := frollapply(ret,22,var)]
test_dt[,month := as.numeric(substr(gsub('-','',as.character(trade_date)),1,6))]
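    # monthly weight = inverse of the 22-day realized variance observed on the
    # first trading day of the month (vol[1]), held fixed for that month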
vol_dt <- test_dt[,.(weights = 1/vol[1]),by=month]
test_dt <- vol_dt[test_dt,on=c("month")]
test_dt$wret <- test_dt$weights * test_dt$ret
test_dt <- na.omit(test_dt)
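    # in-sample normalization: rescale the managed returns so their full-sample
    # volatility matches the unmanaged strategy, making Sharpe ratios comparable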
c <- sd(test_dt$ret)/sd(test_dt$wret)
test_dt$wret <- test_dt$wret * c
test_dt$cum.ret <- cumprod(1+test_dt$ret)
test_dt$wcum.ret <- cumprod(1+test_dt$wret)
vol.managed.sharpe <- sqrt(240)*mean(test_dt$wret)/sd(test_dt$wret)
raw.sharpe <- sqrt(240)*mean(test_dt$ret)/sd(test_dt$ret)
# monthly return
# ao <- test_dt[,.(vol.cum.Mret = cumprod(1+wret),raw.cum.Mret = cumprod(1+ret)),by=month]
# ao <- ao[,.(vol.Mret = tail(vol.cum.Mret-1,1),raw.Mret = tail(raw.cum.Mret-1,1)),by=month]
# model <- summary(lm(ao$vol.Mret~ao$raw.Mret))
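    # alpha test: t and p below are the t-statistic and p-value of the intercept
    # from regressing managed daily returns on raw daily returns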
model <- summary(lm(test_dt$wret~test_dt$ret))
t <- model$coefficients[1,3]
p <- model$coefficients[1,4]
#rows <- c(factor_to_test,vol.managed.sharpe,row.sharpe,t,p)
rows <- data.table("var1" = factor_to_test[1],
"var2" = factor_to_test[2],
"var3" = factor_to_test[3],
"var4" = factor_to_test[4],
"var5" = factor_to_test[5],
"var6" = factor_to_test[6],
"var7" = factor_to_test[7],
"var8" = factor_to_test[8],
"var9" = factor_to_test[9],
"var10" = factor_to_test[10],
"vol.sharpe" = vol.managed.sharpe,
"raw.sharpe" = raw.sharpe,
"t" = t,
"p" = p)
result <- rbind(result,rows)
}
total.result <- rbind(total.result,result)
}
ao <- total.result[(p<0.1)&(t>0),]
ao1 <- total.result[(p<0.05)&(t>0),]
ao2 <- total.result[(p<0.01)&(t>0),]
ao3 <- total.result[(p<0.001)&(t>0),]
ao4 <- total.result[(vol.sharpe>raw.sharpe),]
# out of sample -----------------------------------------------------------
total.result <- NULL
factor.list <- c("turnover","pb","dv_ttm","tol_mv","tol_skew","ivol","dROA","dROE","ROE","ROA")
for (n.comb in 1:10){
factor.group <- combinations(length(factor.list),n.comb,factor.list)
result <- NULL
for (i in 1:nrow(factor.group)){
factor_to_test <- as.vector(factor.group[i,])
long_short_dt <- factor.data[(decile == 1)]
test_dt <- dcast(long_short_dt[name%in%factor_to_test,.(trade_date,name,ret)],trade_date~name) %>% na.omit()
test_dt$mv <- rowMeans(test_dt[,factor_to_test,with=F])
test_dt <- test_dt[,c("trade_date","mv")]
colnames(test_dt) <- c("trade_date","ret")
test_dt[,month := as.numeric(substr(gsub('-','',as.character(trade_date)),1,6))]
test_dt[,vol := frollapply(ret,22,var)]
vol_dt <- test_dt[,.(weights = 1/vol[1]),by=month]
test_dt <- vol_dt[test_dt,on=c("month")]
test_dt$wret <- test_dt$weights * test_dt$ret
test_dt <- na.omit(test_dt)
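    # out-of-sample scaling: the volatility-matching constant c is re-estimated
    # from a trailing 960-day window and fixed at the start of each month,
    # replacing the full-sample constant used in the in-sample test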
test_dt[,cc:=frollapply(ret,960,sd)/frollapply(wret,960,sd)]
cc_dt <- test_dt[,.(c = cc[1]),by=month]
test_dt <- cc_dt[test_dt,on=c("month")]
test_dt$wret <- test_dt$wret * test_dt$c
test_dt <- na.omit(test_dt)
test_dt$cum.ret <- cumprod(1+test_dt$ret)
test_dt$wcum.ret <- cumprod(1+test_dt$wret)
vol.managed.return <- 240 * mean(test_dt$wret)
raw.return <- 240 * mean(test_dt$ret)
vol.managed.sigma <- sqrt(240) * sd(test_dt$wret)
raw.sigma <- sqrt(240) * sd(test_dt$ret)
vol.managed.sharpe <- sqrt(240)*mean(test_dt$wret)/sd(test_dt$wret)
raw.sharpe <- sqrt(240)*mean(test_dt$ret)/sd(test_dt$ret)
#monthly return
# ao <- test_dt[,.(vol.cum.Mret = cumprod(1+wret),raw.cum.Mret = cumprod(1+ret)),by=month]
# ao <- ao[,.(vol.Mret = tail(vol.cum.Mret-1,1),raw.Mret = tail(raw.cum.Mret-1,1)),by=month]
# model <- summary(lm(ao$vol.Mret~ao$raw.Mret))
model <- summary(lm(test_dt$wret~test_dt$ret))
t <- model$coefficients[1,3]
p <- model$coefficients[1,4]
#rows <- c(factor_to_test,vol.managed.sharpe,row.sharpe,t,p)
rows <- data.table("var1" = factor_to_test[1],
"var2" = factor_to_test[2],
"var3" = factor_to_test[3],
"var4" = factor_to_test[4],
"var5" = factor_to_test[5],
"var6" = factor_to_test[6],
"var7" = factor_to_test[7],
"var8" = factor_to_test[8],
"var9" = factor_to_test[9],
"var10" = factor_to_test[10],
"vol.return" = vol.managed.return,
"raw.return" = raw.return,
"vol.sigma" = vol.managed.sigma,
"raw.sigma" = raw.sigma,
"vol.sharpe" = vol.managed.sharpe,
"raw.sharpe" = raw.sharpe,
"t" = t,
"p" = p)
result <- rbind(result,rows)
}
total.result <- rbind(total.result,result)
}
ao <- total.result[(p<0.1)&(t>0),]
ao1 <- total.result[(p<0.05)&(t>0),]
ao2 <- total.result[(p<0.01)&(t>0),]
ao3 <- total.result[(p<0.001)&(t>0),]
ao4 <- total.result[(vol.sharpe>raw.sharpe),]
#excess_sharpe <-
mean(total.result$vol.sharpe - total.result$raw.sharpe)
#excess_return <-
mean(total.result$vol.return - total.result$raw.return)
#describe <-
fwrite(total.result,"c://users//coolgan//desktop//total_result.csv")
t.test(total.result$vol.sigma - total.result$raw.sigma)
ao <- total.result$vol.return - total.result$raw.return
length(ao[(ao)>0])
# in-sample visualization -----------------------------------------------------------
factor_to_test <- c("turnover")
long_short_dt <- factor.data[(decile == 1)&(month>200501)]
test_dt <- dcast(long_short_dt[name%in%factor_to_test,.(trade_date,name,ret)],trade_date~name) %>% na.omit()
test_dt$mv <- rowMeans(test_dt[,factor_to_test,with=F])
test_dt <- test_dt[,c("trade_date","mv")]
colnames(test_dt) <- c("trade_date","ret")
test_dt[,vol := frollapply(ret,22,var)]
test_dt[,month := as.numeric(substr(gsub('-','',as.character(trade_date)),1,6))]
vol_dt <- test_dt[,.(weights = 1/vol[1]),by=month]
test_dt <- vol_dt[test_dt,on=c("month")]
test_dt$wret <- test_dt$weights * test_dt$ret
test_dt <- na.omit(test_dt)
c <- sd(test_dt$ret)/sd(test_dt$wret)
test_dt$wret <- test_dt$wret * c
test_dt$cum.ret <- cumprod(1+test_dt$ret)
test_dt$wcum.ret <- cumprod(1+test_dt$wret)
long_short_dt <- test_dt
long_short_dt$trade_date <- ymd(long_short_dt$trade_date)
p_list <- list()
# net value curve
p_list[[1]] <- ggplot()+
geom_line(data = long_short_dt,aes(x = trade_date,y = wcum.ret,color="vol managed"))+
geom_line(data = long_short_dt,aes(x = trade_date,y = cum.ret,color="buy and hold"))+
ggtitle("Cumulative Performance")+
theme(plot.title = element_text(hjust = 0.5),
legend.position = c(0.9,0.3),
legend.title = element_blank(),
legend.background = element_rect(fill = "transparent"),
legend.key = element_rect(colour = "transparent", fill = "transparent"))+
scale_colour_manual("",values = c("vol managed" = "red","buy and hold"="blue"))+
xlab("")+ylab("Cumulative return")+
scale_y_log10()
# one-year rolling average return
p_list[[2]] <- ggplot()+
geom_line(data = long_short_dt[,roll_wret:=frollmean(wret,240)] %>% na.omit(),aes(x = trade_date,y = roll_wret,color = "vol managed"))+
geom_line(data = long_short_dt[,roll_ret:=frollmean(ret,240)] %>% na.omit(),aes(x = trade_date,y = roll_ret,color = "buy and hold"))+
ggtitle("One-Year Rolling Average Returns")+
theme(plot.title = element_text(hjust = 0.5),
legend.position = c(0.4,0.15),
legend.title = element_blank(),
        legend.background = element_rect(fill = "transparent"), # line width
legend.key = element_rect(colour = "transparent", fill = "transparent"))+
scale_colour_manual("",values = c("vol managed" = "red","buy and hold"="blue"))+
xlab("")+ylab("Average Return")
# maxdrawdown curve
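# drawdown_t = (V_t - max_{s<=t} V_s) / max_{s<=t} V_s, computed from the
# cumulative value series with an expanding maximum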
temp <- fts(index = long_short_dt$trade_date,data = long_short_dt[,.(wcum.ret,cum.ret)])
temp$wcum.drawdown <- (temp$wcum.ret - expanding.max(temp$wcum.ret))/expanding.max(temp$wcum.ret)
temp$cum.drawdown <- (temp$cum.ret - expanding.max(temp$cum.ret))/expanding.max(temp$cum.ret)
temp2 <- as.data.table(temp)
p_list[[3]] <- ggplot()+
geom_line(data = temp2,aes(x = asofdate,y = wcum.drawdown,color="vol managed"))+
geom_line(data = temp2,aes(x = asofdate,y = cum.drawdown,color = "buy and hold"))+
ggtitle("Drawdowns")+
theme(plot.title = element_text(hjust = 0.5),
legend.position = c(0.8,0.15),
legend.title = element_blank(),
legend.background = element_rect(fill = "transparent"),
legend.key = element_rect(colour = "transparent", fill = "transparent"))+
scale_colour_manual("",values = c("vol managed" = "red","buy and hold"="blue"))+
xlab("")+ylab("Drawdowns")
multiplot(plotlist = p_list[1:3], layout = matrix(c(1,1,2,3), nrow = 2, byrow = T))
View(temp)
# out-of-sample visualization ---------------------------------------------
ao4[vol.sharpe==max(vol.sharpe)]
factor_to_test <- c("tol_mv","ROE","tol_skew","turnover")
long_short_dt <- factor.data[(decile == 1)]
test_dt <- dcast(long_short_dt[name%in%factor_to_test,.(trade_date,name,ret)],trade_date~name) %>% na.omit()
test_dt$mv <- rowMeans(test_dt[,factor_to_test,with=F])
test_dt <- test_dt[,c("trade_date","mv")]
colnames(test_dt) <- c("trade_date","ret")
test_dt[,month := as.numeric(substr(gsub('-','',as.character(trade_date)),1,6))]
test_dt[,vol := frollapply(ret,22,var)]
vol_dt <- test_dt[,.(weights = 1/vol[1]),by=month]
test_dt <- vol_dt[test_dt,on=c("month")]
test_dt$wret <- test_dt$weights * test_dt$ret
test_dt <- na.omit(test_dt)
test_dt[,cc:=frollapply(ret,240,sd)/frollapply(wret,240,sd)]
cc_dt <- test_dt[,.(c = cc[1]),by=month]
test_dt <- cc_dt[test_dt,on=c("month")]
test_dt$wret <- test_dt$wret * test_dt$c
test_dt <- na.omit(test_dt)
# c <- sd(test_dt$ret)/sd(test_dt$wret)
# test_dt$wret <- test_dt$wret * c
test_dt$cum.ret <- cumprod(1+test_dt$ret)
test_dt$wcum.ret <- cumprod(1+test_dt$wret)
long_short_dt <- test_dt
long_short_dt$trade_date <- ymd(long_short_dt$trade_date)
index <- fread("index_daily.csv")
index <- index[,':='(trade_date=ymd(index$trade_date),
pct_chg = pct_chg/100)][,c("trade_date","pct_chg")]
long_short_dt <- index[long_short_dt,on=c("trade_date")]
long_short_dt$bench.cumret <- cumprod(long_short_dt$pct_chg+1)
#240*mean(long_short_dt$wret)/(sqrt(240)*sd(long_short_dt$wret))
p_list <- list()
# net value curve
p_list[[1]] <- ggplot()+
geom_line(data = long_short_dt,aes(x = trade_date,y = wcum.ret,color="vol managed"))+
geom_line(data = long_short_dt,aes(x = trade_date,y = cum.ret,color="buy and hold"))+
geom_line(data = long_short_dt,aes(x = trade_date,y = bench.cumret,color="benchmark"))+
ggtitle("Cumulative Performance")+
theme(plot.title = element_text(hjust = 0.5),
legend.position = "bottom",
legend.title = element_blank(),
legend.background = element_rect(fill = "transparent"),
legend.key = element_rect(colour = "transparent", fill = "transparent"))+
scale_colour_manual("",values = c("vol managed" = "red",
"buy and hold"="blue",
"benchmark"="black"))+
xlab("")+ylab("Cumulative return")#+
scale_y_log10()
# one-year rolling average return
p_list[[2]] <- ggplot()+
geom_line(data = long_short_dt[,roll_wret:=frollmean(wret,240)] %>% na.omit(),aes(x = trade_date,y = roll_wret,color = "vol managed"))+
geom_line(data = long_short_dt[,roll_ret:=frollmean(ret,240)] %>% na.omit(),aes(x = trade_date,y = roll_ret,color = "buy and hold"))+
  geom_line(data = long_short_dt[,roll_bench:=frollmean(pct_chg,240)] %>% na.omit(),aes(x = trade_date,y = roll_bench,color = "benchmark"))+
ggtitle("One-Year Rolling Average Returns")+
theme(plot.title = element_text(hjust = 0.5),
legend.position = "none")+
#legend.title = element_blank(),
        #legend.background = element_rect(fill = "transparent"), # line width
#legend.key = element_rect(colour = "transparent", fill = "transparent"))+
scale_colour_manual("",values = c("vol managed" = "red",
"buy and hold"="blue",
"benchmark"="black"))+
xlab("")+ylab("Average Return")
# maxdrawdown curve
temp <- fts(index = long_short_dt$trade_date,data = long_short_dt[,.(wcum.ret,cum.ret,bench.cumret)])
temp$wcum.drawdown <- (temp$wcum.ret - expanding.max(temp$wcum.ret))/expanding.max(temp$wcum.ret)
temp$cum.drawdown <- (temp$cum.ret - expanding.max(temp$cum.ret))/expanding.max(temp$cum.ret)
temp$bench.drawdown <- (temp$bench.cumret - expanding.max(temp$bench.cumret))/expanding.max(temp$bench.cumret)
temp2 <- as.data.table(temp)
p_list[[3]] <- ggplot()+
geom_line(data = temp2,aes(x = asofdate,y = wcum.drawdown,color="vol managed"))+
geom_line(data = temp2,aes(x = asofdate,y = cum.drawdown,color = "buy and hold"))+
geom_line(data = temp2,aes(x = asofdate,y = bench.drawdown,color = "benchmark"))+
ggtitle("Drawdowns")+
theme(plot.title = element_text(hjust = 0.5),
legend.position = "none")+
#legend.title = element_blank(),
#legend.background = element_rect(fill = "transparent"),
#legend.key = element_rect(colour = "transparent", fill = "transparent"))+
scale_colour_manual("",values = c("vol managed" = "red",
"buy and hold"="blue",
"benchmark" = "black"))+
xlab("")+ylab("Drawdowns")
multiplot(plotlist = p_list[1:3], layout = matrix(c(1,1,2,3), nrow = 2, byrow = T))
# sharpe histogram
ggplot(data=melt(total.result[,c("vol.sharpe","raw.sharpe")]),aes(x=value))+
geom_histogram(aes(color = variable,fill = variable),alpha=0.4)+
scale_color_manual(values = c("#e9ecef", "#e9ecef"))+
scale_fill_manual(values = c("red", "blue"))+
xlab("sharpe ratio")+ylab("frequency")+
theme(legend.position = "bottom",
legend.title = element_blank())
ggplot()+
geom_histogram(data = total.result[,d.sharpe:=vol.sharpe-raw.sharpe],
aes(x=d.sharpe),fill="red",
color="#e9ecef",alpha=0.8)+
xlab("Difference of sharpe ratio")+ylab("frequency")
# return histogram
ggplot(data=melt(total.result[,c("vol.return","raw.return")]),aes(x=value))+
geom_histogram(aes(color = variable,fill = variable),alpha=0.4)+
scale_color_manual(values = c("#e9ecef", "#e9ecef"))+
scale_fill_manual(values = c("red", "blue"))+
xlab("return")+ylab("frequency")+
theme(legend.position = "bottom",
legend.title = element_blank())
ggplot()+
geom_histogram(data = total.result[,d.return:=vol.return-raw.return],
aes(x=d.return),fill="red",
color="#e9ecef",alpha=0.8)+
xlab("Difference of return")+ylab("frequency")
# sigma histogram
ggplot(data=melt(total.result[,c("vol.sigma","raw.sigma")]),aes(x=value))+
geom_histogram(aes(color = variable,fill = variable),alpha=0.4)+
scale_color_manual(values = c("#e9ecef", "#e9ecef"))+
scale_fill_manual(values = c("red", "blue"))+
xlab("sigma")+ylab("frequency")+
theme(legend.position = "bottom",
legend.title = element_blank())
ggplot()+
geom_histogram(data = total.result[,d.sigma:=vol.sigma-raw.sigma],
aes(x=d.sigma),fill="red",
color="#e9ecef",alpha=0.8)+
xlab("Difference of sigma")+ylab("frequency")
# c time series
ggplot()+
geom_line(data = unique(test_dt[,month:=ymd(month,truncated = 1)][,.(month,c)]),
aes(x = month,y = c))
fwrite(total.result,"./simulation_result/t_180.csv")
# further discussion ------------------------------------------------------
simu_result <- data.table(win_length = c(30,60,90,150,180,240,270,300,360,390,420,450,480,720,960,1200,2400),
win_ratio = c(480,595,708,902,1017,1017,1019,1012,839,367,420,167,283,0,0,0,0),
excess_sharpe = c(-0.003535527,0.003213709,0.005967851,
0.02625643,0.1284329,0.1194,0.1229948,0.09944962,
0.01896007,-0.01205235,-0.008979446,-0.02769992,-0.01759528,
-0.1536956,-0.28415,-0.2488994,-0.1833913),
excess_return = c(0.0198818,0.02240382,0.02683424,0.02906138,
0.06561102,0.0685,0.06833286,0.06559085,0.0305349,
0.01940536,0.01530657,0.00762139,0.01106782,-0.04177113,
-0.08740466,-0.0741429,-0.0417174
))
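# win_ratio is plotted below as a fraction of 1023 (= 2^10 - 1, the number of
# factor combinations tested); it presumably counts the combinations for which
# the vol-managed version beat buy-and-hold at that training window length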
ggplot()+
geom_line(data = simu_result[win_length<=1200],aes(x = win_length,y = win_ratio/1023),colour="blue",size=0.6)+
geom_point(data = simu_result[win_length<=1200],aes(x = win_length,y = win_ratio/1023),colour="blue",size=2.3)+
xlab("训练窗口长度(天)")+ylab("胜率")+scale_y_continuous(labels = scales::percent)+
theme_set(theme_bw())
#theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())
ggplot()+
geom_line(data = simu_result[win_length<=1200],aes(x = win_length,y = excess_sharpe),colour="blue",size=0.6)+
geom_point(data = simu_result[win_length<=1200],aes(x = win_length,y = excess_sharpe),colour="blue",size=2.3)+
xlab("训练窗口长度(天)")+ylab("平均超额夏普")+scale_y_continuous(labels = scales::percent)+
theme_set(theme_bw())
ggplot()+
geom_line(data = simu_result[win_length<=1200],aes(x = win_length,y = excess_return),colour="blue",size=0.6)+
geom_point(data = simu_result[win_length<=1200],aes(x = win_length,y = excess_return),colour="blue",size=2.3)+
xlab("训练窗口长度(天)")+ylab("平均超额年化收益")+scale_y_continuous(labels = scales::percent)+
theme_set(theme_bw())
ggplot()+
geom_line(data = simu_result,aes(x = win_length,y = win_ratio))
# try betaplus 1000 index -------------------------------------------------
factor.data <- fread("C://users//coolgan//desktop//betaplus-1000-indexdaily-1.csv")
factor.data$trade_date <- as.Date(factor.data$trade_date,format="%Y/%m/%d")
factor.ret <- as.data.table(lapply(factor.data[,2:9],diff))/na.omit(lag(factor.data[,2:9],1))
factor.ret$trade_date <- factor.data$trade_date[2:nrow(factor.data)]
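# note: 1:length(x)-1 parses as (1:length(x)) - 1 = 0:(n-1); the zero index is
# dropped, so this keeps every column except the last one (trade_date)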
factor.list <- colnames(factor.ret)[1:length(colnames(factor.ret))-1]
total.result <- NULL
for (n.comb in 1:8){
factor.group <- combinations(length(factor.list),n.comb,factor.list)
result <- NULL
for (i in 1:nrow(factor.group)){
factor_to_test <- as.vector(factor.group[i,])
test_dt <- factor.ret[,c("trade_date",factor_to_test),with=F]
test_dt$mv <- rowMeans(factor.ret[,factor_to_test,with=F])
test_dt <- test_dt[,c("trade_date","mv")]
colnames(test_dt) <- c("trade_date","ret")
test_dt[,vol:=frollapply(ret,22,var)]
test_dt[,month := as.numeric(substr(gsub('-','',as.character(trade_date)),1,6))]
vol_dt <- test_dt[,.(weights = 1/vol[1]),by=month]
test_dt <- vol_dt[test_dt,on=c("month")]
test_dt$wret <- test_dt$weights * test_dt$ret
test_dt <- na.omit(test_dt)
c <- sd(test_dt$ret)/sd(test_dt$wret)
test_dt$wret <- test_dt$wret * c
test_dt$cum.ret <- cumprod(1+test_dt$ret)
test_dt$wcum.ret <- cumprod(1+test_dt$wret)
vol.managed.sharpe <- sqrt(240)*mean(test_dt$wret)/sd(test_dt$wret)
raw.sharpe <- sqrt(240)*mean(test_dt$ret)/sd(test_dt$ret)
model <- summary(lm(test_dt$wret~test_dt$ret))
t <- model$coefficients[1,3]
p <- model$coefficients[1,4]
#rows <- c(factor_to_test,vol.managed.sharpe,row.sharpe,t,p)
rows <- data.table("var1" = factor_to_test[1],
"var2" = factor_to_test[2],
"var3" = factor_to_test[3],
"var4" = factor_to_test[4],
"var5" = factor_to_test[5],
"var6" = factor_to_test[6],
"var7" = factor_to_test[7],
"var8" = factor_to_test[8],
"var9" = factor_to_test[9],
"var10" = factor_to_test[10],
"vol.sharpe" = vol.managed.sharpe,
"raw.sharpe" = raw.sharpe,
"t" = t,
"p" = p)
result <- rbind(result,rows)
}
total.result <- rbind(total.result,result)
}
ao <- total.result[(p<0.1)&(vol.sharpe>raw.sharpe)]
#
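# NOTE: this block appears to assume the original long-short factor.data (with
# decile/name/cum.ret columns) rather than the betaplus index table loaded
# above, so factor.data may need to be re-read before running it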
total <- NULL
ret_dt <- factor.data[(decile==10)&(name=="ROE")]
temp <- fts(index = ymd(ret_dt$trade_date),data = ret_dt[,.(cum.ret,ret)])
temp$cum.drawdown <- (temp$cum.ret - expanding.max(temp$cum.ret))/expanding.max(temp$cum.ret)
result.analysis <- ret_dt[,.(name="dv_ttm",
annual_mean_ret = 240*mean(ret),
annual_sd_ret = sqrt(240)*sd(ret),
t_ret = t.test(ret,mu=0)$statistic,
max.drawdown = max(-temp$cum.drawdown),
# max.drawdown = maxdrawdown(ret)$maxdrawdown,
# max.drawdown_from = .SD[ maxdrawdown(.SD[,ret])$from,date],
# max.drawdown_to = .SD[ maxdrawdown(.SD[,ret])$to,date],
win_ratio = length(.SD[ret>0,ret])/nrow(.SD)),by=c("decile")] %>% setorder(decile)
result.analysis$sharpe <- (result.analysis$annual_mean_ret)/(result.analysis$annual_sd_ret)
total <- rbind(total,result.analysis)
fwrite(total,"c://users//coolgan//desktop//long_description.csv")
|
/Volatility-Managed Portfolios Sometimes it Works/codes/vol-timing.R
|
no_license
|
coolgan/Quantitative-Finance
|
R
| false | false | 24,009 |
r
|
# Data Types
# Vector
# Lists
# Matrices
# Array
# DataFrames & Factors
my.var <- 432
is.vector(my.var)
# Characters
# Numeric and Integer
# Logical
# Complex
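# A minimal sketch of the data types listed above, with illustrative values:
vec <- c(1, 2, 3)                      # vector
lst <- list(name = "a", value = 1)     # list
mat <- matrix(1:6, nrow = 2)           # matrix
arr <- array(1:12, dim = c(2, 3, 2))   # array
df  <- data.frame(id = 1:3, grp = factor(c("a", "b", "a")))  # data frame with a factor
chr <- "hello"   # character
num <- 3.14      # numeric (double)
int <- 5L        # integer
lgl <- TRUE      # logical
cpx <- 2 + 3i    # complex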
|
/R/dataTypes.R
|
no_license
|
thedatatot/rahul_kumar
|
R
| false | false | 166 |
r
|
source("main.R")
# most 5 star reviewed companies
top5StarCompanies = reviews %>%
filter(stars == 5) %>%
group_by(business_id) %>%
summarise(Count = n()) %>%
arrange(desc(Count)) %>%
ungroup() %>%
mutate(BusinessID = reorder(business_id,Count)) %>%
head(10)
top5StarCompanies = inner_join(top5StarCompanies, restuarants)
top5StarCompanies %>%
mutate(name = reorder(name,Count)) %>%
ggplot(aes(x = name,y = Count)) +
geom_bar(stat='identity',colour="white", fill = fillColor) +
geom_text(aes(x = name, y = 1, label = paste0("(",Count,")",sep="")),
hjust=0, vjust=.5, size = 4, colour = 'black',
fontface = 'bold') +
  labs(x = 'Name of the Restaurant',
       y = 'Count',
       title = 'Name of the Restaurant and Count') +
coord_flip() +
theme_bw()
|
/src/5Star-Reviewed-Restaurants.R
|
no_license
|
HENRY-JERRY/mit-805-2020-yelp-project
|
R
| false | false | 806 |
r
|
#' Budget manager
#'
#' This package is a personal project to manage a budget
#'
#' @name budgetmanager
#'
#' @import magrittr
#'
NULL
|
/R/budgetmanager.R
|
no_license
|
denrou/budgetmanager
|
R
| false | false | 136 |
r
|
traf <- read.csv("flow_occ.txt")
nint <- nrow(traf) # number of 5-min intervals, 1740
## Suppose starts on March 14th, 2003, Friday at midnight (we don't know the time)
## then ends at 1am on Thur, March 20th
day <- rep(c("Friday", "Saturday", "Sunday", "Monday", "Tuesday",
"Wednesday", "Thursday"),
each = 24*12, length.out = nint)
time <- paste(rep(0:23, each = 12, length.out = nint),
rep(c("00", "05", seq(10, 55, by = 5)), length.out = nint),
sep = ":")
## reshape data
traf <- data.frame(Occ = with(traf, c(Occ1, Occ2, Occ3)),
Flow = with(traf, c(Flow1, Flow2, Flow3)),
lane = rep(1:3, each = nint),
day = rep(day, 3),
time = rep(time, 3))
traf$Speed <- with(traf, Flow/Occ)
save(traf, file = "flow_occ.RData")
|
/trafficJams/data-raw/flow_occ.R
|
no_license
|
debnolan/DynDocs
|
R
| false | false | 860 |
r
|
context("junit")
test_that("multiplication works", {
expect_equal(1 * 2, 2)
expect_equal(2 * 2, 3)
expect_equal(3 * 2, 6)
expect_equal(4 * 2, 6)
})
test_that("summation works", {
expect_equal(1 + 1, 2)
expect_equal(2 + 2, 5)
expect_equal(3 + 3, 6)
expect_equal(4 + 4, 8)
})
|
/tests/testthat/test-junit.R
|
no_license
|
yutannihilation/testthatJunitRporterTest
|
R
| false | false | 292 |
r
|
context("ifan vs dfan")
test_that("Can create FFTrees object with dfan", {
object <- FFTrees(diagnosis ~., data = heartdisease, algorithm = "dfan")
expect_is(object = object, class = "FFTrees")
})
test_that("Different results with ifan and dfan", {
trees_ifan <- FFTrees(diagnosis ~.,
data = heartdisease,
algorithm = "ifan")
trees_dfan <- FFTrees(diagnosis ~.,
data = heartdisease,
algorithm = "dfan")
expect_false(identical(trees_ifan$trees$definitions,
trees_dfan$trees$definitions))
})
|
/tests/testthat/test-ifan_v_dfan.R
|
no_license
|
Barardo/FFTrees
|
R
| false | false | 631 |
r
|
## ----eval=FALSE,include=FALSE-------------------------------------------------
## source("../../rnw2pdf.R")
## rnw2pdf("lecture-survival")
## rnw2pdf("lecture-survival",tangle=TRUE)
|
/lectures/survival/lecture-survival.R
|
no_license
|
rbchan/applied-popdy
|
R
| false | false | 185 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auth.R
\name{orcid_auth}
\alias{orcid_auth}
\alias{rorcid-auth}
\title{ORCID authorization}
\usage{
orcid_auth(scope = "/authenticate", reauth = FALSE,
redirect_uri = getOption("rorcid.redirect_uri"))
}
\arguments{
\item{scope}{(character) one or more scopes. default: \code{"/authenticate"}}
\item{reauth}{(logical) Force re-authorization?}
\item{redirect_uri}{(character) a redirect URI. optional}
}
\value{
a character string with the access token prefixed with "Bearer "
}
\description{
ORCID authorization
}
\details{
There are two ways to authorise with \pkg{rorcid}:
\itemize{
\item Use a token as a result of an OAuth authentication process. The token
is an alphanumeric UUID, e.g. \code{dc0a6b6b-b4d4-4276-bc89-78c1e9ede56e}. You
can get this token by running \code{orcid_auth()}, then storing that key
(the uuid alone, not the "Bearer " part) either as en environment
variable in your \code{.Renviron} file in your home directory, or as an R
option in your \code{.Rprofile} file. See \link{Startup} for more information.
Either an environment variable or an R option works. If we don't find
either we do the next option.
\item Interactively log in with OAuth. This doesn't require any input on
your part. We use a client id and client secret key to ping ORCID.org;
at which point you log in with your username/password; then we get back
a token (same as the above option). We don't know your username or
password, only the token that we get back. We cache that token locally
in a hidden file in whatever working directory you're in. If you delete
that file, or run the code from a new working directory, then we
re-authorize.
}
We recommend the former option. That is, get a token and store it as an
environment variable.
If both options above fail, we proceed without using authentication.
ORCID does not require authentication at this point, but may in the future -
this prepares you for when that happens :)
}
\section{ORCID OAuth Scopes}{
See \url{https://members.orcid.org/api/orcid-scopes} for more
}
\examples{
\dontrun{
x <- orcid_auth()
orcid_auth(reauth = TRUE)
#orcid_auth(scope = "/read-public", reauth = TRUE)
}
}
|
/man/orcid_auth.Rd
|
permissive
|
pkraker/rorcid
|
R
| false | true | 2,216 |
rd
|
library(TOAST)
library(peakRAM)
# config = "n_100_DE_pattern_2_1_1_replicate_1"
test_TOAST_TPM = function(config) {
  RDatafile = file.path("../simulation", config, "simulation.RData")
load(RDatafile)
# Will use: observed_TPM, rho_from_TPM, clinical_variables, adjusted_signature_gene_TPM (only for cell type names)
colnames(rho_from_TPM) = colnames(adjusted_signature_gene_TPM)
with_RIN = without_RIN = list()
empty_pval_matrix = matrix(nrow=nrow(observed_TPM), ncol=ncol(rho_from_TPM))
rownames(empty_pval_matrix) = rownames(observed_TPM)
colnames(empty_pval_matrix) = colnames(rho_from_TPM)
with_RIN$pval_matrix = without_RIN$pval_matrix = empty_pval_matrix
# Without RIN
without_RIN_bench = peakRAM({
cell_types = colnames(rho_from_TPM)
design = data.frame(group=gl(2, round(n/2)))
design_out = makeDesign(design, rho_from_TPM)
fitted_model = fitModel(design_out, observed_TPM)
for (cell_type in cell_types) {
res_table = csTest(fitted_model,
coef = "group",
cell_type = cell_type)
without_RIN$pval_matrix[, cell_type] = res_table[rownames(observed_TPM), "p_value"]
}
})
without_RIN$Elapsed_Time_sec = without_RIN_bench$Elapsed_Time_sec
without_RIN$Peak_RAM_Used_MiB = without_RIN_bench$Peak_RAM_Used_MiB
# With RIN
with_RIN_bench = peakRAM({
cell_types = colnames(rho_from_TPM)
design = data.frame(RIN=clinical_variables, group=gl(2, round(n/2)))
design_out = makeDesign(design, rho_from_TPM)
fitted_model = fitModel(design_out, observed_TPM)
for (cell_type in cell_types) {
res_table = csTest(fitted_model,
coef = "group",
cell_type = cell_type)
with_RIN$pval_matrix[, cell_type] = res_table[rownames(observed_TPM), "p_value"]
}
})
with_RIN$Elapsed_Time_sec = with_RIN_bench$Elapsed_Time_sec
with_RIN$Peak_RAM_Used_MiB = with_RIN_bench$Peak_RAM_Used_MiB
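  # p-value histograms: genes 1:2000 and 2001:10000 are plotted separately,
  # presumably the simulated DE genes and the null genes; the null p-values
  # should look roughly uniform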
pdf(file.path("../simulation", config, "TOAST_TPM_pvalue_distribution.pdf"), height=8, width=10)
par(mfrow=c(2,3))
hist(without_RIN$pval_matrix[1:2000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[1:2000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[1:2000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[2001:10000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[2001:10000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[2001:10000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[1:2000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[1:2000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[1:2000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[2001:10000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[2001:10000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[2001:10000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
dev.off()
save(with_RIN,
without_RIN,
file=file.path("../simulation", config, "TOAST_TPM_res.RData"))
}
|
/R/simulation_test_TOAST_TPM.R
|
permissive
|
Sun-lab/CARseq_pipelines
|
R
| false | false | 3,283 |
r
|
library(TOAST)
library(peakRAM)
# config = "n_100_DE_pattern_2_1_1_replicate_1"
test_TOAST_TPM = function(config) {
RDatafile = sprintf(file.path("../simulation", config, "simulation.RData"))
load(RDatafile)
# Will use: observed_TPM, rho_from_TPM, clinical_variables, adjusted_signature_gene_TPM (only for cell type names)
colnames(rho_from_TPM) = colnames(adjusted_signature_gene_TPM)
with_RIN = without_RIN = list()
empty_pval_matrix = matrix(nrow=nrow(observed_TPM), ncol=ncol(rho_from_TPM))
rownames(empty_pval_matrix) = rownames(observed_TPM)
colnames(empty_pval_matrix) = colnames(rho_from_TPM)
with_RIN$pval_matrix = without_RIN$pval_matrix = empty_pval_matrix
# Without RIN
without_RIN_bench = peakRAM({
cell_types = colnames(rho_from_TPM)
design = data.frame(group=gl(2, round(n/2)))
design_out = makeDesign(design, rho_from_TPM)
fitted_model = fitModel(design_out, observed_TPM)
for (cell_type in cell_types) {
res_table = csTest(fitted_model,
coef = "group",
cell_type = cell_type)
without_RIN$pval_matrix[, cell_type] = res_table[rownames(observed_TPM), "p_value"]
}
})
without_RIN$Elapsed_Time_sec = without_RIN_bench$Elapsed_Time_sec
without_RIN$Peak_RAM_Used_MiB = without_RIN_bench$Peak_RAM_Used_MiB
# With RIN
with_RIN_bench = peakRAM({
cell_types = colnames(rho_from_TPM)
design = data.frame(RIN=clinical_variables, group=gl(2, round(n/2)))
design_out = makeDesign(design, rho_from_TPM)
fitted_model = fitModel(design_out, observed_TPM)
for (cell_type in cell_types) {
res_table = csTest(fitted_model,
coef = "group",
cell_type = cell_type)
with_RIN$pval_matrix[, cell_type] = res_table[rownames(observed_TPM), "p_value"]
}
})
with_RIN$Elapsed_Time_sec = with_RIN_bench$Elapsed_Time_sec
with_RIN$Peak_RAM_Used_MiB = with_RIN_bench$Peak_RAM_Used_MiB
pdf(file.path("../simulation", config, "TOAST_TPM_pvalue_distribution.pdf"), height=8, width=10)
par(mfrow=c(2,3))
hist(without_RIN$pval_matrix[1:2000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[1:2000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[1:2000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[2001:10000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[2001:10000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(without_RIN$pval_matrix[2001:10000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[1:2000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[1:2000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[1:2000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[2001:10000, 1], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[2001:10000, 2], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
hist(with_RIN$pval_matrix[2001:10000, 3], breaks=seq(0, 1, by = 0.05), xlim=c(0,1))
dev.off()
save(with_RIN,
without_RIN,
file=file.path("../simulation", config, "TOAST_TPM_res.RData"))
}
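# Usage sketch (hypothetical call; assumes ../simulation/<config>/simulation.RData exists,
# with the config string matching the example in the comment near the top of this file):
# test_TOAST_TPM("n_100_DE_pattern_2_1_1_replicate_1")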
|
# --------------------------------------------------------
# ARGUMENTS:
# data - a dataframe
# respvar - a string; variable name of the response variable
# env - a string; variable name of the environment variable (not used by this function)
# is.random - logical; indicates whether genotype/treatment is random or not; default value is FALSE (FIXED factor)
# result - output of single environment analysis
#
# Author: Alaine Gulles
# --------------------------------------------------------
graph.mea1s.diagplots <- function(data, respvar, is.random = FALSE, result) UseMethod("graph.mea1s.diagplots")
graph.mea1s.diagplots.default <- function(data, respvar, is.random = FALSE, result) {
#dir.create("plots")
#create diag plots
for (i in (1:length(respvar))) {
xlabel = respvar[i]
cc = rgb(0.3, 0.3, 0.7, 0.2)
if (is.random) {
filename1 = paste(getwd(),"/diagPlotsMea1s_",respvar[i], "_random.png",sep="");
} else filename1 = paste(getwd(),"/diagPlotsMea1s_",respvar[i], "_fixed.png",sep="");
residValues<-result$output[[i]]$residuals
fittedValues<-result$output[[i]]$fitted.values
if (!is.null(residValues) & (!is.null(fittedValues))) {
png(filename = filename1); par(mfrow = c(2,2)) #par(mfrow = n2mfrow(length(respvar)*nlevels(data[,match(env, names(data))])));
#scatterplot of residuals against fitted values
plot(result$output[[i]]$residuals~result$output[[i]]$fitted.values, xlab = "Predicted Values", pch = 15, cex = 0.7, col = cc, ylab = "Residuals", main = "Scatterplot of Residuals \nagainst Fitted Values");
#qqplot of residuals
      qqnorm(result$output[[i]]$residuals, main = "Normal Q-Q Plot of Residuals", sub = xlabel); qqline(result$output[[i]]$residuals, col = 2)
#freq dist of residuals
hist(result$output[[i]]$residuals, main = "Histogram of Residuals", col = cc, xlab = "Residual", ylab = 'Frequency' )
#create blank plot
#plot(seq(1:10)~seq(1:10), type="n", boxed = FALSE, axes=FALSE, xlab="", ylab="")
plot(seq(1:10)~seq(1:10), type="n", bty = "n", axes=FALSE, xlab="", ylab="")
if (is.random) {
noteString <- paste("NOTE: Residuals plotted are taken from the model where genotype is random and response variable = ", respvar[i], ".", sep="")
} else {
noteString <- paste("NOTE: Residuals plotted are taken from the model where genotype is fixed and response variable = ", respvar[i], ".", sep="")
}
text(5,7,paste(strwrap(noteString,width=30), sep="", collapse="\n"))
dev.off();
}
}
}
|
/R3.0.2 Package Creation/PBTools/R/graph_mea1s_diagplots.R
|
no_license
|
djnpisano/RScriptLibrary
|
R
| false | false | 2,559 |
r
|
# --------------------------------------------------------
# ARGUMENTS:
# data - a dataframe
# respvar - a string; variable name of the response variable
# env - a string; variable name of the environment variable (not used by this function)
# is.random - logical; indicates whether genotype/treatment is random or not; default value is FALSE (FIXED factor)
# result - output of single environment analysis
#
# Author: Alaine Gulles
# --------------------------------------------------------
graph.mea1s.diagplots <- function(data, respvar, is.random = FALSE, result) UseMethod("graph.mea1s.diagplots")
graph.mea1s.diagplots.default <- function(data, respvar, is.random = FALSE, result) {
#dir.create("plots")
#create diag plots
for (i in (1:length(respvar))) {
xlabel = respvar[i]
cc = rgb(0.3, 0.3, 0.7, 0.2)
if (is.random) {
filename1 = paste(getwd(),"/diagPlotsMea1s_",respvar[i], "_random.png",sep="");
} else filename1 = paste(getwd(),"/diagPlotsMea1s_",respvar[i], "_fixed.png",sep="");
residValues<-result$output[[i]]$residuals
fittedValues<-result$output[[i]]$fitted.values
if (!is.null(residValues) & (!is.null(fittedValues))) {
png(filename = filename1); par(mfrow = c(2,2)) #par(mfrow = n2mfrow(length(respvar)*nlevels(data[,match(env, names(data))])));
#scatterplot of residuals against fitted values
plot(result$output[[i]]$residuals~result$output[[i]]$fitted.values, xlab = "Predicted Values", pch = 15, cex = 0.7, col = cc, ylab = "Residuals", main = "Scatterplot of Residuals \nagainst Fitted Values");
#qqplot of residuals
      qqnorm(result$output[[i]]$residuals, main = "Normal Q-Q Plot of Residuals", sub = xlabel); qqline(result$output[[i]]$residuals, col = 2)
#freq dist of residuals
hist(result$output[[i]]$residuals, main = "Histogram of Residuals", col = cc, xlab = "Residual", ylab = 'Frequency' )
#create blank plot
#plot(seq(1:10)~seq(1:10), type="n", boxed = FALSE, axes=FALSE, xlab="", ylab="")
plot(seq(1:10)~seq(1:10), type="n", bty = "n", axes=FALSE, xlab="", ylab="")
if (is.random) {
noteString <- paste("NOTE: Residuals plotted are taken from the model where genotype is random and response variable = ", respvar[i], ".", sep="")
} else {
noteString <- paste("NOTE: Residuals plotted are taken from the model where genotype is fixed and response variable = ", respvar[i], ".", sep="")
}
text(5,7,paste(strwrap(noteString,width=30), sep="", collapse="\n"))
dev.off();
}
}
}
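# Usage sketch (hypothetical objects; the function only reads result$output[[i]]$residuals and
# result$output[[i]]$fitted.values for each response variable and writes
# diagPlotsMea1s_<respvar>_<fixed|random>.png into getwd()):
# fit <- lm(Sepal.Length ~ Species, data = iris)
# res <- list(output = list(list(residuals = residuals(fit), fitted.values = fitted(fit))))
# graph.mea1s.diagplots(data = iris, respvar = "Sepal.Length", is.random = FALSE, result = res)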
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wfm.R
\name{docs}
\alias{docs}
\alias{docs<-}
\title{Extract Document Names}
\usage{
docs(wfm)
docs(wfm) <- value
}
\arguments{
\item{wfm}{an object of type wfm}
\item{value}{replacement if assignment}
}
\value{
A list of document names.
}
\description{
Extracts the document names from a wfm object.
}
\author{
Will Lowe
}
\seealso{
\code{\link{wfm}}
}
|
/man/docs.Rd
|
no_license
|
markwestcott34/austin
|
R
| false | true | 435 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wfm.R
\name{docs}
\alias{docs}
\alias{docs<-}
\title{Extract Document Names}
\usage{
docs(wfm)
docs(wfm) <- value
}
\arguments{
\item{wfm}{an object of type wfm}
\item{value}{replacement if assignment}
}
\value{
A list of document names.
}
\description{
Extracts the document names from a wfm object.
}
\author{
Will Lowe
}
\seealso{
\code{\link{wfm}}
}
|
#' Creates a data frame with the predictions from each model
#' and the real choices
#' @param df.tree tree data frame with columns parent, parent.user and user
#' @param params.Gomez2013 list with the fitted alpha, beta and tau of the Gomez 2013 model
#' @param params.Aragon2017 list with the fitted alpha, beta, tau and gamma of the Aragon 2017 model
#' Note: rbindlist() below comes from the data.table package, which has to be loaded when this is called
compare_link_prediction <- function(df.tree, params.Gomez2013, params.Aragon2017){
  # We can compare with barabasi because the choice is independent of the alpha
parents <- df.tree$parent
parents.users <- df.tree$parent.user
users <- df.tree$user
# Results container
df.preds <- data.frame()
# skip root and first post
# Recall: t has already t posts before (because root t=0)
# Thus, post at t has t posts to choose from.
for(t in 2:length(parents)){
    # Data common to all models
b <- rep(0,t)
lags <- t:1
popularities <- 1 + tabulate(parents[1:(t-1)], nbins=t) # we follow Gomez. root also starts with 1
grandparents <- c(FALSE, (df.tree$parent.user[1:(t-1)]==as.character(users[t]))) # root is always false
chosen <- parents[t]
# Gomez2013 model
#######################
alpha <- params.Gomez2013$alpha
beta <- params.Gomez2013$beta
tau <- params.Gomez2013$tau
b[1] <- beta
probs.Gomez2013 <- alpha * popularities + b + tau^lags
predicted.Gomez2013 <- which.max(probs.Gomez2013)
like.Gomez2013 <- log(probs.Gomez2013[chosen]) - log(sum(probs.Gomez2013))
ranking.Gomez2013 <- rank(-probs.Gomez2013)[chosen]
# Aragon2017 model
#######################
# print(grandparents)
# check grandparents
grandparents <- c(FALSE, (df.tree$parent.user[1:(t-1)]==as.character(users[t]))) # root is always false
potential_brother <- df.tree[which(df.tree$user[1:(t-1)] == users[t]),]$parent ## select the parent of the potential brother to set it to FALSE in the grandparents vector
grandparents[potential_brother] <- FALSE
# print(grandparents)
alpha <- params.Aragon2017$alpha
beta <- params.Aragon2017$beta
tau <- params.Aragon2017$tau
gamma <- params.Aragon2017$gamma
b[1] <- beta
probs.Aragon2017 <- alpha * popularities + gamma*grandparents + b + tau^lags
predicted.Aragon2017 <- which.max(probs.Aragon2017)
like.Aragon2017 <- log(probs.Aragon2017[chosen]) - log(sum(probs.Aragon2017))
ranking.Aragon2017 <- rank(-probs.Aragon2017)[chosen] # the rank of an element is its position in the sorted vector.
# predicted: the most likely post
    # like: likelihood of the chosen post (the real observed one)
# ranking: position in which the real choice was placed. Best is 1. Worst is current thread size
df.preds <- rbindlist(list(df.preds,
data.frame(predicted.Gomez2013 = predicted.Gomez2013,
like.Gomez2013 = like.Gomez2013,
ranking.Gomez2013 = ranking.Gomez2013,
predicted.Aragon2017 = predicted.Aragon2017,
like.Aragon2017 = like.Aragon2017,
ranking.Aragon2017 = ranking.Aragon2017,
tree.size = t,
# thread= df.tree$thread[t],
chosen = chosen, ## this is the real parent
user = df.tree$user[t] )))
}
df.preds
}
|
/R/link_prediction.R
|
permissive
|
elaragon/generative-discussion-threads
|
R
| false | false | 3,312 |
r
|
#' Creates a data frame with the predictions from each model
#' and the real choices
#' @param df.tree tree data frame with columns parent, parent.user and user
#' @param params.Gomez2013 list with the fitted alpha, beta and tau of the Gomez 2013 model
#' @param params.Aragon2017 list with the fitted alpha, beta, tau and gamma of the Aragon 2017 model
#' Note: rbindlist() below comes from the data.table package, which has to be loaded when this is called
compare_link_prediction <- function(df.tree, params.Gomez2013, params.Aragon2017){
  # We can compare with barabasi because the choice is independent of the alpha
parents <- df.tree$parent
parents.users <- df.tree$parent.user
users <- df.tree$user
# Results container
df.preds <- data.frame()
# skip root and first post
# Recall: t has already t posts before (because root t=0)
# Thus, post at t has t posts to choose from.
for(t in 2:length(parents)){
    # Data common to all models
b <- rep(0,t)
lags <- t:1
popularities <- 1 + tabulate(parents[1:(t-1)], nbins=t) # we follow Gomez. root also starts with 1
grandparents <- c(FALSE, (df.tree$parent.user[1:(t-1)]==as.character(users[t]))) # root is always false
chosen <- parents[t]
# Gomez2013 model
#######################
alpha <- params.Gomez2013$alpha
beta <- params.Gomez2013$beta
tau <- params.Gomez2013$tau
b[1] <- beta
probs.Gomez2013 <- alpha * popularities + b + tau^lags
predicted.Gomez2013 <- which.max(probs.Gomez2013)
like.Gomez2013 <- log(probs.Gomez2013[chosen]) - log(sum(probs.Gomez2013))
ranking.Gomez2013 <- rank(-probs.Gomez2013)[chosen]
# Aragon2017 model
#######################
# print(grandparents)
# check grandparents
grandparents <- c(FALSE, (df.tree$parent.user[1:(t-1)]==as.character(users[t]))) # root is always false
potential_brother <- df.tree[which(df.tree$user[1:(t-1)] == users[t]),]$parent ## select the parent of the potential brother to set it to FALSE in the grandparents vector
grandparents[potential_brother] <- FALSE
# print(grandparents)
alpha <- params.Aragon2017$alpha
beta <- params.Aragon2017$beta
tau <- params.Aragon2017$tau
gamma <- params.Aragon2017$gamma
b[1] <- beta
probs.Aragon2017 <- alpha * popularities + gamma*grandparents + b + tau^lags
predicted.Aragon2017 <- which.max(probs.Aragon2017)
like.Aragon2017 <- log(probs.Aragon2017[chosen]) - log(sum(probs.Aragon2017))
ranking.Aragon2017 <- rank(-probs.Aragon2017)[chosen] # the rank of an element is its position in the sorted vector.
# predicted: the most likely post
    # like: likelihood of the chosen post (the real observed one)
# ranking: position in which the real choice was placed. Best is 1. Worst is current thread size
df.preds <- rbindlist(list(df.preds,
data.frame(predicted.Gomez2013 = predicted.Gomez2013,
like.Gomez2013 = like.Gomez2013,
ranking.Gomez2013 = ranking.Gomez2013,
predicted.Aragon2017 = predicted.Aragon2017,
like.Aragon2017 = like.Aragon2017,
ranking.Aragon2017 = ranking.Aragon2017,
tree.size = t,
# thread= df.tree$thread[t],
chosen = chosen, ## this is the real parent
user = df.tree$user[t] )))
}
df.preds
}
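# Usage sketch (all values below are made up): df.tree needs integer parent indices plus
# user/parent.user columns, and each params list carries the fields read above
# (alpha, beta, tau, and additionally gamma for Aragon2017). rbindlist() comes from data.table,
# so that package has to be attached before the function is called.
# library(data.table)
# toy.tree <- data.frame(parent = c(0, 1, 1, 2),
#                        user = c("u1", "u2", "u3", "u1"),
#                        parent.user = c("", "u1", "u1", "u2"),
#                        stringsAsFactors = FALSE)
# preds <- compare_link_prediction(toy.tree,
#                                  params.Gomez2013 = list(alpha = 1, beta = 0.5, tau = 0.9),
#                                  params.Aragon2017 = list(alpha = 1, beta = 0.5, tau = 0.9, gamma = 0.3))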
|
#' Extract multiple subclades from a phylogeny based on node numbers
#'
#' Given a tree and a vector or list of nodes, this function extracts subclades and
#' returns a list of smaller trees.
#'
#' @param tree An ape-style phylogenetic tree.
#' @param nodes A named list or vector of node numbers that subtend the clade in question
#' to extract. Can extract and create single species trees with appropriate branch length.
#' The input vector or list must have names!
#' @param root.edge Default is 0, meaning that no stem is left below the node from which
#' the subclade is extracted. If root.edge is 1, then a root edge (stem) is also included
#' with the extracted clade.
#'
#' @details Given a named list or vector of node numbers that subtend one or more clades,
#' extracts those clades and returns a list of trees. The nodes provided must have names,
#' though the tree itself does not need to have named nodes. This is because the output
#' trees are provided with these names, which are used in further downstream analyses.
#' If the node provided relates to a tip, then the tree that is returned contains the
#' stem length from the ancestor of that species to the tip in question.
#'
#' @return A named list of trees.
#'
#' @export
#'
#' @importFrom ape extract.clade
#'
#' @references Mast et al. 2015. Paraphyly changes understanding of timing and tempo of
#' diversification in subtribe Hakeinae (Proteaceae), a giant Australian plant radiation.
#' American Journal of Botany.
#'
#' @examples
#' #load a molecular tree up
#' data(bird.families)
#'
#' #define a named vector of node numbers, including a root
#' temp <- c()
#' temp[1] <- length(bird.families$tip.label) + 1
#' names(temp) <- "root"
#'
#' #create a data frame of all taxa from the phylogeny, and make up clade memberships
#' #for each. note that the names on this data frame differ from "groupings" in other
#' #functions
#' dummy.frame <- data.frame(species=bird.families$tip.label,
#' clade=c(rep("nonPasserine", 95), rep("suboscine", 9), rep("basalOscine", 13),
#' rep("oscine", 20)))
#'
#' #use the function getMRCAs() to determine the nodes subtending these named clades
#' nodes <- getMRCAs(bird.families, dummy.frame)
#' #unlist the results and append to the root node defined above
#' nodes <- append(temp, nodes)
#'
#' #use the function. note the effect of including a root edge or not. also note that
#' #because non-passerines are not monophyletic, the "subtree" corresponding to that
#' #"clade" is the whole tree
#' temp1 <- extractClade(bird.families, nodes)
#' temp2 <- extractClade(bird.families, nodes, root.edge=1)
#' plot(temp1$oscine, root.edge=TRUE)
#' plot(temp2$oscine, root.edge=TRUE)
extractClade <- function(tree, nodes, root.edge=0)
{
	#this function can sometimes be provided with lists of nodes
nodes <- unlist(nodes)
#set up a blank list to save results into
trees <- list()
for(i in 1:length(nodes))
{
#check whether the node relates to a tip or an internal node
if(nodes[i] <= length(tree$tip.label))
{
#if so, call your singleSpeciesTree function
tip <- tree$tip.label[nodes[i]]
trees[[i]] <- singleSpeciesTree(tree, tip)
}
else
{
trees[[i]] <- extract.clade(phy=tree, node=nodes[i], root.edge=root.edge)
}
}
names(trees) <- names(nodes)
trees
}
|
/R/extractClade.R
|
no_license
|
jesusNPL/addTaxa
|
R
| false | false | 3,302 |
r
|
#' Extract multiple subclades from a phylogeny based on node numbers
#'
#' Given a tree and a vector or list of nodes, this function extracts subclades and
#' returns a list of smaller trees.
#'
#' @param tree An ape-style phylogenetic tree.
#' @param nodes A named list or vector of node numbers that subtend the clade in question
#' to extract. Can extract and create single species trees with appropriate branch length.
#' The input vector or list must have names!
#' @param root.edge Default is 0, meaning that no stem is left below the node from which
#' the subclade is extracted. If root.edge is 1, then a root edge (stem) is also included
#' with the extracted clade.
#'
#' @details Given a named list or vector of node numbers that subtend one or more clades,
#' extracts those clades and returns a list of trees. The nodes provided must have names,
#' though the tree itself does not need to have named nodes. This is because the output
#' trees are provided with these names, which are used in further downstream analyses.
#' If the node provided relates to a tip, then the tree that is returned contains the
#' stem length from the ancestor of that species to the tip in question.
#'
#' @return A named list of trees.
#'
#' @export
#'
#' @importFrom ape extract.clade
#'
#' @references Mast et al. 2015. Paraphyly changes understanding of timing and tempo of
#' diversification in subtribe Hakeinae (Proteaceae), a giant Australian plant radiation.
#' American Journal of Botany.
#'
#' @examples
#' #load a molecular tree up
#' data(bird.families)
#'
#' #define a named vector of node numbers, including a root
#' temp <- c()
#' temp[1] <- length(bird.families$tip.label) + 1
#' names(temp) <- "root"
#'
#' #create a data frame of all taxa from the phylogeny, and make up clade memberships
#' #for each. note that the names on this data frame differ from "groupings" in other
#' #functions
#' dummy.frame <- data.frame(species=bird.families$tip.label,
#' clade=c(rep("nonPasserine", 95), rep("suboscine", 9), rep("basalOscine", 13),
#' rep("oscine", 20)))
#'
#' #use the function getMRCAs() to determine the nodes subtending these named clades
#' nodes <- getMRCAs(bird.families, dummy.frame)
#' #unlist the results and append to the root node defined above
#' nodes <- append(temp, nodes)
#'
#' #use the function. note the effect of including a root edge or not. also note that
#' #because non-passerines are not monophyletic, the "subtree" corresponding to that
#' #"clade" is the whole tree
#' temp1 <- extractClade(bird.families, nodes)
#' temp2 <- extractClade(bird.families, nodes, root.edge=1)
#' plot(temp1$oscine, root.edge=TRUE)
#' plot(temp2$oscine, root.edge=TRUE)
extractClade <- function(tree, nodes, root.edge=0)
{
	#this function can sometimes be provided with lists of nodes
nodes <- unlist(nodes)
#set up a blank list to save results into
trees <- list()
for(i in 1:length(nodes))
{
#check whether the node relates to a tip or an internal node
if(nodes[i] <= length(tree$tip.label))
{
#if so, call your singleSpeciesTree function
tip <- tree$tip.label[nodes[i]]
trees[[i]] <- singleSpeciesTree(tree, tip)
}
else
{
trees[[i]] <- extract.clade(phy=tree, node=nodes[i], root.edge=root.edge)
}
}
names(trees) <- names(nodes)
trees
}
|
library(shiny)
library(shinydashboard)
library(data.table)
library(ggplot2)
data = data.table(group = rep(c(1, 3, 6), each = 10), x = rep(1:10, times = 3), value = rnorm(30))
sidebar <- dashboardSidebar(
uiOutput("Sidebar")
)
body <- dashboardBody(
uiOutput("TABUI")
)
# Put them together into a dashboardPage
ui <- dashboardPage(
dashboardHeader(title = "test tabbed inputs"),
sidebar,
body,
skin = 'green'
)
server <- function(input, output) {
ntabs <- 3
tabnames <- paste0("tab", 1:ntabs) # "tab1", "tab2", ...
checkboxnames <- paste0(tabnames, 'group') # "tab1group", "tab2group", ...
plotnames <- paste0("plot", 1:ntabs) # "plot1", "plot2", ...
output$Sidebar <- renderUI({
Menus <- vector("list", ntabs)
for(i in 1:ntabs){
Menus[[i]] <- menuItem(tabnames[i], tabName = tabnames[i], icon = icon("dashboard"), selected = i==1)
}
do.call(function(...) sidebarMenu(id = 'sidebarMenu', ...), Menus)
})
output$TABUI <- renderUI({
Tabs <- vector("list", ntabs)
for(i in 1:ntabs){
Tabs[[i]] <- tabItem(tabName = tabnames[i],
fluidRow(
box(title = "Controls",
checkboxGroupInput(checkboxnames[i], 'group:', c(1, 3, 6), selected = 6, inline = TRUE),
width = 4),
box(plotOutput(paste0("plot",i)), width = 8)
)
)
}
do.call(tabItems, Tabs)
})
RV <- reactiveValues()
observe({
selection <- input[[paste0(input$sidebarMenu, 'group')]]
RV$plotData <- data[group %in% selection]
})
for(i in 1:ntabs){
output[[plotnames[i]]] <- renderPlot({
plotData <- RV$plotData
p <- ggplot(plotData, aes(x = x, y = value, colour = factor(group))) +
geom_line() + geom_point()
print(p)
})
}
}
shinyApp(ui, server)
|
/R/shiny-examples/08_dynamic-tabs/app5.R
|
no_license
|
michellymenezes/ladybird-umbrella
|
R
| false | false | 1,926 |
r
|
library(shiny)
library(shinydashboard)
library(data.table)
library(ggplot2)
data = data.table(group = rep(c(1, 3, 6), each = 10), x = rep(1:10, times = 3), value = rnorm(30))
sidebar <- dashboardSidebar(
uiOutput("Sidebar")
)
body <- dashboardBody(
uiOutput("TABUI")
)
# Put them together into a dashboardPage
ui <- dashboardPage(
dashboardHeader(title = "test tabbed inputs"),
sidebar,
body,
skin = 'green'
)
server <- function(input, output) {
ntabs <- 3
tabnames <- paste0("tab", 1:ntabs) # "tab1", "tab2", ...
checkboxnames <- paste0(tabnames, 'group') # "tab1group", "tab2group", ...
plotnames <- paste0("plot", 1:ntabs) # "plot1", "plot2", ...
output$Sidebar <- renderUI({
Menus <- vector("list", ntabs)
for(i in 1:ntabs){
Menus[[i]] <- menuItem(tabnames[i], tabName = tabnames[i], icon = icon("dashboard"), selected = i==1)
}
do.call(function(...) sidebarMenu(id = 'sidebarMenu', ...), Menus)
})
output$TABUI <- renderUI({
Tabs <- vector("list", ntabs)
for(i in 1:ntabs){
Tabs[[i]] <- tabItem(tabName = tabnames[i],
fluidRow(
box(title = "Controls",
checkboxGroupInput(checkboxnames[i], 'group:', c(1, 3, 6), selected = 6, inline = TRUE),
width = 4),
box(plotOutput(paste0("plot",i)), width = 8)
)
)
}
do.call(tabItems, Tabs)
})
RV <- reactiveValues()
observe({
selection <- input[[paste0(input$sidebarMenu, 'group')]]
RV$plotData <- data[group %in% selection]
})
for(i in 1:ntabs){
output[[plotnames[i]]] <- renderPlot({
plotData <- RV$plotData
p <- ggplot(plotData, aes(x = x, y = value, colour = factor(group))) +
geom_line() + geom_point()
print(p)
})
}
}
shinyApp(ui, server)
|
# testSource
# Load the library
require(RCurl)
# Provide the web address of the file:
fileURL <- getURL('https://raw.githubusercontent.com/SCBI-MigBirds/MigBirds/master/data/exampleBirdData.csv')
# Read in the data:
birdCounts <- read.csv(text = fileURL)
birdCounts
|
/testSource.R
|
no_license
|
SCBI-MigBirds/scbi-migbirds.github.io
|
R
| false | false | 274 |
r
|
# testSource
# Load the library
require(RCurl)
# Provide the web address of the file:
fileURL <- getURL('https://raw.githubusercontent.com/SCBI-MigBirds/MigBirds/master/data/exampleBirdData.csv')
# Read in the data:
birdCounts <- read.csv(text = fileURL)
birdCounts
|
# @title .onAttach
# @description Print the CampaRi startup message when the package is attached
# @keywords internal
.onAttach<- function (libname, pkgname){
packageStartupMessage(paste0(
" ==============================================================\n",
" \n",
" CAMPARI analysis tools \n",
" \n",
" \n",
" ---------------------------------------- \n",
" Analysing time series. \n",
" Version: ", utils::packageVersion("CampaRi"), "\n",
" ==============================================================\n"))
}
# .onLoad <- function(libname, pkgname) {
# op <- options()
# op.CampaRi <- list(
# CampaRi.data_management = "R"
# )
# toset <- !(names(op.CampaRi) %in% names(op))
# if(any(toset)) options(op.CampaRi[toset])
# # .setting_up_netcdf()
# # if(getOption("CampaRi.data_management")=='netcdf'){
# # nc_lib_dir <- "/usr/include/"
# # makevars_file <- paste0('MY_PKG_LIBS= -lnetcdff -I', nc_lib_dir, '
# # MY_PKG_FFLAGS= -fbacktrace -fbounds-check -fcheck-array-temporaries -g
# # mypackage_FFLAGS = $(FPICFLAGS) $(SHLIB_FFLAGS) $(FFLAGS)
# # all: $(SHLIB)
# # main_clu_adjl_mst.o: main_clu_adjl_mst.f90
# # $(FC) $(mypackage_FFLAGS) $(MY_PKG_FFLAGS) -c main_clu_adjl_mst.f90 -o main_clu_adjl_mst.o $(MY_PKG_LIBS)
# #
# # utilities_netcdf.o: utilities_netcdf.f90
# # $(FC) $(mypackage_FFLAGS) $(MY_PKG_FFLAGS) -c utilities_netcdf.f90 -o utilities_netcdf.o $(MY_PKG_LIBS)
# #
# # PKG_LIBS= -lnetcdff -I', nc_lib_dir, '
# # ')
# # cat(makevars_file, file = paste0(".R/Makevars"))
# # }
# invisible() #no output from this function
# }
# short hand for length
.lt <- function(x) return(length(x))
# check for single integer value
.isSingleInteger <- function(x) {
if(!is.numeric(x) || x%%1 != 0 || (is.null(dim(x)) && length(x) != 1) || (!is.null(dim(x))))
return(FALSE)
else
return(TRUE)
}
# check for single element (e.g. character)
.isSingleElement <- function(x) {
if((is.null(dim(x)) && length(x) != 1) || (!is.null(dim(x))))
return(FALSE)
else
return(TRUE)
}
.isSingleNumeric <- function(x) return(.isSingleElement(x) && is.numeric(x))
.isSingleChar <- function(x) return(.isSingleElement(x) && is.character(x))
# This routine is able to print a loading bar within a loop to know the work done
.print_consecutio <- function(itering, total_to_iter, tot_to_print = 10, other_to_print = "", timeit = T, time_first = NULL){
state_to_print <- floor(((itering*1.0)/total_to_iter)*tot_to_print)
white_not_to_print <- tot_to_print - state_to_print
if(timeit && is.null(time_first))
stop("If you want to time me you need to give me the starting of time (time_first).")
if(state_to_print%%1 == 0){
if(timeit){
time_spent <- proc.time() - time_first
time_spent <- time_spent["elapsed"]
time_spent <- round(as.numeric(time_spent), digits = 0)
if(time_spent>120)
time_needed <- paste0("(needs: ", round((time_spent*1.0/itering)*total_to_iter, digits = 0)," s)")
else
time_needed <- ""
if(time_spent != 0)
time_spent <- paste0(" Time spent: ", time_spent , " s")
else
time_spent <- ""
other_to_print <- paste(time_spent, time_needed, other_to_print)
}
string_to_print <- "\r|"
if(state_to_print != 0){
for(eq in 1:state_to_print)
string_to_print <- paste0(string_to_print, "=")
}
if(state_to_print != tot_to_print){
for(emp in 1:white_not_to_print)
string_to_print <- paste0(string_to_print, " ")
}
string_to_print <- paste0(string_to_print,"| ", floor((state_to_print*100)/tot_to_print), "% ", other_to_print)
cat(string_to_print, sep = "")
}
}
# check for install_campari()
.get_os <- function(){
sysinf <- Sys.info()
if (!is.null(sysinf)){
os <- sysinf['sysname']
if (os == 'Darwin')
os <- "osx"
} else { ## mystery machine
os <- .Platform$OS.type
if (grepl("^darwin", R.version$os))
os <- "osx"
if (grepl("linux-gnu", R.version$os))
os <- "linux"
}
tolower(os)
}
# binary search for true-false. It is 100 times faster than %in%
.BiSearch <- function(table, key, start.idx = 1, end.idx = length(table),
tol = .Machine$double.eps ^ 0.5,
check = TRUE) {
# Takes sorted (in ascending order) vectors
if (check) stopifnot(is.vector(table), is.numeric(table))
m <- as.integer(ceiling((end.idx + start.idx) / 2)) # Midpoint
if (table[m] > key + tol) {
    if (start.idx >= end.idx) return(FALSE)
Recall(table, key, start.idx = start.idx, end.idx = m - 1L, tol = tol, check = FALSE)
} else if (table[m] < key - tol) {
    if (start.idx >= end.idx) return(FALSE)
Recall(table, key, start.idx = m + 1L, end.idx = end.idx, tol = tol, check = FALSE)
} else return(TRUE)
}
# normalize
.normalize <- function(x, xmax = NULL, xmin = NULL) { # not equivalent of scale(x, center = F)
# if(.isSingleElement(x)) return(1)
if(is.null(xmax)) xmax <- max(x)
if(is.null(xmin)) xmin <- min(x)
if(xmin == xmax) return(x/xmax)
else return((x*1.0 - xmin)/(xmax - xmin))
}
# cutting the 0s
.cut0 <- function(x) { x[x<0] <- 0; x }
# fit a polynomial trend (degree polydeg) and return the normalized detrended values
.denaturate <- function(yyy, xxx, polydeg = 7, plotit = FALSE){
# yyy <- kin.pl
# xxx <- seq(lpi)
q.mod <- lm(yyy ~ poly(xxx, polydeg, raw=TRUE))
new_yyy <- .normalize(yyy - stats::predict(q.mod))
if(plotit){
plot(xxx, yyy, type = 'l', ylim = c(0,1))
abline(lm(yyy ~ xxx), col = 'darkblue') # linear
lines(xxx, stats::predict(q.mod), col = "darkgreen", lwd = 2)
lines(xxx, new_yyy, col = "red", lwd = 3)
}
return(new_yyy)
}
# checking function for output command (intern or not, i.e. talkative or not)
.check_cmd.out <- function(cmd.out, intern = FALSE){
if(intern) {
return(!is.null(attr(cmd.out, which = 'status')))
}else{
if(!.isSingleInteger(cmd.out)) stop('Something went wrong. The command output is not an integer even if intern is FALSE.')
return(cmd.out > 0)
}
}
# simple double separator creator; something like the following.
# > a <- .get_adjusted_separators(' a ', 11)
# > cat(a$sep1, '\n', a$sep2, sep = '')
# ~~~~ a ~~~~
# ~~~~~~~~~~~
.get_adjusted_separators <- function(tit, length_it = 68){
if(length_it%%2) length_it <- length_it + 1 # simple check on the lengths
  if(nchar(tit) > length_it) stop('Please insert correctly the final length. It was too short in comparison to the title length.')
if(nchar(tit)%%2 != 0) nchar_sep <- length_it - 1
else nchar_sep <- length_it
half_it <- (nchar_sep - nchar(tit))/2
stp1 <- ""
for(u in 1:half_it) stp1 <- paste0(stp1, "~")
stp1 <- paste0(stp1, tit, stp1)
stp2 <- ""
for(u in 1:nchar_sep) stp2 <- paste0(stp2, "~")
stp2 <- paste0(stp2)
return(list('sep1' = stp1, 'sep2' = stp2))
}
# create annotation vector from barrier vector # not used at the moment
.vec_from_barriers <- function(bar.vec, label.vec = NULL, reorder.index = FALSE){ # , end.point = NULL (I can put it inside the bar.vec)
# bar.vec must be the position on the PI!!! not the lengths of stretches
stopifnot(all(bar.vec > 1))
# sorting
sorted.bar.vec <- sort(bar.vec, index.return=TRUE) # should I consider also the indexing? I don't think so
bar.vec <- sorted.bar.vec$x
if(reorder.index){
if(!is.null(label.vec)) label.vec <- label.vec[sorted.bar.vec$ix]
else label.vec <- sorted.bar.vec$ix
}
# if not defined define the label vector
if(is.null(label.vec)) label.vec <- 1:length(bar.vec)
stopifnot(all(1:length(label.vec) == sort(label.vec))) # no gaps allowed
stopifnot(length(label.vec) == length(bar.vec))
  # generate the diff vector
bar.vec.diff <- c(bar.vec[1], diff(bar.vec))
stopifnot(sum(bar.vec.diff) == bar.vec[length(bar.vec)]) # check the right summing up!
# final loop
return(unlist(sapply(1:length(label.vec), function(x) rep(label.vec[x], bar.vec.diff[x]))))
}
# symmetrize matrix - fcocina
.symm <- function(x) return((x+t(x))/2)
# trace of a matrix
.tr <- function(x) return(sum(diag(x)))
|
/R/misc.R
|
no_license
|
clangi/CampaRi
|
R
| false | false | 8,170 |
r
|
# @title .onAttach
# @description Print the CampaRi startup message when the package is attached
# @keywords internal
.onAttach<- function (libname, pkgname){
packageStartupMessage(paste0(
" ==============================================================\n",
" \n",
" CAMPARI analysis tools \n",
" \n",
" \n",
" ---------------------------------------- \n",
" Analysing time series. \n",
" Version: ", utils::packageVersion("CampaRi"), "\n",
" ==============================================================\n"))
}
# .onLoad <- function(libname, pkgname) {
# op <- options()
# op.CampaRi <- list(
# CampaRi.data_management = "R"
# )
# toset <- !(names(op.CampaRi) %in% names(op))
# if(any(toset)) options(op.CampaRi[toset])
# # .setting_up_netcdf()
# # if(getOption("CampaRi.data_management")=='netcdf'){
# # nc_lib_dir <- "/usr/include/"
# # makevars_file <- paste0('MY_PKG_LIBS= -lnetcdff -I', nc_lib_dir, '
# # MY_PKG_FFLAGS= -fbacktrace -fbounds-check -fcheck-array-temporaries -g
# # mypackage_FFLAGS = $(FPICFLAGS) $(SHLIB_FFLAGS) $(FFLAGS)
# # all: $(SHLIB)
# # main_clu_adjl_mst.o: main_clu_adjl_mst.f90
# # $(FC) $(mypackage_FFLAGS) $(MY_PKG_FFLAGS) -c main_clu_adjl_mst.f90 -o main_clu_adjl_mst.o $(MY_PKG_LIBS)
# #
# # utilities_netcdf.o: utilities_netcdf.f90
# # $(FC) $(mypackage_FFLAGS) $(MY_PKG_FFLAGS) -c utilities_netcdf.f90 -o utilities_netcdf.o $(MY_PKG_LIBS)
# #
# # PKG_LIBS= -lnetcdff -I', nc_lib_dir, '
# # ')
# # cat(makevars_file, file = paste0(".R/Makevars"))
# # }
# invisible() #no output from this function
# }
# short hand for length
.lt <- function(x) return(length(x))
# check for single integer value
.isSingleInteger <- function(x) {
if(!is.numeric(x) || x%%1 != 0 || (is.null(dim(x)) && length(x) != 1) || (!is.null(dim(x))))
return(FALSE)
else
return(TRUE)
}
# check for single element (e.g. character)
.isSingleElement <- function(x) {
if((is.null(dim(x)) && length(x) != 1) || (!is.null(dim(x))))
return(FALSE)
else
return(TRUE)
}
.isSingleNumeric <- function(x) return(.isSingleElement(x) && is.numeric(x))
.isSingleChar <- function(x) return(.isSingleElement(x) && is.character(x))
# This routine is able to print a loading bar within a loop to know the work done
.print_consecutio <- function(itering, total_to_iter, tot_to_print = 10, other_to_print = "", timeit = T, time_first = NULL){
state_to_print <- floor(((itering*1.0)/total_to_iter)*tot_to_print)
white_not_to_print <- tot_to_print - state_to_print
if(timeit && is.null(time_first))
stop("If you want to time me you need to give me the starting of time (time_first).")
if(state_to_print%%1 == 0){
if(timeit){
time_spent <- proc.time() - time_first
time_spent <- time_spent["elapsed"]
time_spent <- round(as.numeric(time_spent), digits = 0)
if(time_spent>120)
time_needed <- paste0("(needs: ", round((time_spent*1.0/itering)*total_to_iter, digits = 0)," s)")
else
time_needed <- ""
if(time_spent != 0)
time_spent <- paste0(" Time spent: ", time_spent , " s")
else
time_spent <- ""
other_to_print <- paste(time_spent, time_needed, other_to_print)
}
string_to_print <- "\r|"
if(state_to_print != 0){
for(eq in 1:state_to_print)
string_to_print <- paste0(string_to_print, "=")
}
if(state_to_print != tot_to_print){
for(emp in 1:white_not_to_print)
string_to_print <- paste0(string_to_print, " ")
}
string_to_print <- paste0(string_to_print,"| ", floor((state_to_print*100)/tot_to_print), "% ", other_to_print)
cat(string_to_print, sep = "")
}
}
# check for install_campari()
.get_os <- function(){
sysinf <- Sys.info()
if (!is.null(sysinf)){
os <- sysinf['sysname']
if (os == 'Darwin')
os <- "osx"
} else { ## mystery machine
os <- .Platform$OS.type
if (grepl("^darwin", R.version$os))
os <- "osx"
if (grepl("linux-gnu", R.version$os))
os <- "linux"
}
tolower(os)
}
# binary search for true-false. It is 100 times faster than %in%
.BiSearch <- function(table, key, start.idx = 1, end.idx = length(table),
tol = .Machine$double.eps ^ 0.5,
check = TRUE) {
# Takes sorted (in ascending order) vectors
if (check) stopifnot(is.vector(table), is.numeric(table))
m <- as.integer(ceiling((end.idx + start.idx) / 2)) # Midpoint
if (table[m] > key + tol) {
    if (start.idx >= end.idx) return(FALSE)
Recall(table, key, start.idx = start.idx, end.idx = m - 1L, tol = tol, check = FALSE)
} else if (table[m] < key - tol) {
    if (start.idx >= end.idx) return(FALSE)
Recall(table, key, start.idx = m + 1L, end.idx = end.idx, tol = tol, check = FALSE)
} else return(TRUE)
}
# normalize
.normalize <- function(x, xmax = NULL, xmin = NULL) { # not equivalent of scale(x, center = F)
# if(.isSingleElement(x)) return(1)
if(is.null(xmax)) xmax <- max(x)
if(is.null(xmin)) xmin <- min(x)
if(xmin == xmax) return(x/xmax)
else return((x*1.0 - xmin)/(xmax - xmin))
}
# cutting the 0s
.cut0 <- function(x) { x[x<0] <- 0; x }
# fit a polynomial trend (degree polydeg) and return the normalized detrended values
.denaturate <- function(yyy, xxx, polydeg = 7, plotit = FALSE){
# yyy <- kin.pl
# xxx <- seq(lpi)
q.mod <- lm(yyy ~ poly(xxx, polydeg, raw=TRUE))
new_yyy <- .normalize(yyy - stats::predict(q.mod))
if(plotit){
plot(xxx, yyy, type = 'l', ylim = c(0,1))
abline(lm(yyy ~ xxx), col = 'darkblue') # linear
lines(xxx, stats::predict(q.mod), col = "darkgreen", lwd = 2)
lines(xxx, new_yyy, col = "red", lwd = 3)
}
return(new_yyy)
}
# checking function for output command (intern or not, i.e. talkative or not)
.check_cmd.out <- function(cmd.out, intern = FALSE){
if(intern) {
return(!is.null(attr(cmd.out, which = 'status')))
}else{
if(!.isSingleInteger(cmd.out)) stop('Something went wrong. The command output is not an integer even if intern is FALSE.')
return(cmd.out > 0)
}
}
# simple double separator creator; something like the following.
# > a <- .get_adjusted_separators(' a ', 11)
# > cat(a$sep1, '\n', a$sep2, sep = '')
# ~~~~ a ~~~~
# ~~~~~~~~~~~
.get_adjusted_separators <- function(tit, length_it = 68){
if(length_it%%2) length_it <- length_it + 1 # simple check on the lengths
  if(nchar(tit) > length_it) stop('Please insert correctly the final length. It was too short in comparison to the title length.')
if(nchar(tit)%%2 != 0) nchar_sep <- length_it - 1
else nchar_sep <- length_it
half_it <- (nchar_sep - nchar(tit))/2
stp1 <- ""
for(u in 1:half_it) stp1 <- paste0(stp1, "~")
stp1 <- paste0(stp1, tit, stp1)
stp2 <- ""
for(u in 1:nchar_sep) stp2 <- paste0(stp2, "~")
stp2 <- paste0(stp2)
return(list('sep1' = stp1, 'sep2' = stp2))
}
# create annotation vector from barrier vector # not used at the moment
.vec_from_barriers <- function(bar.vec, label.vec = NULL, reorder.index = FALSE){ # , end.point = NULL (I can put it inside the bar.vec)
# bar.vec must be the position on the PI!!! not the lengths of stretches
stopifnot(all(bar.vec > 1))
# sorting
sorted.bar.vec <- sort(bar.vec, index.return=TRUE) # should I consider also the indexing? I don't think so
bar.vec <- sorted.bar.vec$x
if(reorder.index){
if(!is.null(label.vec)) label.vec <- label.vec[sorted.bar.vec$ix]
else label.vec <- sorted.bar.vec$ix
}
# if not defined define the label vector
if(is.null(label.vec)) label.vec <- 1:length(bar.vec)
stopifnot(all(1:length(label.vec) == sort(label.vec))) # no gaps allowed
stopifnot(length(label.vec) == length(bar.vec))
  # generate the diff vector
bar.vec.diff <- c(bar.vec[1], diff(bar.vec))
stopifnot(sum(bar.vec.diff) == bar.vec[length(bar.vec)]) # check the right summing up!
# final loop
return(unlist(sapply(1:length(label.vec), function(x) rep(label.vec[x], bar.vec.diff[x]))))
}
# symmetrize matrix - fcocina
.symm <- function(x) return((x+t(x))/2)
# trace of a matrix
.tr <- function(x) return(sum(diag(x)))
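# Quick illustrations of two of the helpers above (hypothetical values, kept as comments because
# these are internal, dot-prefixed utilities):
# .normalize(c(3, 5, 9))       # rescales to [0, 1] -> 0.0000000 0.3333333 1.0000000
# .BiSearch(c(1, 3, 7, 10), 7) # TRUE: the tolerant binary search finds the key in the sorted vector
# .BiSearch(c(1, 3, 7, 10), 2) # FALSE: key absent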
|
###Script for task 04
###Author: April Armes
###Group: IMAAS
library(ggplot2)#loads ggplot 2
diamonds # calls diamonds dataset
nrow(diamonds)#tells you how many rows diamonds dataset has -> a lot.
set.seed(1410) #makes the results the same for everyone
dsmall <- diamonds[sample(nrow(diamonds), 100), ] #creates a vector called dsmall that only contains 100 "random" rows of diamonds
###use dsmall for all plots
ggplot(dsmall,aes (x, y, color = z)) + geom_point() + facet_wrap(~cut) #creates a scatterplot from the diamonds dataset columns x vs y colored by z facetted by cut
ggplot(dsmall, aes (price, carat, color =cut)) + geom_point()+ geom_smooth(method = "lm", se=FALSE) #creates a scatterplot from the diamonds dataset with columns x=price, y=carat, colored by cut and smoothed using the lm method - linear model/line of best fit
ggplot(dsmall, aes (carat, color= clarity)) + geom_density()+facet_wrap(~clarity)#doesn't look like the example, but a density plot of carat faceted and colored by clarity
ggplot(dsmall, aes (cut,price)) + geom_boxplot()# a box plot of the dataset diamonds with x=cut and y = price
ggplot(dsmall, aes (x, y, color="red"))+ geom_point()+ geom_smooth(method="lm", color = "blue", linetype=2)+labs(x="x, in mm", y="y, in mm")# creates a scatterplot of x over y, designates that the points should be red, a best fit line should be blue and dashed
#now how do I print? click run
#ugly plot contest
library(jpeg)#loads jpeg library
img<-readJPEG("c:/Users/April/Documents/GEOL_590/misc/steen.jpg", native = TRUE)#pulls image from file
plot(img)
res<-dim(img)[1:2]#assigns resolution to image
plot(1,1,xlim=c(1,res[1]),ylim=c(1,res[2]),asp=1,type="n",xaxs="i",yaxs="i",xaxt="n",yaxt="n",xlab="",ylab="",bty="n")#????
rasterImage(img, 1,1,res[1],res[2])#profit
#ugly plot contest forrealdo
###load jpeg and grid libraries
library(jpeg)
library(grid)
library(ggplot2)
###read the image file into a vector called img
img<-readJPEG("c:/Users/April/Documents/GEOL_590/misc/steen.jpg", native = TRUE)
#grab image
g <- rasterGrob(img, interpolate=TRUE)
###use diamonds dataset, set seed to make the same for everyone
set.seed(1410) #makes the results the same for everyone
dsmall <- diamonds[sample(nrow(diamonds), 100), ]
ggplot(dsmall, aes(carat, price,color="red"))+ geom_point(color="red") + theme_dark()+ #creates a plot on dsmall data set where the points are red and the background of the graph is grey
  theme(panel.grid.major = element_line(colour = "orange"))+ #makes orange gridlines on graph
  annotation_custom(g, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf)+ #puts image into graph
  stat_identity()+ #need this to maintain data points and puts them on top of image
  geom_jitter()+ #adds random variation to the location of each data point
  geom_smooth(span =0.1, se=FALSE, color="blue", linetype=2)+ #smooths plot with a blue dashed wiggly line
  theme(plot.background = element_rect(fill = "green"))+ #makes the background behind the plot green
  theme(panel.grid.minor = element_line(colour = "red", linetype = "dotted"))+ #creates small red dotted gridlines
  theme(panel.grid.major = element_line(size = 2))+ #makes the orange gridlines thick
  theme(legend.position = "none") #deletes legend
###each line above ends with + before its comment, so the whole chain parses as one plot
###see below - different image
###alt ugly plot
library(jpeg)
library(grid)
library(ggplot2)
jpg<-readJPEG("c:/Users/April/Documents/GEOL_590/misc/armes.jpg", native = TRUE)
p<-g <- rasterGrob(jpg, interpolate=TRUE)
set.seed(1410) #makes the results the same for everyone
dsmall <- diamonds[sample(nrow(diamonds), 100), ]
ggplot(dsmall, aes(carat, price,color="red"))+ geom_point(color="red") + theme_dark()+theme(panel.grid.major = element_line(colour = "orange"))+annotation_custom(p, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf)+ stat_identity()+geom_jitter()+geom_smooth(span =0.1, se=FALSE, color="blue", linetype=2)+ theme(plot.background = element_rect(fill = "green"))+ theme(panel.grid.minor = element_line(colour = "red", linetype = "dotted"))+ theme(panel.grid.major = element_line(size = 2))+ theme(legend.position = "none")
|
/R/Task_4.R
|
no_license
|
Nuapril/GEOL_590
|
R
| false | false | 4,234 |
r
|
###Script for task 04
###Author: April Armes
###Group: IMAAS
library(ggplot2)#loads ggplot 2
diamonds # calls diamonds dataset
nrow(diamonds)#tells you how many rows diamonds dataset has -> a lot.
set.seed(1410) #makes the results the same for everyone
dsmall <- diamonds[sample(nrow(diamonds), 100), ] #creates a vector called dsmall that only contains 100 "random" rows of diamonds
###use dsmall for all plots
ggplot(dsmall,aes (x, y, color = z)) + geom_point() + facet_wrap(~cut) #creates a scatterplot from the diamonds dataset columns x vs y colored by z facetted by cut
ggplot(dsmall, aes (price, carat, color =cut)) + geom_point()+ geom_smooth(method = "lm", se=FALSE) #creates a scatterplot from the diamonds dataset with columns x=price, y=carat, colored by cut and smoothed using the lm method - linear model/line of best fit
ggplot(dsmall, aes (carat, color= clarity)) + geom_density()+facet_wrap(~clarity)#doesn't look like the example, but a density plot of carat faceted and colored by clarity
ggplot(dsmall, aes (cut,price)) + geom_boxplot()# a box plot of the dataset diamonds with x=cut and y = price
ggplot(dsmall, aes (x, y, color="red"))+ geom_point()+ geom_smooth(method="lm", color = "blue", linetype=2)+labs(x="x, in mm", y="y, in mm")# creates a scatterplot of x over y, designates that the points should be red, a best fit line should be blue and dashed
#now how do I print? click run
#ugly plot contest
library(jpeg)#loads jpeg library
img<-readJPEG("c:/Users/April/Documents/GEOL_590/misc/steen.jpg", native = TRUE)#pulls image from file
plot(img)
res<-dim(img)[1:2]#assigns resolution to image
plot(1,1,xlim=c(1,res[1]),ylim=c(1,res[2]),asp=1,type="n",xaxs="i",yaxs="i",xaxt="n",yaxt="n",xlab="",ylab="",bty="n")#????
rasterImage(img, 1,1,res[1],res[2])#profit
#ugly plot contest forrealdo
###load jpeg and grid libraries
library(jpeg)
library(grid)
library(ggplot2)
###read the image file into a vector called img
img<-readJPEG("c:/Users/April/Documents/GEOL_590/misc/steen.jpg", native = TRUE)
#grab image
g <- rasterGrob(img, interpolate=TRUE)
###use diamonds dataset, set seed to make the same for everyone
set.seed(1410) #makes the results the same for everyone
dsmall <- diamonds[sample(nrow(diamonds), 100), ]
ggplot(dsmall, aes(carat, price,color="red"))+ geom_point(color="red") + theme_dark()+ #creates a plot on dsmall data set where the points are red and the background of the graph is grey
  theme(panel.grid.major = element_line(colour = "orange"))+ #makes orange gridlines on graph
  annotation_custom(g, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf)+ #puts image into graph
  stat_identity()+ #need this to maintain data points and puts them on top of image
  geom_jitter()+ #adds random variation to the location of each data point
  geom_smooth(span =0.1, se=FALSE, color="blue", linetype=2)+ #smooths plot with a blue dashed wiggly line
  theme(plot.background = element_rect(fill = "green"))+ #makes the background behind the plot green
  theme(panel.grid.minor = element_line(colour = "red", linetype = "dotted"))+ #creates small red dotted gridlines
  theme(panel.grid.major = element_line(size = 2))+ #makes the orange gridlines thick
  theme(legend.position = "none") #deletes legend
###each line above ends with + before its comment, so the whole chain parses as one plot
###see below - different image
###alt ugly plot
library(jpeg)
library(grid)
library(ggplot2)
jpg<-readJPEG("c:/Users/April/Documents/GEOL_590/misc/armes.jpg", native = TRUE)
p<-g <- rasterGrob(jpg, interpolate=TRUE)
set.seed(1410) #makes the results the same for everyone
dsmall <- diamonds[sample(nrow(diamonds), 100), ]
ggplot(dsmall, aes(carat, price,color="red"))+ geom_point(color="red") + theme_dark()+theme(panel.grid.major = element_line(colour = "orange"))+annotation_custom(p, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf)+ stat_identity()+geom_jitter()+geom_smooth(span =0.1, se=FALSE, color="blue", linetype=2)+ theme(plot.background = element_rect(fill = "green"))+ theme(panel.grid.minor = element_line(colour = "red", linetype = "dotted"))+ theme(panel.grid.major = element_line(size = 2))+ theme(legend.position = "none")
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/Transition_functions.r
\name{cjs_gamma}
\alias{cjs_gamma}
\alias{ms2_gamma}
\alias{ms_gamma}
\title{HMM Transition matrix functions}
\usage{
cjs_gamma(pars, m, F, T)
}
\arguments{
\item{pars}{list of real parameter values for each type of parameter}
\item{m}{number of states}
\item{F}{initial occasion vector}
\item{T}{number of occasions}
}
\value{
array of id and occasion-specific transition matrices - Gamma in Zucchini and MacDonald (2009)
}
\description{
Functions that compute the transition matrix for various models. Currently only CJS and MS models
are included.
}
\author{
Jeff Laake <jeff.laake@noaa.gov>
}
\references{
Zucchini, W. and I.L. MacDonald. 2009. Hidden Markov Models for Time Series: An Introduction using R. Chapman and Hall, Boca Raton, FL. 275p.
}
|
/marked/man/cjs_gamma.Rd
|
no_license
|
bmcclintock/marked
|
R
| false | false | 900 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/Transition_functions.r
\name{cjs_gamma}
\alias{cjs_gamma}
\alias{ms2_gamma}
\alias{ms_gamma}
\title{HMM Transition matrix functions}
\usage{
cjs_gamma(pars, m, F, T)
}
\arguments{
\item{pars}{list of real parameter values for each type of parameter}
\item{m}{number of states}
\item{F}{initial occasion vector}
\item{T}{number of occasions}
}
\value{
array of id and occasion-specific transition matrices - Gamma in Zucchini and MacDonald (2009)
}
\description{
Functions that compute the transition matrix for various models. Currently only CJS and MS models
are included.
}
\author{
Jeff Laake <jeff.laake@noaa.gov>
}
\references{
Zucchini, W. and I.L. MacDonald. 2009. Hidden Markov Models for Time Series: An Introduction using R. Chapman and Hall, Boca Raton, FL. 275p.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gutenberg_richter_densities.R
\name{qGR}
\alias{qGR}
\title{Inverse cumulative distribution function for Gutenberg Richter distribution}
\usage{
qGR(p, b, mw_min)
}
\arguments{
\item{p}{vector of probabilities}
\item{b}{Gutenberg-Richter b value}
\item{mw_min}{Minimum mw}
}
\value{
vector of quantiles q such that probability(mw < q | mw >= mw_min) = p
}
\description{
Note corresponding functions pGR, qGR, dGR, rGR. See ?rGR for an extended example
of fitting with maximum likelihood.
}
\examples{
# Compute the 90th percentile Mw
Mw_90 = qGR(0.9, b=0.8, mw_min=6.0)
}
|
/R/rptha/man/qGR.Rd
|
permissive
|
GeoscienceAustralia/ptha
|
R
| false | true | 628 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gutenberg_richter_densities.R
\name{qGR}
\alias{qGR}
\title{Inverse cumulative distribution function for Gutenberg Richter distribution}
\usage{
qGR(p, b, mw_min)
}
\arguments{
\item{p}{vector of probabilities}
\item{b}{Gutenberg-Richter b value}
\item{mw_min}{Minimum mw}
}
\value{
vector of quantiles q such that probability(mw < q | mw >= mw_min) = p
}
\description{
Note corresponding functions pGR, qGR, dGR, rGR. See ?rGR for an extended example
of fitting with maximum likelihood.
}
\examples{
# Compute the 90th percentile Mw
Mw_90 = qGR(0.9, b=0.8, mw_min=6.0)
}
|
## The functions are used to calculate and cache the inverse of a matrix
## The following function creates a list containing functions to set and
## get the values of a matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
        inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse, getinverse = getinverse)
}
## The following function calculates the inverse of the matrix created
## with the above function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
/cachematrix.R
|
no_license
|
luciawrq/ProgrammingAssignment2
|
R
| false | false | 963 |
r
|
## The functions are used to calculate and cache the inverse of a matrix
## The following function creates a list containing functions to set and
## get the values of a matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
        inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse, getinverse = getinverse)
}
## The following function calculates the inverse of the matrix created
## with the above function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
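## Usage sketch (illustration only; guarded so nothing runs when this file is sourced non-interactively)
if (interactive()) {
  cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
  cacheSolve(cm) ## first call computes the inverse and caches it
  cacheSolve(cm) ## second call prints "getting cached data" and returns the cached inverse
}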
|
source("Project1Lib.R")
fn_Plot4 <- function(x)
{
fname <- "plot4.png"
cat("Creating",fname, "\n")
#
attach(hpc)
par(mfrow=c(2,2))
plot(hpc$DateTime, hpc$Global_active_power, type="l", ylab="Global Active Power", xlab="")
plot(DateTime, Voltage, type="l", ylab="Voltage",xlab="datetime")
#
yrange<-range(c(hpc$Sub_metering_1,hpc$Sub_metering_2,hpc$Sub_metering_3))
plot(hpc$DateTime, hpc$Sub_metering_1, type="l", xlab="", ylim=yrange, ylab="Energy sub metering")
lines(hpc$DateTime,hpc$Sub_metering_2, col="red")
lines(hpc$DateTime,hpc$Sub_metering_3, col="blue")
legend("topright",bty="n",cex=.75,lty=c(1,1,1),col=c("black","red","blue"),c("Sub_metering_1 ","Sub_metering_2 ","Sub_metering_3 "))
#
plot(DateTime, Global_reactive_power, type="l", xlab="datetime")
dev.copy(png,fname, width=480, height=480)
dev.off()
}
# load the data first if needed, then draw the plot
if (exists("hpc") && is.data.frame(get("hpc"))) {
    fn_Plot4()
} else {
    hpc <- fn_GetData()
    fn_Plot4()
}
|
/plot4.R
|
no_license
|
nate43026/ExData_Plotting1
|
R
| false | false | 962 |
r
|
source("Project1Lib.R")
fn_Plot4 <- function(x)
{
fname <- "plot4.png"
cat("Creating",fname, "\n")
#
attach(hpc)
par(mfrow=c(2,2))
plot(hpc$DateTime, hpc$Global_active_power, type="l", ylab="Global Active Power", xlab="")
plot(DateTime, Voltage, type="l", ylab="Voltage",xlab="datetime")
#
yrange<-range(c(hpc$Sub_metering_1,hpc$Sub_metering_2,hpc$Sub_metering_3))
plot(hpc$DateTime, hpc$Sub_metering_1, type="l", xlab="", ylim=yrange, ylab="Energy sub metering")
lines(hpc$DateTime,hpc$Sub_metering_2, col="red")
lines(hpc$DateTime,hpc$Sub_metering_3, col="blue")
legend("topright",bty="n",cex=.75,lty=c(1,1,1),col=c("black","red","blue"),c("Sub_metering_1 ","Sub_metering_2 ","Sub_metering_3 "))
#
plot(DateTime, Global_reactive_power, type="l", xlab="datetime")
dev.copy(png,fname, width=480, height=480)
dev.off()
}
# load the data first if needed, then draw the plot
if (exists("hpc") && is.data.frame(get("hpc"))) {
    fn_Plot4()
} else {
    hpc <- fn_GetData()
    fn_Plot4()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataOrganization.R
\name{NumberPermsAnySize}
\alias{NumberPermsAnySize}
\title{Total Number of Permutations of all sizes}
\usage{
NumberPermsAnySize(n, min = 1)
}
\arguments{
\item{n}{The total number of choices}
\item{min}{The minimum number of objects that must be selected}
}
\description{
Calculates the number of permutations for a total number of objects when it is possible to select any number of them (with no repeats). This is useful for calculating how many scoring possibilities a click and drag ordering item has. Also allows for specifying a minimum number, such as for items of the form: select at least the 5 most important things to do, and order them by priority.
}
\examples{
NumberPermsAnySize(10,6)
}
\keyword{permutation}
|
/man/NumberPermsAnySize.Rd
|
no_license
|
alexbrodersen/teis
|
R
| false | true | 796 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataOrganization.R
\name{NumberPermsAnySize}
\alias{NumberPermsAnySize}
\title{Total Number of Permutations of all sizes}
\usage{
NumberPermsAnySize(n, min = 1)
}
\arguments{
\item{n}{The total number of choices}
\item{min}{The minimum number of objects that must be selected}
}
\description{
Calculates the number of permutations for a total number of objects when it is possible to select any number of them (with no repeats). This is useful for calculating how many scoring possibilities a click and drag ordering item has. Also allows for specifying a minimum number, such as for items of the form: select at least the 5 most important things to do, and order them by priority.
}
\examples{
NumberPermsAnySize(10,6)
}
\keyword{permutation}
|
library(datafsm)
context("Main evolve_model function")
test_that("evolve_model() returns correct type of object", {
cdata <- data.frame(period = 1:5, outcome = c(1,2,1,1,1),
my.decision1 = c(1,0,1,1,1), other.decision1 = c(0,0,0,1,1))
result <- evolve_model(cdata, cv=FALSE)
expect_is(result, "ga_fsm")
})
test_that("evolve_model() returns warnings and errors", {
cdata <- as.matrix(data.frame(period = 1:5, outcome = c(1,2,1,1,1),
my.decision1 = c(1,0,1,1,1), other.decision1 = c(0,0,0,1,1)))
expect_warning(evolve_model(cdata, cv=FALSE), "did not supply a data.frame")
cdata <- data.frame(period = 1:5, outcome = c(NA,2,1,1,1),
my.decision1 = c(1,0,1,1,1), other.decision1 = c(0,0,0,1,1))
expect_error(evolve_model(cdata, cv=FALSE), "missing")
cdata <- data.frame(period = 1:5, outcome = c(1,2,1,1,1),
my.decision1 = c(1,0,1,1,1), other.decision1 = c(0,0,0,1,1),
joe.decision1 = c(0,0,0,1,1), jack.decision1 = c(0,0,0,1,1) )
expect_warning(evolve_model(cdata, cv=FALSE), "predictor", all=FALSE)
cdata <- data.frame(period = 1:5, outcome = c(1,1,1,1,1),
my.decision1 = c(1,0,1,3,1), other.decision1 = c(0,0,0,1,1))
expect_error(evolve_model(cdata, cv=FALSE), "unique")
cdata <- data.frame(period = 1:5,
my.decision1 = c(1,0,1,3,1), other.decision1 = c(0,0,0,1,1))
expect_error(evolve_model(cdata, cv=FALSE), regexp = "predictor")
})
|
/tests/testthat/test_mainfunc.R
|
permissive
|
jdblischak/datafsm
|
R
| false | false | 1,556 |
r
|
|
gcv_function <- function(alpha, gamma2, beta)
{
  ## weights in [0, 1) determined by the regularization parameter alpha and gamma2
  f = (alpha^2) / (gamma2 + alpha^2)
  ## align the lengths of f and beta if they differ
  if (length(f) > length(beta)) {
    f = f[1:length(beta)]
  } else if (length(beta) > length(f)) {
    iend = length(beta)
    beta = beta[(iend - length(f) + 1):iend]
  }
  ## GCV value: squared norm of the weighted beta over the squared sum of the weights
  g = (Mnorm(f * beta)^2) / (sum(f))^2
  return(g)
}
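## Hedged usage sketch (not part of the package file): in a Tikhonov/SVD setting,
## gamma2 is presumably the vector of squared singular values of the forward
## operator and beta the data projected onto the left singular vectors, so the
## GCV curve can be traced over a grid of alpha values. Mnorm() is assumed to be
## a Euclidean-norm helper available in the same package.
# G <- diag(c(3, 1, 0.2))                    # toy diagonal forward operator
# d <- c(1.2, 0.4, 0.05)                     # toy data vector
# s <- svd(G)
# alphas <- 10^seq(-3, 1, length.out = 50)
# gcv <- sapply(alphas, function(a) gcv_function(a, s$d^2, as.vector(t(s$u) %*% d)))
# alphas[which.min(gcv)]                     # candidate regularization parameter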
|
/R/gcv_function.R
|
no_license
|
cran/PEIP
|
R
| false | false | 430 |
r
|
|
##########################################################################################################################
## Loop BKMR model through 100 seeds
## 11/18/2019
##########################################################################################################################
#install.packages("bkmr")
#install.packages("tidyverse")
## load required libraries
# Location for HPC!
library(truncnorm, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")
library(bkmr, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")
library(tidyverse)
library(dotCall64, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")
library(spam, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")
library(maps, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")
library(fields, lib.loc = "/ifs/home/msph/ehs/eag2186/local/hpc/")
##########################################################################################################################
## Data Manipulation
##########################################################################################################################
## read in data and only consider complete data
## this drops 327 individuals, but BKMR does not handle missing data
nhanes = na.omit(read_csv("Data/studypop.csv"))
## center/scale continuous covariates and create indicators for categorical covariates
nhanes$age_z = scale(nhanes$age_cent) ## center and scale age
nhanes$agez_sq = nhanes$age_z^2 ## square this age variable
nhanes$bmicat2 = as.numeric(nhanes$bmi_cat3 == 2) ## 25 <= BMI < 30
nhanes$bmicat3 = as.numeric(nhanes$bmi_cat3 == 3) ## BMI >= 30 (BMI < 25 is the reference)
nhanes$educat1 = as.numeric(nhanes$edu_cat == 1) ## no high school diploma
nhanes$educat3 = as.numeric(nhanes$edu_cat == 3) ## some college or AA degree
nhanes$educat4 = as.numeric(nhanes$edu_cat == 4) ## college grad or above (reference is high school grad/GED or equivalent)
nhanes$otherhispanic = as.numeric(nhanes$race_cat == 1) ## other Hispanic or other race - including multi-racial
nhanes$mexamerican = as.numeric(nhanes$race_cat == 2) ## Mexican American
nhanes$black = as.numeric(nhanes$race_cat == 3) ## non-Hispanic Black (non-Hispanic White as reference group)
nhanes$wbcc_z = scale(nhanes$LBXWBCSI)
nhanes$lymphocytes_z = scale(nhanes$LBXLYPCT)
nhanes$monocytes_z = scale(nhanes$LBXMOPCT)
nhanes$neutrophils_z = scale(nhanes$LBXNEPCT)
nhanes$eosinophils_z = scale(nhanes$LBXEOPCT)
nhanes$basophils_z = scale(nhanes$LBXBAPCT)
nhanes$lncotinine_z = scale(nhanes$ln_lbxcot) ## to assess smoking status, scaled ln cotinine levels
## our y variable - ln transformed and scaled mean telomere length
lnLTL_z = scale(log(nhanes$TELOMEAN))
## our Z matrix
mixture = with(nhanes, cbind(LBX074LA, LBX099LA, LBX118LA, LBX138LA, LBX153LA, LBX170LA, LBX180LA,
LBX187LA, LBX194LA, LBXHXCLA, LBXPCBLA, LBXD03LA, LBXD05LA, LBXD07LA,
LBXF03LA, LBXF04LA, LBXF05LA, LBXF08LA))
lnmixture = apply(mixture, 2, log)
lnmixture_z = scale(lnmixture)
colnames(lnmixture_z) = c(paste0("PCB",c(74, 99, 118, 138, 153, 170, 180, 187, 194, 169, 126)),
paste0("Dioxin",1:3), paste0("Furan",1:4))
## our X matrix
covariates = with(nhanes, cbind(age_z, agez_sq, male, bmicat2, bmicat3, educat1, educat3, educat4,
otherhispanic, mexamerican, black, wbcc_z, lymphocytes_z, monocytes_z,
neutrophils_z, eosinophils_z, basophils_z, lncotinine_z))
############################################################################################################################
## Above code does not change
## Next begin looping over seeds
############################################################################################################################
### create knots matrix for Gaussian predictive process (to speed up BKMR with large datasets)
#set.seed(1506744763) # use better seed now
set.seed(10)
knots100 <- fields::cover.design(lnmixture_z, nd = 100)$design
##########################################################################################################################
## Fit Model
##########################################################################################################################
### Group VS Fit with all Exposures using GPP and 100 Knots
##### fit BKMR models WITH Gaussian predictive process using 100 knots
## Loop Over Seeds
fit_seed <- function(seed) {
set.seed(seed)
print(format(Sys.time(), "%c"))
fit_bkmr_seed <- kmbayes( y = lnLTL_z, Z = lnmixture_z, X = covariates,
iter = 100000,
verbose = FALSE, varsel = TRUE,
groups = c(rep(1, times = 2), 2, rep(1, times = 6),
rep(3, times = 2), rep(2, times = 7)), knots = knots100)
}
## read job number from system environment
## This only works if run on cluster!!
job_num = as.integer(Sys.getenv("SGE_TASK_ID"))
## set seed, create sample, export -- this can help double check that code is
## reproducibly simulating examples
repeat_model_25 <- tibble(seed = job_num) %>%
mutate(fits = list(fit_seed(seed)))
##########################################################################################################################
## Posterior Inclusion Probabilities (PIPs)
##########################################################################################################################
get_pips <- function(model) {
all_pips = ExtractPIPs(model)
all_pips
}
repeat_model_25 <- repeat_model_25 %>%
mutate(pips = list(get_pips(repeat_model_25$fits[[1]])))
##########################################################################################################################
## Data Visualization
##########################################################################################################################
get_data_viz <- function(fit) {
### change this for each model you fit and then rerun the code from here to the bottom
modeltoplot <- fit ## name of model object
Z <- lnmixture_z ## Z matrix to match what was used in model
### values to keep after burnin/thin
sel <- seq(50001, 100000,by = 50)
  #### Univariate exposure-response functions and overall risk summaries
#### create dataframes for ggplot (this takes a little while to run)
pred.resp.univar <- PredictorResponseUnivar(fit = modeltoplot, sel = sel, method = "approx")
risks.overall <- OverallRiskSummaries(fit = modeltoplot,
qs = seq(0.25, 0.75, by = 0.05),
q.fixed = 0.5, method = "approx", sel = sel)
list(pred.resp.univar = pred.resp.univar,
risks.overall = risks.overall)
}
repeat_model_25 <- repeat_model_25 %>%
mutate(plot_dat = list(get_data_viz(repeat_model_25$fits[[1]])))
# Keep fits to assess 4 null seeds
#repeat_model_25 <- repeat_model_25 %>% select(-fits)
save(repeat_model_25, file = paste0("bkmr_", job_num, "_model_loop.RDA"))
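## Hedged post-processing sketch (not part of the original script): after the
## array job completes there is one RDA file per seed, named as in save() above.
## A pooled PIP table can then be assembled; the exact column names come from
## whatever ExtractPIPs() returns, so they are not assumed here.
# pip_files <- list.files(pattern = "^bkmr_\\d+_model_loop\\.RDA$")
# all_pips <- purrr::map_dfr(pip_files, function(f) {
#   load(f)                                    # restores repeat_model_25
#   dplyr::mutate(repeat_model_25$pips[[1]], seed = repeat_model_25$seed)
# })
# summary(all_pips)                            # inspect the spread of PIPs across seeds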
|
/BKMR/HPC/bkmr_loop_model_25.R
|
no_license
|
yanellinunez/Commentary-to-mixture-methods-paper
|
R
| false | false | 7,118 |
r
|
|
#' @name windfarmGA
#' @description The initiating function of an optimization run which will
#' interactively check user-inputs. If all inputs are correct, an optimization
#' will be started.
#'
#' @export
#'
#' @param dns The data source name (interpretation varies by driver — for some
#' drivers, dsn is a file name, but may also be a folder)
#' @param layer The layer name
#'
#' @seealso \code{\link{genetic_algorithm}}
#' @family Genetic Algorithm Functions
#'
#' @inherit genetic_algorithm details return params title
windfarmGA <- function(dns, layer, Polygon1, GridMethod, Projection,
sourceCCL, sourceCCLRoughness,
vdirspe, Rotor = 30, fcrR = 5, n = 10, topograp = FALSE,
iteration = 20, referenceHeight = 50,
RotorHeight = 50, SurfaceRoughness = 0.14,
Proportionality = 1, mutr = 0.008,
elitism = TRUE, nelit = 7,
selstate = "FIX", crossPart1 = "EQU", trimForce = TRUE,
weibull, weibullsrc,
Parallel, numCluster, verbose = FALSE, plotit = FALSE) {
## Plotting Settings #########
opar <- par(no.readonly = TRUE)
on.exit(par(opar))
par(mfrow = c(1,2), ask = FALSE)
######## Check Polygon ########
  ## Check that the given Polygon is valid
if (!missing(Polygon1)) {
if (!missing(Projection)) {
Polygon1 <- isSpatial(Polygon1, Projection)
} else {
Polygon1 <- isSpatial(Polygon1)
}
plot(Polygon1, col = "red", main = "Original Input Shapefile", axes =TRUE)
title(sub = st_crs(Polygon1)$input, line = 1)
readline(prompt = "\nHit <ENTER> if this is your Polygon")
}
  ## Load the Polygon from a source file, but only if dns and layer are given.
if (!missing(dns) & !missing(layer)) {
# Input the Source of the desired Polygon
Polygon1 <- sf::st_read(dsn = dns, layer = layer)
plot(Polygon1, col = "red", main = "Original Input Shapefile", axes =TRUE)
title(sub = paste("CRS:", st_crs(Polygon1)$input), line = 1)
readline(prompt = "\nHit <ENTER> if this is your Polygon")
}
PROJ6 <- utils::compareVersion(sf::sf_extSoftVersion()[[3]], "6") > 0
## Project the Polygon to LAEA if it is not already.
if (missing(Projection)) {
cat("No Projection is given. Take Lambert Azimuthal Equal Area Projection (EPSG:3035).\n")
if (PROJ6) {
Projection <- 3035
} else {
Projection <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs"
}
} else {
Projection <- Projection
}
if (is.na(st_crs(Polygon1))) {
cat("Polygon is not projected. The spatial reference WGS 84 (EPSG:4326) is assumed.\n")
if (PROJ6) {
st_crs(Polygon1) <- 4326
} else {
Projection <- "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs "
}
}
if (PROJ6) {
if (!isTRUE(all.equal(st_crs(Polygon1), st_crs(Projection)))) {
Polygon1 <- sf::st_transform(Polygon1, st_crs(Projection))
}
} else {
if (as.character(raster::crs(Polygon1)) != Projection) {
Polygon1 <- sf::st_transform(Polygon1, st_crs(Projection))
}
}
plot(Polygon1, col = "red", main = "Projected Input Shapefile", axes =TRUE)
title(sub = paste("CRS:", st_crs(Polygon1)$input), line = 1)
readline(prompt = "\nHit <ENTER> if this is your Polygon")
######## CHECK Crossover and Selection Params ########
## Check if Crossover Method is chosen correctly.
if (missing(crossPart1)) {crossPart1 <- readinteger()}
if (crossPart1 != "EQU" & crossPart1 != "RAN") {
crossPart1 <- readinteger()
}
## Check if Selection Method is chosen correctly.
if (missing(selstate)) {selstate <- readintegerSel()}
if (selstate != "FIX" & selstate != "VAR") {
selstate <- readintegerSel()
}
######## Check Wind Data ############
## Check if Input Wind data is given correctly.
if (missing(vdirspe)) {
stop("\n##### No wind data.frame is given. \nThis input is required for an optimization run")
}
plot.new()
plot_windrose(data = vdirspe, spd = vdirspe[, 'ws'], dir = vdirspe[, 'wd'])
readline(prompt = "\nPress <ENTER> if the windrose looks correct?")
######## Check Numeric and logical Inputs ############
## Check if Rotor,fcrR,n,iteration,RotorHeight,
## SurfaceRoughness,Proportionality,mutr,nelit are numeric
ChekNumer <- is.numeric(c(Rotor, n, fcrR, iteration, referenceHeight,
RotorHeight, SurfaceRoughness, Proportionality, mutr, nelit))
if (ChekNumer == FALSE) {
cat("################### GA WARNING MESSAGE ###################")
stop("\n##### A required numeric input is not numeric.\n
See documentation.")
}
## Check if topograp,elitism,trimForce are logical
ChekLogic <- is.logical(c(topograp, elitism, trimForce))
if (ChekLogic == FALSE) {
cat("################### GA WARNING MESSAGE ###################")
stop("\n##### A required logical input is not logical.\n
See documentation.")
}
######## Check Gridding Params ############
  ## Check that the grid created with the given inputs is correct
if (missing(GridMethod)) {
GridMethod <- "Rectangular"
}
GridMethod <- toupper(GridMethod)
## Decide if the space division should be rectangular or in hexagons.
if (GridMethod == "HEXAGON" | GridMethod == "H") {
Grid <- hexa_area(shape = Polygon1,
size = (Rotor * fcrR), plotGrid = TRUE)
} else {
Grid <- grid_area(shape = Polygon1, size = (Rotor * fcrR),
prop = Proportionality, plotGrid = TRUE)
}
cat("\nIs the grid spacing appropriate?\n")
  # InputDaccor <- readline(prompt = "Hit 'ENTER' if the grid is correct and 'n' if you would like to change some inputs.")
  cat("Type 'ENTER' if the grid is correct and 'n' if you would like to change some inputs.")
InputDaccor <- readLines(n = 1, con = getOption("windfarmGA.connection"))
InputDaccor <- tolower(InputDaccor)
if (InputDaccor == "n") {
cat("################### GA WARNING MESSAGE ###################\n")
stop("\n##### The grid spacing is not as required. \n",
"Adjust the rotor radius (Rotor), the fraction of the radius (fcrR) or the spatial reference system (Projection).\n",
"You can also use the function `grid_area` / `hexa_area` first, to see the resulting grid.\n"
)
}
######## Check Topographic Model and Weibull Params ########
if (missing(topograp)) {
topograp <- FALSE
}
if (missing(weibull)) {
weibull <- FALSE
}
if (weibull) {
if (!missing(weibullsrc)) {
if (!class(weibullsrc) == "list") {
cat(paste("\nweibullsrc class: \n1 - ", class(weibullsrc), "\n"))
stop("\n'weibullsrc' must be a list with two rasters. List item 1 should be the shape parameter (k) raster
and list item 2 should be the scale parameter (a) raster of a weibull distribution.")
}
if (!class(weibullsrc[[1]])[1] == "RasterLayer" | !class(weibullsrc[[2]])[1] == "RasterLayer" ) {
cat(paste("\nlist item classes: \n1 - ", class(weibullsrc[[1]]), "\n2 - ", class(weibullsrc[[2]])), "\n")
stop("\nOne of the given list items is not a raster.")
}
}
}
######## Check Parallel Params ########
if (missing(Parallel)) {
Parallel <- FALSE
}
if (missing(numCluster)) {
numCluster <- 2
}
if (Parallel == TRUE) {
# numPossClus <- as.integer(Sys.getenv("NUMBER_OF_PROCESSORS"))
numPossClus <- parallel::detectCores()
if (numCluster > numPossClus) {
cat("\nNumber of clusters is bigger than the amount of available cores. Reduce to max.")
numCluster <- numPossClus
}
}
######## RUN GENETIC ALGORITHM ##############
cat("Run Algorithm:\n")
result <- genetic_algorithm(Polygon1 = Polygon1, GridMethod = GridMethod,
Rotor = Rotor, n = n, fcrR = fcrR, iteration = iteration,
vdirspe = vdirspe, topograp = topograp,
referenceHeight = referenceHeight, RotorHeight = RotorHeight,
SurfaceRoughness = SurfaceRoughness,
Proportionality = Proportionality,
mutr = mutr, elitism = elitism, nelit = nelit,
selstate = selstate, crossPart1 = crossPart1,
trimForce = trimForce,
Projection = Projection, sourceCCL = sourceCCL,
sourceCCLRoughness = sourceCCLRoughness,
weibull = weibull, weibullsrc = weibullsrc,
Parallel = Parallel, numCluster = numCluster,
verbose = verbose, plotit = plotit)
invisible(result)
}
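## Hedged usage sketch (assumptions flagged, not taken from the package docs): a
## minimal interactive run needs a polygon and a wind data.frame with 'ws' and 'wd'
## columns, as read by plot_windrose() above. The values below are illustrative only.
# library(sf)
# poly <- sf::st_sf(geometry = sf::st_sfc(sf::st_polygon(list(cbind(
#   c(0, 0, 2000, 2000, 0), c(0, 2000, 2000, 0, 0)))), crs = 3035))
# winds <- data.frame(ws = c(8, 9, 10), wd = c(0, 90, 150))
# result <- windfarmGA(Polygon1 = poly, vdirspe = winds,
#                      Rotor = 30, fcrR = 5, n = 12, iteration = 10)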
|
/R/windfarmGA.R
|
no_license
|
allenfieldin/windfarmGA
|
R
| false | false | 8,756 |
r
|
|
source('global.R')
fluidPage(
#tab title
title="time2pub",
#titlePanel(h2("Journal-Specific Fast-Track time2pub Visualization",align='center')),
mainPanel(width=12,
# tags$style(type="text/css",
# ".shiny-output-error { visibility: hidden; }",
# ".shiny-output-error:before { visibility: hidden; }"
# ),
fixedRow(
column(2,br(),a(href='https://www.time2pub.com/',img(src='logo.jpg',height='75px',width='150px'))),
column(8,offset=0,br(),titlePanel(h3("Journal-Specific Fast-Track time2pub Visualization",align='center')))
),
br(),
fluidRow(
column(1,br(),br(),br(),br(),br(),br(),br(),br(),
radioButtons('y','',choices=c('Covid','Pre-Covid'),selected='Covid',inline=F),
style='padding:0px;margin-right:-1em'),
column(5,
#div(align='left',uiOutput('searchUI')),
div(align='left',selectInput('search',label=NULL,choices=c('All Journals',sort(unique(summarizedData$Journal))),selected='All Journals',multiple=FALSE)),
#div(align='left',style='margin-bottom=-3em',selectInput('search',label=NULL,choices=c('All Journals',sort(unique(summarizedData$Journal))),selected='All Journals',multiple=FALSE)),
div(plotlyOutput(outputId='mainPlot',width='100%'),style='margin-left:-2em;margin-top:-1em'),
div(align = "center", style='margin-top:-2em',br(),radioButtons('x','',choices=c('Covid','Non-Covid'),selected='Non-Covid',inline=T))
),
column(6,br(),br(),br(),
div(plotOutput(outputId='densityPlot',width='100%'),style='margin-left:2em;')
)
),
fluidRow(
br(),h4('PubMed Entry Navigation',style='margin-left:1em;')
),
tabsetPanel(
tabPanel('COVID',br(),dataTableOutput("table")),
tabPanel('Non-COVID',br(),dataTableOutput('nonCovidTable')),
tabPanel('Pre-COVID',br(),dataTableOutput('preCovidTable'))
),
br(),br(),br(),br()
# br(),br(),
# HTML(paste(h3('Covid Papers'))),br(),
# fluidRow(
# dataTableOutput("table")
# ),
# br(),br(),
# HTML(paste(h3('Non-Covid Papers'))),br(),
# fluidRow(
# dataTableOutput('nonCovidTable')
# ),
# br(),br()
)
)
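## Hedged sketch (not taken from this repo): the UI above expects a server that
## defines these output IDs; global.R is assumed to supply `summarizedData` and
## the actual plotting/table code. Placeholder render calls are shown only to map
## output IDs to their render functions.
# library(shiny); library(plotly)
# server <- function(input, output, session) {
#   output$mainPlot      <- renderPlotly(plotly::plot_ly())   # journal-level time2pub plot
#   output$densityPlot   <- renderPlot(plot.new())            # review-time density panel
#   output$table         <- renderDataTable(data.frame())     # COVID PubMed entries
#   output$nonCovidTable <- renderDataTable(data.frame())     # non-COVID entries
#   output$preCovidTable <- renderDataTable(data.frame())     # pre-COVID entries
# }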
|
/ui.R
|
permissive
|
joshuamwang/time2pub
|
R
| false | false | 2,291 |
r
|
|