| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string (lengths 0 to 6.46M) | large_string (lengths 3 to 331) | large_string (2 classes) | large_string (lengths 5 to 125) | large_string (1 class) | bool (2 classes) | bool (2 classes) | int64 (4 to 6.46M) | large_string (75 classes) | string (lengths 0 to 6.46M) |
# Description : Real life example of heuristics about local regression and kernel smoothing
# Website : http://freakonometrics.hypotheses.org/9182
toInstall <- c("XML","downloader")
doInstall <- !all(toInstall %in% rownames(installed.packages()))  # install only the packages that are missing
if(doInstall){install.packages(toInstall, repos = "http://cran.r-project.org")}
lapply(toInstall, library, character.only = TRUE)
file = "geos-tww.csv"
html = htmlParse("http://www.geos.tv/index.php/list?sid=189&collection=all")
html = xpathApply(html, "//table[@id='collectionTable']")[[1]]
data = readHTMLTable(html)
data = data[,-3]
names(data)=c("no",names(data)[-1])
data=data[-(61:64),]
data$no = 1:96
data$mu = as.numeric(substr(as.character(data$Mean), 0, 4))
data$se = sd(data$mu,na.rm=TRUE)/sqrt(as.numeric(as.character(data$Count)))
data$season = 1 + (data$no - 1)%/%12
data$season = factor(data$season)
plot(data$no,data$mu,ylim=c(6,10))
segments(data$no,data$mu-1.96*data$se,
data$no,data$mu+1.96*data$se,col="light blue")
plot(data$no,data$mu,ylim=c(6,10))
abline(v=12*(0:8)+.5,lty=2)
for(s in 1:8){
reg=lm(mu~no,data=subset(data,season==s))
lines((s-1)*12+1:12,predict(reg)[1:12],col="red")
}
db = data
NW = ksmooth(db$no,db$mu,kernel = "normal",bandwidth=5)
plot(data$no,data$mu)
lines(NW,col="red")
# Fix missing value
db$mu[95]=7
NW = ksmooth(db$no,db$mu,kernel = "normal",bandwidth=12)
plot(data$no,data$mu,ylim=c(6,10))
lines(NW,col="red")
|
/heuristics-regression-kernel-smoothing-real-life-ex.R
|
no_license
|
vikasgupta1812/rsnippets
|
R
| false | false | 1,365 |
r
|
\name{AAMetric}
\Rdversion{1.1}
\alias{AAMetric}
\docType{data}
\title{
Amino Acid Metric Solution using R (Atchley et al 2005)
}
\description{
Atchley et al 2005 performed factor analysis on a set of Amino Acid Indices (AA54) and inferred
a 5 factor latent variable structure relating amino acid characteristics using SAS. An equivalent analysis
was performed using factor.pa.ginv from the HDMD package in R. Based on the
relationship between factors and variable descriptions, the latent
variables are defined as
Factor1 (PAH): Polarity, Accessibility, Hydrophobicity; Factor2 (PSS): Propensity for Secondary Structure;
Factor3 (MS) : Molecular Size; Factor4 (CC): Codon Composition; Factor5 (EC): Electrostatic Charge.
While the Factor Analysis loadings were the same, R and SAS calculated scores slightly differently.
AAMetric are scores from the R factor analysis which convey the similarities and differences
among amino acids (rows) for each latent variable (columns).
}
\format{
Rows are alphabetized Amino Acids and the 5 columns are factors where
Factor1 (PAH): Polarity, Accessibility, Hydrophobicity; Factor2 (PSS): Propensity for Secondary Structure;
Factor3 (MS) : Molecular Size; Factor4 (CC): Codon Composition; Factor5 (EC): Electrostatic Charge.
}
\details{
54 Amino Acid Indices were selected from www.genome.jp/aaindex to quantify Amino Acid Similarities.
Using Factor Analysis on 5 factors, interpretable latent variables were determined to quantify
Amino Acid attributes. These are the scores from factor analysis calculated by factor.pa.ginv in R.
}
\source{
Method similar to Atchley, W. R., Zhao, J., Fernandes, A. and Drueke, T. 2005. Solving the sequence "metric" problem: Proc. Natl. Acad. Sci. USA 102: 6395-6400.
}
\seealso{
\code{\link{AAMetric.Atchley}}, \code{\link{factor.pa.ginv}}
}
\examples{
data(AAMetric)
plot(AAMetric[,1], AAMetric[,2], pch = AminoAcids)
cor(AAMetric, AAMetric.Atchley)
}
\keyword{datasets}
|
/man/AAMetric.Rd
|
no_license
|
cran/HDMD
|
R
| false | false | 2,017 |
rd
|
`HDGENE.QQ` <-
function(P.values, plot.type = "log_P_values", name.of.trait = "Trait",DPP=50000,plot.style="rainbow"){
#Object: Make a QQ-Plot of the P-values
#Options for plot.type = "log_P_values" and "P_values"
#Output: A pdf of the QQ-plot
#Authors: Alex Lipka and Zhiwu Zhang
# Last update: May 9, 2011
##############################################################################################
# Sort the data by the raw P-values
#print("Sorting p values")
#print(paste("Number of P values: ",length(P.values)))
#remove NAs and keep the ones between 0 and 1
P.values=P.values[!is.na(P.values)]
P.values=P.values[P.values>0]
P.values=P.values[P.values<=1]
if(length(P.values[P.values>0])<1) return(NULL)
N=length(P.values)
DPP=round(DPP/4) #Reduce to 1/4 for QQ plot
P.values <- P.values[order(P.values)]
#Set up the p-value quantiles
#print("Setting p_value_quantiles...")
p_value_quantiles <- (1:length(P.values))/(length(P.values)+1)
if(plot.type == "log_P_values")
{
log.P.values <- -log10(P.values)
log.Quantiles <- -log10(p_value_quantiles)
index=GAPIT.Pruning(log.P.values,DPP=DPP)
log.P.values=log.P.values[index ]
log.Quantiles=log.Quantiles[index]
#Add confidence interval
N1=length(log.Quantiles)
## create the confidence intervals
c95 <- rep(NA,N1)
c05 <- rep(NA,N1)
for(j in 1:N1){
i=ceiling((10^-log.Quantiles[j])*N)
if(i==0)i=1
c95[j] <- qbeta(0.95,i,N-i+1)
c05[j] <- qbeta(0.05,i,N-i+1)
#print(c(j,i,c95[j],c05[j]))
}
#CI Lines
#plot(log.Quantiles, -log10(c05), xlim = c(0,max(log.Quantiles)), ylim = c(0,max(log.P.values)), type="l",lty=5, axes=FALSE, xlab="", ylab="",col="black")
#par(new=T)
#plot(log.Quantiles, -log10(c95), xlim = c(0,max(log.Quantiles)), ylim = c(0,max(log.P.values)), type="l",lty=5, axes=FALSE, xlab="", ylab="",col="black")
#CI shade
plot(NULL, xlim = c(0,max(log.Quantiles)), ylim = c(0,max(log.P.values)), type="l",lty=5, lwd = 2, axes=FALSE, xlab="", ylab="",col="gray")
index=length(c95):1
polygon(c(log.Quantiles[index],log.Quantiles),c(-log10(c05)[index],-log10(c95)),col='gray',border=NA)
#Diagonal line
abline(a = 0, b = 1, col = "red",lwd=2)
#data
par(new=T)
if(plot.style=="FarmCPU"){
plot(log.Quantiles, log.P.values, cex.axis=1.1, cex.lab=1.3, lty = 1, lwd = 2, col = "Black" ,bty='l', xlab =expression(Expected~~-log[10](italic(p))), ylab = expression(Observed~~-log[10](italic(p))), main = paste(name.of.trait,sep=""),pch=20)
}
if(plot.style=="rainbow"){
plot(log.Quantiles, log.P.values, xlim = c(0,max(log.Quantiles)), ylim = c(0,max(log.P.values)), cex.axis=1.1, cex.lab=1.3, lty = 1, lwd = 2, col = "Black" ,xlab =expression(Expected~~-log[10](italic(p))),ylab = expression(Observed~~-log[10](italic(p))), main = paste(name.of.trait,sep=""))
}
}
if(plot.type == "P_values")
{
pdf(paste("QQ-Plot_", name.of.trait,".pdf" ,sep = ""))
par(mar = c(5,5,5,5))
qqplot(p_value_quantiles, P.values, xlim = c(0,1),
ylim = c(0,1), type = "l" , xlab = "Uniform[0,1] Theoretical Quantiles",
lty = 1, lwd = 1, ylab = "Quantiles of P-values from GWAS", col = "Blue",
main = paste(name.of.trait,sep=" "))
abline(a = 0, b = 1, col = "red")
}
#print("GAPIT.QQ accomplished successfully!")
}
#=============================================================================================
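# Usage sketch (illustrative, not part of the original file): HDGENE.QQ() expects a
# numeric vector of p-values and, for the "log_P_values" branch, GAPIT.Pruning()
# available on the search path. A call might look like:
# p.values <- c(runif(9990), runif(10, 0, 1e-4))   # mostly null, a few strong signals
# HDGENE.QQ(p.values, plot.type = "log_P_values", name.of.trait = "ExampleTrait")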
|
/R/HDGENE.QQ.R
|
no_license
|
YaoZhou89/HDGENE
|
R
| false | false | 3,813 |
r
|
# AUTO GENERATED FILE - DO NOT EDIT
htmlDialog <- function(children=NULL, id=NULL, n_clicks=NULL, n_clicks_timestamp=NULL, key=NULL, role=NULL, accessKey=NULL, className=NULL, contentEditable=NULL, contextMenu=NULL, dir=NULL, draggable=NULL, hidden=NULL, lang=NULL, spellCheck=NULL, style=NULL, tabIndex=NULL, title=NULL, loading_state=NULL, ...) {
wildcard_names = names(assert_valid_wildcards(...))
component <- list(
props = list(children=children, id=id, n_clicks=n_clicks, n_clicks_timestamp=n_clicks_timestamp, key=key, role=role, accessKey=accessKey, className=className, contentEditable=contentEditable, contextMenu=contextMenu, dir=dir, draggable=draggable, hidden=hidden, lang=lang, spellCheck=spellCheck, style=style, tabIndex=tabIndex, title=title, loading_state=loading_state, ...),
type = 'Dialog',
namespace = 'dash_html_components',
propNames = c('children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state', wildcard_names),
package = 'dashHtmlComponents'
)
component$props <- filter_null(component$props)
structure(component, class = c('dash_component', 'list'))
}
|
/R/htmlDialog.R
|
permissive
|
TannerSorensen/dash-html-components
|
R
| false | false | 1,320 |
r
|
#
# Linux-logistic-fut.R, 23 Dec 15
# Data from:
# The {Linux} Kernel as a Case Study in Software Evolution
# Ayelet Israeli and Dror G. Feitelson
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
pal_col=rainbow(4)
ll=read.csv(paste0(ESEUR_dir, "regression/Linux-LOC.csv.xz"), as.is=TRUE)
ld=read.csv(paste0(ESEUR_dir, "regression/Linux-days.csv.xz"), as.is=TRUE)
loc_date=merge(ll, ld)
loc_date$Release_date=as.Date(loc_date$Release_date, format="%d-%b-%Y")
start_date=loc_date$Release_date[1]
loc_date$Number_days=as.integer(difftime(loc_date$Release_date,
start_date,
units="days"))
ld_ordered=loc_date[order(loc_date$Release_date), ]
strip_support_v=function(version_date, step)
{
v=substr(version_date$Version, 1, 3)
q=c(rep(TRUE, step), v[1:(length(v)-step)] <= v[(1+step):length(v)])
return (version_date[q, ])
}
h1=strip_support_v(ld_ordered, 1)
all_days=strip_support_v(h1, 5)
x_bounds=0:6000
plot(all_days$Number_days, all_days$LOC, col=point_col,
xlim=range(x_bounds),
xlab="Days since version 1.0 release", ylab="Total lines of code\n")
plot_subset=function(max_days, col_num)
{
first_3000=subset(all_days, Number_days <= max_days)
m3=nls(LOC ~ SSfpl(Number_days, a, b, c, d), data=first_3000)
y=predict(m3, list(Number_days=x_bounds))
lines(x_bounds, y, col=pal_col[col_num])
}
# For some values of Number_days the following error occurs:
# step factor 0.000488281 reduced below 'minFactor' of 0.000976562
# It is simpler to find values that work than fiddle around
# with tuning start values.
plot_subset(2900, 1)
plot_subset(3650, 2)
plot_subset(4200, 3)
plot_subset(max(all_days$Number_days), 4)
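# Illustrative alternative (not in the original script): wrapping the call in
# tryCatch() lets the remaining subsets still be drawn if one nls() fit fails.
# safe_plot_subset=function(max_days, col_num)
#    tryCatch(plot_subset(max_days, col_num), error=function(e) invisible(NULL))
# safe_plot_subset(3000, 2)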
|
/regression/linux-logistic-fut.R
|
no_license
|
gopinathsubbegowda/ESEUR-code-data
|
R
| false | false | 1,770 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxmap--internal.R
\name{print_item}
\alias{print_item}
\title{Print a table}
\usage{
print_item(data, name = NULL, max_rows = 3, max_items = 3,
max_width = getOption("width") - 10, prefix = "")
}
\arguments{
\item{data}{The item to be printed}
\item{max_rows}{(\code{numeric} of length 1) The maximum number of rows in
tables to print.}
\item{max_items}{(\code{numeric} of length 1) The maximum number of list
items to print.}
\item{max_width}{(\code{numeric} of length 1) The maximum number of
characters to print.}
\item{prefix}{(\code{numeric} of length 1) What to print in front of each
line.}
}
\description{
Used to print each item in the \code{taxmap} print method.
}
\examples{
taxa:::print_item(ex_taxmap$data$info)
taxa:::print_item(1:100)
}
\keyword{internal}
|
/man/print_item.Rd
|
permissive
|
lionel-/taxa
|
R
| false | true | 857 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mm10_NcoI_10000}
\alias{mm10_NcoI_10000}
\title{Genomic features for mm10 genome and NcoI restriction enzyme at 10 Kbp}
\format{A data frame with 272474 rows and 5 variables:
\describe{
\item{chr:}{chromosome}
\item{map:}{mappability as computed by gem}
\item{res:}{restriction enzyme density per 1 Kbp computed by Biostrings::matchPattern()}
\item{cg:}{cg content as computed by bedtools}
\item{bin:}{genomic bin with the format chromosome:start_position}
\item{pos:}{start position of the genomic bin}
}}
\usage{
mm10_NcoI_10000
}
\description{
A \code{data.frame} containing the mappability, restriction
enzyme density and CG proportion of the mm10 genome and
NcoI restriction enzyme in 10 Kbp bins
}
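\examples{
# Illustrative sketch (not part of the original file): load and inspect the bins
data(mm10_NcoI_10000)
str(mm10_NcoI_10000)
}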
\keyword{datasets}
|
/man/mm10_NcoI_10000.Rd
|
no_license
|
4DGenome/hicfeatures
|
R
| false | true | 848 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weatherData.R
\name{getDetailedWeather}
\alias{getDetailedWeather}
\title{Gets weather data for a single date (All records)}
\usage{
getDetailedWeather(station_id, date, station_type = "airportCode",
opt_temperature_columns = TRUE, opt_all_columns = FALSE,
opt_custom_columns = FALSE, custom_columns = NULL,
opt_compress_output = FALSE, opt_verbose = FALSE, opt_warnings = TRUE)
}
\arguments{
\item{station_id}{is a valid 3-letter airport code or a valid Weather Station ID}
\item{date}{is a valid string representing a date in the past (YYYY-MM-DD)}
\item{station_type}{can be \code{airportCode} which is the default, or it
can be \code{id} which is a weather-station ID}
\item{opt_temperature_columns}{Boolean flag to indicate only Temperature data is to be returned (default TRUE)}
\item{opt_all_columns}{Boolean flag to indicate whether all available data is to be returned (default FALSE)}
\item{opt_custom_columns}{Boolean flag to indicate if only a user-specified set of columns are to be returned. (default FALSE)
If TRUE, then the desired columns must be specified via \code{custom_columns}}
\item{custom_columns}{Vector of integers specified by the user to indicate which columns to fetch.
The Date column is always returned as the first column. The
column numbers specified in \code{custom_columns} are appended as columns of
the data frame being returned (default NULL). The exact column numbers can be
found by visiting the weatherUnderground URL, and counting from 1. Note that if \code{opt_custom_columns} is TRUE,
then \code{custom_columns} must be specified.}
\item{opt_compress_output}{Boolean flag to indicate if a compressed output is preferred.
If this option is set to be TRUE, only every other record is returned}
\item{opt_verbose}{Boolean flag to indicate if verbose output is desired}
\item{opt_warnings}{Boolean flag to turn off warnings. Default value is TRUE, to keep
the warnings on.}
}
\value{
A data frame with each row containing: \itemize{
\item Date and Time stamp for the date specified
\item Temperature and/or other weather columns
}
}
\description{
Given a valid station and a single date this function
will return a dataframe of time-stamped weather data. It does not summarize
the data.
}
\examples{
\dontrun{
getDetailedWeather("NRT", "2014-04-29") #just the Temperature Columns
# Returns all columns available
getDetailedWeather("NRT", "2014-04-29", opt_all_columns=T)
wCDG <- getDetailedWeather("CDG", "2013-12-12",opt_custom_columns=T,
custom_columns=c(10,11,12))
}
}
\seealso{
getWeatherForDate, getSummarizedWeather
}
|
/man/getDetailedWeather.Rd
|
no_license
|
cran/weatherData
|
R
| false | true | 2,762 |
rd
|
good_years <- read.csv("data/goodyears.csv")[,1]
|
/code/goodyears_stack.R
|
no_license
|
jsta/dataflowchl
|
R
| false | false | 50 |
r
|
## These functions cache the inverse of a matrix so that repeated requests for
## the same inverse reuse a stored result instead of recomputing it.
## makeCacheMatrix: creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix, returning the cached inverse when it is already available
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached result")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
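## Usage sketch (illustrative, not part of the original assignment file):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # first call computes the inverse and caches it
## cacheSolve(cm)   # second call prints "getting cached result" and reuses it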
|
/cachematrix.R
|
no_license
|
caarthee/ProgrammingAssignment2
|
R
| false | false | 773 |
r
|
Sys.setlocale(category = "LC_ALL", locale = "english")
rawData <- read.table("household_power_consumption.txt",
sep = ";",
header = TRUE)
rawData$Date <- as.Date(as.character(rawData$Date), "%d/%m/%Y")
electricData <- rawData[rawData$Date>=as.Date("2007-02-01")
& rawData$Date<=as.Date("2007-02-02"),]
electricData$Global_active_power <- as.numeric(as.character(electricData$Global_active_power))
png('plot3.png', width=480, height=480)
plot(as.POSIXct(paste(electricData$Date, as.character(electricData$Time)), format="%Y-%m-%d %H:%M:%S"),
as.numeric(as.character(electricData$Sub_metering_1)),
type="l",
ylab="Energy sub metering",
xlab="")
lines(as.POSIXct(paste(electricData$Date, as.character(electricData$Time)), format="%Y-%m-%d %H:%M:%S"),
as.numeric(as.character(electricData$Sub_metering_2)),
type="l",
col="red")
lines(as.POSIXct(paste(electricData$Date, as.character(electricData$Time)), format="%Y-%m-%d %H:%M:%S"),
as.numeric(as.character(electricData$Sub_metering_3)),
type="l",
col="blue")
legend("topright",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1,1),
col=c("black", "red", "blue"))
dev.off()
|
/plot3.R
|
no_license
|
edwada/Exploratory-Data-Analysis-HW1
|
R
| false | false | 1,296 |
r
|
library(reshape2)
filename <- "getdata_dataset.zip"
## Download and unzip the dataset:
if (!file.exists(filename)){
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip "
download.file(fileurl, filename)
}
if (!file.exists("UCI HAR Dataset")) {
unzip(filename)
}
# Load activity labels + features
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
#convert column 2 into character
activity_labels[,2] <- as.character(activity_labels[,2])
feature <- read.table("UCI HAR Dataset/features.txt")
feature[,2] <- as.character(feature[,2])
# Extract only the data on mean and standard deviation
meanandstd <- grep(".*mean.*|.*std.*", feature[,2])
# Rename and remove mean, std and ()
meanandstd.names <- feature[meanandstd,2]
meanandstd.names <- gsub('-mean', 'Mean', meanandstd.names)
meanandstd.names <- gsub('-std', 'Std', meanandstd.names)
meanandstd.names <- gsub('[-()]', '', meanandstd.names)
# Load the datasets
x_train <- read.table("UCI HAR Dataset/train/X_train.txt")[meanandstd]
y_train <- read.table("UCI HAR Dataset/train/Y_train.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
training <- cbind(subject_train, y_train , x_train)
x_test <- read.table("UCI HAR Dataset/test/X_test.txt")[meanandstd]
y_test <- read.table("UCI HAR Dataset/test/Y_test.txt")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
testing <- cbind(subject_test, y_test, x_test)
# merge datasets and add labels
completedata <- rbind(training, testing)
colnames(completedata) <- c("subject", "activity", meanandstd.names)
# turn activities & subjects into factors
completedata$activity <- factor(completedata$activity, levels = activity_labels[,1], labels = activity_labels[,2])
completedata$subject <- as.factor(completedata$subject)
completedata.melted <- melt(completedata, id = c("subject", "activity"))
completedata.mean <- dcast(completedata.melted, subject + activity ~ variable, mean)
write.table(completedata.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
/run_analysis.R
|
no_license
|
rikimaru1181/getting-and-cleaning-data-course-project
|
R
| false | false | 2,082 |
r
|
#' Create an example rmarkdown file report.
#'
#' This function has a template rmarkdown file. The file contains BEQR functions
#' for plotting, NCA and ABE analysis.
#' @param dir_path relative or absolute path to an existing file directory
#' @param file_name a name to be given to the template rmarkdown file (include
#' *.rmd extension)
#' @return creates an rmarkdown file in an existing file directory
#' @export
make_template_report<-function(dir_path = "lab-notebook", file_name = "newfile.rmd")
{
path<-paste0(dir_path,"/", file_name)
file.create(path)
writeLines(c("---",
'title: "Untitled"',
'author: "Eliford"',
'date: "November 4, 2018"',
"output: html_document",
"---",
" ",
" ",
"```{r}",
"#Load the bioequivalence package",
"library(beq)",
"```",
" ",
" ",
"```{r}",
"base_mean_plot()",
"```",
" ",
"```{r}",
'pddata_smooth<-pddata%>%group_by(SEQUENCE,PERIOD,ID)%>%do(get_smooth_dv(., idv = "TAD", dv = "GIR", span = 0.25))',
"```",
" ",
"```{r}",
"##Prepare PKNCA for NCA analysis",
"library(PKNCA)",
"###Set method used to calculate AUCs",
"##Method can be linear or log linear (default). Linear was chosen as required in FDA guidance",
'PKNCA.options(auc.method="linear")',
"###Check if method is set",
'PKNCA.options(name = "auc.method")',
"```",
" ",
"```{r}",
"###Create partial AUC dummy dataset",
'testdf<-make_nca_par_dataset(nca_params = c("cmax","tmax","aumclast", "lambda.z","half.life","aucpext.obs"), ',
" partial_aucs = list(start=c(0,0,0), end=c(4, 6, 12)), ",
" protocol_tlast = 24, ",
" compute_aucinf = FALSE)",
"###Create dose dataset",
"dose<-make_nca_dose_dataset(df = insulinpk, treatment_var = TREATMENT, ",
" id_var = ID, time_var = TIME, dose = 7287700, ",
" dose_time = 0, tau = 12)",
" ",
"###Create Conc object",
"myconc<-PKNCAconc(insulinpk, formula = CONC~TIME|TREATMENT+ID, ",
' labels = c(TIME="Time (hrs)", CONC="Concentration (ng/mL)"),',
' units = c(TIME="hrs", CONC="ng/mL"))',
" ",
"##The create the dose object",
'mydose<-PKNCAdose(dose, formula = DOSE~TIME|TREATMENT+ID, units=c(DOSE="ng", TIME="hrs"), route="extravascular")',
" ",
"###Create data object",
"mydata<-PKNCAdata(myconc, mydose, intervals=testdf)",
"```",
" ",
"```{r}",
"#Calculate NCA parameters",
"myresults<-pk.nca(mydata)",
"```",
" ",
" ",
"```{r}",
"#Extact calculated PK parameters",
'results_wide<-extract_nca_results(myresults, select_nca_param = c("auclast", "aucinf.obs","cmax",',
' "tmax","aumclast", "lambda.z",',
' "half.life","aucpext.obs"))',
"```",
" ",
"```{r}",
"#Summarize",
'nca_summary<-summarize_statz(results_wide, group = "TREATMENT", ',
' variables = c("auclast0_24","cmax0_24","auclast0_6"))',
"```",
" ",
"```{r}",
"## Summarize tmax and half life",
"#Make sure only subjects with PK parameters for both treatments are present in the dataset",
"results_wide_pairs<-results_wide%>%group_by(ID)%>%mutate(N=n())%>%ungroup()%>%filter(N==2)%>%select(-N)",
"## Summarize the PK parameters",
'test<-table_median_tmax_thalf(results_wide_pairs, group = "TREATMENT")',
"```",
" ",
" ",
"```{r}",
"###Get bioequivalence",
"# First convert treatment variable to factor",
"results_wide<-results_wide%>%mutate(TREATMENT=factor(TREATMENT), logCmax0_24=log(cmax0_24))",
"# Fit model",
"fit<-get_abe_bylme(data = results_wide, treatments = TREATMENT, sequences = SEQUENCE, periods = PERIOD,",
' idvar = ID, abe_param = "logCmax0_24", contr_variable = "Mixtard 30/70")',
"```",
" ",
" ",
"```{r}",
"##Extract BEQ table",
"ABE_result<-fit$ABEresult",
"```",
" ",
"```{r}",
"## Investigate model fit ",
"lmefit<-fit$lmefit",
"## adjusted geometric mean for treatments",
"fixed_effects<-fixef(lmefit)",
'reference_mean<-exp(fixed_effects[["(Intercept)"]])',
'test_mean<-exp(fixed_effects[["(Intercept)"]] + fixed_effects[["TREATMENTConsegna 30/70"]])',
"```"),
con = path )
}
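# Usage sketch (illustrative, not part of the original file); assumes the target
# directory already exists, since only the file itself is created:
# make_template_report(dir_path = "lab-notebook", file_name = "beq-report.rmd")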
|
/R/make_template_report.R
|
no_license
|
Eliford/BEQR
|
R
| false | false | 5,748 |
r
|
library(ifultools)
### Name: rotateVector
### Title: Circularly vector rotation
### Aliases: rotateVector
### Keywords: utilities
### ** Examples
rotateVector(1:5, 2)
rotateVector(1:5, -2)
|
/data/genthat_extracted_code/ifultools/examples/rotateVector.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 196 |
r
|
#
#
# Author: aagarwal
###############################################################################
df1 = read.table(commandArgs()[3],header=TRUE,sep="\t")
df2 = read.table(commandArgs()[4],header=TRUE,sep="\t")
df3 = read.table(commandArgs()[5],header=TRUE,sep="\t")
print("Subtracting pariwise x and y columns of data frames to check \n if the data is in the same order or not.")
sum(df1$x-df2$x)==0
sum(df2$x-df3$x)==0
sum(df1$y-df2$y)==0
sum(df2$y-df3$y)==0
print("If all True, everything is fine, go ahead.")
print("Adding weights of the files.")
df = df1
df$weight = df1$weight + df2$weight + df3$weight
print("writing data to the file.")
write.table(df,file=commandArgs()[6],sep="\t")
|
/src/main/resources/R/writeFile/writeRowSumToFile.R
|
no_license
|
teg-iitr/matsim-iitr
|
R
| false | false | 703 |
r
|
# ------------------------------------------------------------------------------
# 04-analyze.R: clean and visualize
# ------------------------------------------------------------------------------
# load packages ----------------------------------------------------------------
library(tidyverse)
library(scales)
# load data --------------------------------------------------------------------
pac_all <- read_csv(here::here("08-cerse/code/opensecrets/data/", "pac-all.csv"))
# ------------------------------------------------------------------------------
# data cleaning
# ------------------------------------------------------------------------------
# fix country_parent -----------------------------------------------------------
pac_all <- pac_all %>%
separate(country_parent, into = c("country", "parent"), sep = "/", extra = "merge")
# fix dollar amounts -----------------------------------------------------------
parse_currency <- function(x){
x %>%
str_remove("\\$") %>%
str_remove_all(",") %>%
as.numeric()
}
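# Illustrative sanity check (not in the original script):
# parse_currency("$1,234.50")   # returns 1234.5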
pac_all <- pac_all %>%
mutate(
total = parse_currency(total),
dems = parse_currency(dems),
repubs = parse_currency(repubs)
)
# write data -------------------------------------------------------------------
write_csv(pac_all, path = here::here("opensecrets/data/", "pac-all-clean.csv"))
# ------------------------------------------------------------------------------
# data visualization
# ------------------------------------------------------------------------------
# UK and Canada contributions --------------------------------------------------
pac_all %>%
filter(country %in% c("Canada", "UK")) %>%
group_by(country, year) %>%
summarise(tot = sum(total)) %>%
ggplot(aes(x = year, y = tot, group = country, color = country)) +
geom_line()
# UK contributions to democrats and republicans --------------------------------
pac_all %>%
filter(
country == "UK",
year < 2020
) %>%
group_by(year) %>%
summarise(
Democrat = sum(dems),
Republican = sum(repubs)
) %>%
pivot_longer(cols = c(Democrat, Republican), names_to = "party", values_to = "amount") %>%
ggplot(aes(x = year)) +
geom_line(aes(y = amount, group = party, color = party)) +
scale_color_manual(values = c("blue", "red")) +
scale_y_continuous(labels = dollar_format(scale = 0.000001, suffix = "M")) +
labs(
x = "Year",
y = "Amount",
color = "Party",
title = "Contribution to US politics from UK-Connected PACs",
subtitle = "By party, over time"
) +
theme_minimal()
|
/08-cerse/code/opensecrets/04-analyze.R
|
no_license
|
mine-cetinkaya-rundel/eatcake
|
R
| false | false | 2,570 |
r
|
# Exercise-1: practice with basic syntax
# Create a variable `hometown` that stores the city in which you were born
hometown <- "Orlando, Florida"
# Assign your name to the variable `my.name`
my.name <- "James Lyou"
# Assign your height (in inches) to a variable `my.height`
my.height <- 70
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 1
# Create a variable `puppy.price`, which is how expensive you think a puppy is
puppy.price <- 100
# Create a variable `total.cost` that has the total cost of all of your puppies
total.cost <- puppies * puppy.price
# Create a boolean variable `too.expensive`, set to TRUE if the cost is greater than $1,000
too.expensive <- total.cost > 1000
# Create a variable `max.puppies`, which is the number of puppies you can afford for $1,000.
max.puppies <- 1000 / puppy.price
|
/exercise-1/exercise.R
|
permissive
|
lyoujl/module5-r-intro
|
R
| false | false | 814 |
r
|
#' Square grid over system area
#'
#' Creates a square grid with given cell size over the service area
#' of the dockless bike sharing system.
#'
#' @param area object of class \code{sf} representing the system area.
#' @param ... further arguments passed to \code{st_make_grid}.
#' @return If \code{type} is set to \code{polygons}, it returns an object
#' of class \code{sf} containing the grid cells as square polygons.
#' If \code{type} is set to \code{centers} it returns an object of
#' class \code{sf} containing the grid cell centroids as points.
#' @export
create_grid = function(area, ...) {
# Project the area
area_projected = dockless::project_sf(area)
# Create a grid over the area, with the given cell size
geometry = sf::st_make_grid(area_projected, ...)
grid = sf::st_sf(geometry)
# Clip the grid with the area
grid$intersects = as.vector(
sf::st_intersects(grid, area_projected, sparse = FALSE)
)
grid_clipped = grid[grid$intersects,]
grid_clipped$intersects = NULL
sf::st_transform(grid_clipped, crs = 4326)
}
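# Illustrative usage (not part of the original file): `area` is assumed to be an
# sf polygon of the service area; extra arguments are passed on to st_make_grid(),
# e.g. a cell size in metres of the projected CRS.
# grid <- create_grid(area, cellsize = 500)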
#' Usage intensity in grid cells
#'
#' Calculates the number of pick-ups in each polygon of an overlaying grid.
#'
#' @param usage pick-ups, as an object of class \code{sf} with point geometry.
#' @param grid grid cells as an object of class \code{sf} with polygon geometry.
#' @return Returns a numeric vector with each element specifying the number
#' of pick-ups in the grid cell with corresponding index.
#' @export
usage_intensity = function(usage, grid) {
# Project the points to State Plane California Zone III (EPSG:26943)
points_projected = dockless::project_sf(usage)
# Project the polygons to State Plane California Zone III (EPSG:26943)
polygons_projected = dockless::project_sf(grid)
# Calculate the number of points in each polygon
f = function(x) {
nrow(sf::st_intersection(x, points_projected))
}
sapply(split(polygons_projected, 1:nrow(polygons_projected)), f)
}
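# Illustrative usage (not part of the original file): `pickups` is assumed to be
# an sf point layer of pick-up locations; attaching the counts to the grid gives
# spatial_cluster() the `intensity` column it requires.
# grid$intensity <- usage_intensity(usage = pickups, grid = grid)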
#' Spatially constrained hierarchical clustering of a \code{dockless_dfc} object
#'
#' Clusters the \code{dockless_df} objects in a \code{dockless_dfc} with
#' spatially constrained hierarchical clustering.
#'
#' @param data object of class \code{dockless_dfc}.
#' @param grid grid cells as an object of class \code{sf} with polygon geometry, in which
#' each cell refers to the spatial location of each \code{dockless_df} object in
#' the \code{dockless_dfc}. Must contain a column named \code{intensity}, which provides
#' the number of pick-ups per grid cell.
#' @param area object of class \code{sf} representing the system area.
#' @param K vector of integers specifying which values of k, the number of
#' clusters, should be tested.
#' @param omega vector of values specifying which values of alpha, the mixing
#' parameter, should be tested.
#' @return Returns a list of 2 with one element being a numeric vector of cluster
#' indices that specifies for each of the given \code{dockless_df} objects to which
#' cluster it belongs, and the second element being the geographical outlines of each
#' cluster, bundled in an object of class \code{sf} with polygon geometry.
#' @export
spatial_cluster = function(data, grid, area, K, omega = seq(0, 1, 0.1)) {
# Create a dissimilarity matrix from the data
data_dis = dockless::dissimilarity_data(data)
# Create a spatial dissimilarity matrix
spatial_dis = dockless::dissimilarity_spatial(grid)
# Calculate Dunn Index for all k in K
validation = suppressWarnings(
clValid::clValid(
obj = as.matrix(data_dis),
nClust = K,
clMethods = 'hierarchical',
validation = 'internal',
method = 'ward'
)
)
dunn_indices = validation@measures[2, , ]
# Choose the optimal value of k
k_star = K[which.max(dunn_indices)]
# Spatially constrained hierarchical clusterings for all alpha in omega
information_criteria = ClustGeo::choicealpha(
data_dis,
spatial_dis,
range.alpha = omega,
K = k_star,
graph = FALSE
)
  # Choose the optimal value of alpha: among the alphas that retain at least 90%
  # of the attribute-based homogeneity (Qnorm[, 1]), take the one that maximises
  # the spatial homogeneity (Qnorm[, 2]), indexing back into the full omega vector
  eligible = which(information_criteria$Qnorm[, 1] >= 0.9)
  alpha_star = omega[eligible[which.max(information_criteria$Qnorm[eligible, 2])]]
# Cluster with spatially constrained hierarchical clustering
sch_clust = ClustGeo::hclustgeo(
D0 = data_dis,
D1 = spatial_dis,
alpha = alpha_star
)
# Cut the tree based on the provided number of clusters k
cluster_indices = stats::cutree(sch_clust, k = k_star)
# Add cluster information to grid cells
grid$cluster = cluster_indices
# Split by cluster
cells_per_cluster = split(
x = grid,
f = grid$cluster
)
# Dissolve grid cells per cluster
cells_dissolved = lapply(
cells_per_cluster,
function(x) sf::st_union(x)
)
  # If a cluster is a multipolygon, split it into separate polygons
# Return one sf data frame
f = function(x) {
if (methods::is(x, 'sfc_MULTIPOLYGON')) {
cluster = sf::st_sf(sf::st_cast(x, 'POLYGON'))
names(cluster)[1]= 'geometry'
sf::st_geometry(cluster) = 'geometry'
return(cluster)
} else {
cluster = sf::st_sf(x)
names(cluster)[1]= 'geometry'
sf::st_geometry(cluster) = 'geometry'
return(cluster)
}
}
cluster_outlines = do.call('rbind', lapply(cells_dissolved, f))
# Sort based on Y coordinate of centroid
cluster_centroids = sf::st_coordinates(
sf::st_centroid(dockless::project_sf(cluster_outlines))
)
cluster_outlines = cluster_outlines[order(cluster_centroids[,"Y"]), ]
# Add cluster index
cluster_outlines$cluster = as.factor(c(1:nrow(cluster_outlines)))
# Update cluster information of grid cells
grid$cluster = NULL
grid_updated = sf::st_join(
dockless::project_sf(grid),
dockless::project_sf(cluster_outlines),
join = sf::st_within
)
grid_updated = sf::st_transform(
grid_updated,
crs = 4326
)
# Calculate number of pick-ups per cluster
pickups_per_cluster = stats::aggregate(
grid_updated$intensity,
by = list(grid_updated$cluster),
FUN = sum
)
if (!all(pickups_per_cluster$x >= 56)) {
# Identify the cluster with too low usage intensity
small_cluster = which(pickups_per_cluster$x < 56)
# Calculate the centroids of all clusters
centroids = lapply(
cluster_outlines$geometry,
function(x) sf::st_centroid(x)
)
    # For the small_cluster, find the cluster with the nearest centroid
    distances = sapply(
      centroids,
      function(x) sf::st_distance(x, centroids[[small_cluster]])
    )
    # Exclude the small cluster itself (distance zero) before taking the minimum,
    # so the resulting index refers to the original cluster numbering
    distances[small_cluster] = Inf
    nearest_cluster = which.min(distances)
# Replace cluster indices of small_cluster with cluster indices...
# ... of nearest cluster
grid_updated[grid_updated$cluster == small_cluster,]$cluster =
nearest_cluster
# Split by cluster
cells_per_cluster = split(
x = grid_updated,
f = grid_updated$cluster
)
# Dissolve grid cells per cluster
cells_dissolved = lapply(
cells_per_cluster,
function(x) sf::st_union(x)
)
cluster_outlines = do.call('rbind', lapply(cells_dissolved, f))
# Sort based on Y coordinate of centroid
cluster_centroids = sf::st_coordinates(
sf::st_centroid(dockless::project_sf(cluster_outlines))
)
cluster_outlines = cluster_outlines[order(cluster_centroids[,"Y"]), ]
# Add cluster index
cluster_outlines$cluster = as.factor(c(1:nrow(cluster_outlines)))
# Update cluster information of grid cells
grid_updated$cluster = NULL
grid_updated = sf::st_join(
dockless::project_sf(grid_updated),
dockless::project_sf(cluster_outlines),
join = sf::st_within
)
grid_updated = sf::st_transform(
grid_updated,
crs = 4326
)
}
# Retrieve cluster indices
cluster_indices_updated = grid_updated$cluster
# Clip cluster outlines by system area
cluster_outlines_updated = sf::st_intersection(
dockless::project_sf(cluster_outlines),
dockless::project_sf(area)
)
cluster_outlines_updated = sf::st_transform(
cluster_outlines_updated,
crs = 4326
)
# Return list of indices and outlines
list(
indices = cluster_indices_updated,
outlines = cluster_outlines_updated
)
}
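# A minimal usage sketch (hypothetical object names): `usagedata` is assumed to be a
# dockless_dfc whose elements correspond one-to-one to the grid cells, and `grid`
# carries the `intensity` column computed above:
#   clustering <- spatial_cluster(usagedata, grid = grid, area = service_area, K = 2:10)
#   grid$cluster <- clustering$indices
#   plot(clustering$outlines["cluster"])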
#' Create model points
#'
#' Creates an object of class \code{sf} containing the geographical locations
#' for which the forecasting models will be built. The locations are calculated
#' by taking, per cluster, the centroid of the grid cell centers, weighted by
#' the usage intensity of the grid cell polygons.
#'
#' @param centroids all grid cell centroids as an \code{sf} object with point geometry,
#' containing at least the attributes \code{cluster}, specifying to which cluster each
#' grid cell centroid belongs, and \code{intensity}, specifying the number of pick-ups
#' in the grid cell that corresponds to the grid cell centroid.
#' @return Returns an object of class \code{sf} with point geometry.
#' @export
create_modelpoints = function(centroids) {
# Split the centroids object by cluster
centroids_per_cluster = split(
x = centroids,
f = centroids$cluster
)
# Calculate weighted centroid per cluster
# Output as sf data frame instead of only sfc geometry
f = function(x) {
geometry = dockless::weighted_centroid(
points = x,
weights = x$intensity
)
sf::st_sf(geometry)
}
modelpoints = do.call('rbind', lapply(centroids_per_cluster, f))
# Add cluster information
modelpoints$cluster = as.factor(seq(1, nrow(modelpoints), 1))
return(modelpoints)
}
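# A minimal usage sketch (hypothetical object names): grid cell centroids carrying the
# `cluster` and `intensity` columns, e.g. derived from the clustered grid above, give
# one usage-weighted model point per cluster:
#   centroids <- sf::st_centroid(grid)
#   modelpoints <- create_modelpoints(centroids)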
|
/R/cluster_loop.R
|
no_license
|
kezekwem/dockless
|
R
| false | false | 9,529 |
r
|
#lecture 4
install.packages('hexbin')
library(hexbin)
library(tidyverse)
genes <- read_tsv("data/data/gene_lengths.txt")
genes
ggplot(genes) + geom_density(aes(x=gene_length)) + facet_wrap((~chr))
# the plots show the data is not normally distributed at all; chr1 looks like it has more small genes
kruskal.test(gene_length ~ chr, data=genes)
pairwise.wilcox.test(genes$gene_length, g =genes$chr)
#ex 2
heights <- read_tsv("data/data/height.txt")
heights
ggplot(heights, aes(x=Year, y= mean_weight, col = mean_height)) +
geom_point() +
facet_wrap(~Gender, scales = "free_y") +
geom_smooth(method = "lm")
#ex4
many.genes <- read_tsv("data/data/manyGenes.txt")
ggplot(many.genes, aes(x=exprA, y=exprB)) + geom_point(alpha = 0.1)
ggplot(many.genes, aes(x=exprA, y=exprC)) + geom_point(alpha = 0.1, size = 0.1)
ggplot(many.genes, aes(y=exprA, x=exprC ))+
geom_bin2d(bins=100) # try changing bins to some other value
ggplot(many.genes, aes(y=exprA, x=exprC ))+
geom_hex(bins=50) # try changing bins to some other value
ggplot(many.genes, aes(y=exprA, x=exprC ))+
geom_density_2d()
ggplot(many.genes, aes(y=exprA, x=exprC ))+
stat_density_2d(aes(fill = after_stat(level)), geom = "polygon")
#ex 5
gss_cat
gss_cat %>%
filter(relig=="Christian" | relig== "Orthodox-christian",
marital=="Divorced"| marital=="Married") %>%
count(marital, relig) %>%
spread(relig, n) -> simple_count
simple_count
simple_count_mat <- as.matrix(simple_count[1:2, 2:3])
fisher.test(simple_count_mat)
fisher.test(simple_count_mat*2)
fisher.test(round(simple_count_mat*0.5))
|
/Class/lecture4.R
|
no_license
|
mahdi-robbani/high_throughput
|
R
| false | false | 1,575 |
r
|
library(gt)
library(tidyverse)
# Create a table where rows are formatted conditionally
conditional_tbl <-
readr::read_csv(
system.file("extdata", "sp500.csv", package = "gt"),
col_types = "cddddd"
) %>%
gt() %>%
fmt_number(
columns = Open,
rows = Open > 1900,
decimals = 3,
scale_by = 1/1000,
pattern = "{x}K"
) %>%
fmt_number(
columns = Close,
rows = High < 1940 & Low > 1915,
decimals = 3
) %>%
fmt_currency(
columns = c(High, Low, Close),
rows = Date > "2016-02-20",
currency = "USD"
)
conditional_tbl
|
/tests/gt-examples/01-html-script/html-10-conditional_formatting.R
|
permissive
|
rstudio/gt
|
R
| false | false | 577 |
r
|
library(DLMtool)
### Name: MRreal
### Title: Spatial closure and allocation management procedures
### Aliases: MRreal MRnoreal
### ** Examples
MRreal(1, DLMtool::Atlantic_mackerel, plot=TRUE)
MRnoreal(1, DLMtool::Atlantic_mackerel, plot=TRUE)
|
/data/genthat_extracted_code/DLMtool/examples/MRreal.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 250 |
r
|
.dashEditableDiv_js_metadata <- function() {
deps_metadata <- list(`dash_editable_div` = structure(list(name = "dash_editable_div",
version = "0.0.1", src = list(href = NULL,
file = "deps"), meta = NULL,
script = 'dash_editable_div.min.js',
stylesheet = NULL, head = NULL, attachment = NULL, package = "dashEditableDiv",
all_files = FALSE), class = "html_dependency"),
`dash_editable_div` = structure(list(name = "dash_editable_div",
version = "0.0.1", src = list(href = NULL,
file = "deps"), meta = NULL,
script = 'dash_editable_div.min.js.map',
stylesheet = NULL, head = NULL, attachment = NULL, package = "dashEditableDiv",
all_files = FALSE, dynamic = TRUE), class = "html_dependency"))
return(deps_metadata)
}
|
/R/internal.R
|
no_license
|
remidbs/dash-editable-div
|
R
| false | false | 715 |
r
|
install.packages("tidyverse")
install.packages("emmeans")
install.packages("afex")
|
/install.R
|
no_license
|
AngelicaCastaneda/first_binder
|
R
| false | false | 83 |
r
|
### =========================================================================
### Expression objects
### -------------------------------------------------------------------------
###
### Temporarily copy/pasted from rsolr
###
setClassUnion("Expression", "language")
setClass("ConstantExpression",
slots=c(value="ANY"))
setIs("ConstantExpression", "Expression")
setClassUnion("Symbol", "name")
##setIs("Symbol", "Expression")
.SimpleSymbol <- setClass("SimpleSymbol",
slots=c(name="character"),
validity=function(object) {
if (!isSingleString(object@name))
"'name' must be a single, non-NA string"
})
setIs("SimpleSymbol", "Symbol")
setClassUnion("Call", "call")
setIs("Call", "Expression")
## Could sit above SolrFunctionCall
.SimpleCall <- setClass("SimpleCall", slots=c(name="Symbol", args="list"))
setIs("SimpleCall", "Call")
setClass("MethodCall")
setIs("MethodCall", "Call")
.SimpleMethodCall <- setClass("SimpleMethodCall",
slots=c(target="Expression"),
contains="SimpleCall")
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructors
###
SimpleSymbol <- function(name) {
.SimpleSymbol(name=as.character(name))
}
SimpleCall <- function(name, args) {
.SimpleCall(name=name, args=as.list(args))
}
SimpleMethodCall <- function(target, name, args) {
.SimpleMethodCall(SimpleCall(name, args), target=target)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Accessors
###
name <- function(x) x@name
target <- function(x) x@target
args <- function(x) x@args
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Factory
###
setGeneric("languageClass", function(x) standardGeneric("languageClass"))
setGeneric("expressionClass", function(x) standardGeneric("expressionClass"))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion
###
setMethod("as.character", "SimpleCall", function(x) {
paste0(name(x), "(", paste(args(x), collapse=", "), ")")
})
setMethod("as.character", "SimpleSymbol", function(x) {
name(x)
})
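### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Usage sketch (illustrative only; these constructors are internal)
###
## sym <- SimpleSymbol("range")
## as.character(sym)                    # "range"
## cl <- SimpleCall(sym, list(1, 10))   # intended to render as "range(1, 10)"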
|
/R/Expression-class.R
|
no_license
|
lawremi/hailr
|
R
| false | false | 2,302 |
r
|
#getwd()
library(dplyr)
par(mfrow = c(1, 1))
# Load the data from the xlsx file
data_xlsx = readxl::read_excel("./1S/ukol_123.xlsx",
sheet = "Vysledky mereni",
skip = 0)
data_xlsx = data_xlsx[,-1] # drop the first column with row indices
colnames(data_xlsx)=c("A22","A5","B22","B5","C22","C5","D22","D5")
head(data_xlsx)
svetTok = 0.8 * 1000
data_xlsx$A_min = data_xlsx$A5 >= svetTok
data_xlsx$B_min = data_xlsx$B5 >= svetTok
data_xlsx$C_min = data_xlsx$C5 >= svetTok
data_xlsx$D_min = data_xlsx$D5 >= svetTok
data=reshape(data=as.data.frame(data_xlsx),
direction="long",
varying=list(c("A5", "B5", "C5", "D5"),
c("A22","B22","C22","D22"),
c("A_min","B_min","C_min","D_min")),
v.names=c("C5","C22","min"),
times=c("Amber","Bright","Clear","Dim"),
timevar="vyrobce")
row.names(data) = 1:nrow(data)
data = data[-length(data)]
data = na.omit(data)
head(data)
tail(data)
# A) contingency table and mosaic plot
data.tab.abs = table(data$vyrobce, data$min)
colnames(data.tab.abs) = c("pod 80 %","nad 80 %")
data.tab.abs
# Exploratory analysis
# prop.table(data.tab.abs) # joint relative frequencies
data.tab.rel = prop.table(data.tab.abs,1) # row relative frequencies
# prop.table(data.tab.abs,2) # column relative frequencies
data.tab.celkem = data %>% group_by(vyrobce) %>% summarise( pocet_abs=sum(!is.na(min)))
data.tab.celkem$pocet_rel = prop.table(data.tab.celkem$pocet_abs)
abs.sum.ano = sum(as.numeric(data.tab.abs[,1]))
abs.sum.ne = sum(as.numeric(data.tab.abs[,2]))
rel.sum.ano = sum(as.numeric(prop.table(data.tab.abs)[,1]))
rel.sum.ne = sum(as.numeric(prop.table(data.tab.abs)[,2]))
data.tab = data.frame( nad_abs = as.numeric(data.tab.abs[,2]), nad_rel = as.numeric(data.tab.rel[,2]), nad_rel_r = paste0(round(as.numeric(data.tab.rel[,2])*100,2),' %'),
pod_abs = as.numeric(data.tab.abs[,1]), pod_rel = as.numeric(data.tab.rel[,1]), pod_rel_r = paste0(round(as.numeric(data.tab.rel[,1])*100,2),' %'),
celkem_abs = data.tab.celkem$pocet_abs, celkem_rel = data.tab.celkem$pocet_rel, celkem_rel_r = paste0(round(data.tab.celkem$pocet_rel*100,2),' %'))
rownames(data.tab) = rownames(data.tab.rel)
data.tab.presentation = data.tab
data.tab.presentation[nrow(data.tab)+1,] = c(
abs.sum.ne, rel.sum.ne, paste0(round((rel.sum.ne)*100,2),' %'),
abs.sum.ano, rel.sum.ano, paste0(round((rel.sum.ano)*100,2),' %'),
abs.sum.ano + abs.sum.ne, rel.sum.ano + rel.sum.ne, paste0(round((rel.sum.ano + rel.sum.ne)*100,2),' %'))
rownames(data.tab.presentation) = c(rownames(data.tab),'Celkem')
data.tab.presentation[,c(1,3,4,6,7,9)]
# beware: paste0 trims trailing zeros
mosaicplot(data.tab.abs,
las = 1,
color = gray.colors(2),
main = "Světelnost po 30 s při 5°C")
# Cramér's V (strength of association) TODO
# install.packages("lsr")
round(lsr::cramersV(data.tab.rel),3)
# weak association
# B) Point and interval estimate
# assumption for the Clopper-Pearson interval
bri_ind = match("Bright",rownames(data.tab))
predpoklad = round(9/(as.numeric(data.tab$pod_rel)[bri_ind]*(1-as.numeric(data.tab$pod_rel)[bri_ind])),2)
paste0('$9/(',round(as.numeric(data.tab$pod_rel)[bri_ind],4),'*',round(1-as.numeric(data.tab$pod_rel)[bri_ind],4),')$')
if(predpoklad < as.numeric(data.tab$celkem_abs)[bri_ind]){
paste0('předpoklad pro cloper-pearsnuv test je splněn s hodnotou ', predpoklad,' < ',as.numeric(data.tab$celkem_abs)[bri_ind])
}else{
paste0('předpoklad pro cloper-pearsnuv test neni splněn s hodnotou ', predpoklad,' > ',as.numeric(data.tab$celkem_abs)[bri_ind])
}
int.odhad = binom.test(data.tab$pod_abs[bri_ind], n=data.tab$celkem_abs[bri_ind])$conf.int
paste0('bodový odhad: ',data.tab$pod_abs[bri_ind], ' intervalový odhad: (',floor(int.odhad[1]*10000)/100,';',ceiling(int.odhad[2]*10000)/100,'), pomocí binom.test')
# TODO: which test?
int.odhad2 = prop.test(data.tab$pod_abs[bri_ind], n=data.tab$celkem_abs[bri_ind])$conf.int
paste0('bodový odhad: ',data.tab$pod_abs[bri_ind], ' intervalový odhad: (',floor(int.odhad2[1]*10000)/100,';',ceiling(int.odhad2[2]*10000)/100,'), pomocí prop.test')
# C) relative point and interval estimate between the best and the worst
nejlepsi_ind = match(min(data.tab$pod_rel),data.tab$pod_rel)
nejhorsi_ind = match(max(data.tab$pod_rel),data.tab$pod_rel)
# install.packages("epiR")
pom = epiR::epi.2by2(data.tab.abs[c(nejhorsi_ind,nejlepsi_ind),])
paste0('nejlepsi vyrobce: ', rownames(data.tab)[nejlepsi_ind],', nejhorsi vyrobce: ',
rownames(data.tab)[nejhorsi_ind],', ', round(pom$massoc$RR.strata.wald[1],2) , 'x lepsi') #max(data.tab$pod_rel)/min(data.tab$pod_rel)
paste0('intervalový odhad (',floor(pom$massoc$RR.strata.wald[2]*100)/100,';',ceiling(pom$massoc$RR.strata.wald[3]*100)/100,')')
# D) odds ratio, point and interval estimate
nejlepsi_sance = data.tab$pod_abs[nejlepsi_ind]/data.tab$nad_abs[nejlepsi_ind]*1000
nejhorsi_sance = data.tab$pod_abs[nejhorsi_ind]/data.tab$nad_abs[nejhorsi_ind]*1000
paste0('šance, že nejlepší nedosáhne 80 % je ',round(nejlepsi_sance),':1000')
paste0('šance, že nejhorší nedosáhne 80 % je ',round(nejhorsi_sance),':1000')
sance = pom$massoc$OR.strata.wald
paste0('šance na nedosahnutí světelnosti je ',round(sance[1],2),'x vyšší') #nejhorsi_sance/nejlepsi_sance
paste0('95% intervalový odhad je (',floor(sance[2]*100)/100,';',ceiling(sance[3]*100)/100,')')
# E) chi-squared test
# assumption
pom = chisq.test(data.tab.abs)
min(as.numeric(pom$expected)>5) == 1
# all expected counts are greater than 5, assumption met
round(pom$p.value,3) # < 0.05, we reject H_0 ->
# there is a statistically significant association between the manufacturer and ...
|
/4S/4S.r
|
permissive
|
Atheloses/VSB-S8-PS
|
R
| false | false | 6,082 |
r
|
##
## tests/gcc323.R
##
## $Revision: 1.2 $ $Date: 2015/12/29 08:54:49 $
##
require(spatstat)
local({
# critical R values that provoke GCC bug #323
a <- marktable(lansing, R=0.25)
a <- marktable(lansing, R=0.21)
a <- marktable(lansing, R=0.20)
a <- marktable(lansing, R=0.10)
})
#
# tests/hobjects.R
#
# Validity of methods for ppm(... method="ho")
#
require(spatstat)
local({
set.seed(42)
fit <- ppm(cells ~1, Strauss(0.1), method="ho", nsim=10)
fitx <- ppm(cells ~offset(x), Strauss(0.1), method="ho", nsim=10)
a <- AIC(fit)
ax <- AIC(fitx)
f <- fitted(fit)
fx <- fitted(fitx)
p <- predict(fit)
px <- predict(fitx)
})
#
# tests/hyperframe.R
#
# test "[.hyperframe" etc
#
# $Revision: 1.4 $ $Date: 2018/05/15 14:20:38 $
#
require(spatstat)
local({
lambda <- runif(4, min=50, max=100)
X <- lapply(as.list(lambda), function(x) { rpoispp(x) })
h <- hyperframe(lambda=lambda, X=X)
h$lambda2 <- lambda^2
h[, "lambda3"] <- lambda^3
h[, "Y"] <- X
h[, "X"] <- lapply(X, flipxy)
h[, c("X", "Y")] <- hyperframe(X=X, Y=X)
names(h) <- LETTERS[1:5]
print(h)
summary(h)
str(h)
head(h)
tail(h)
})
#' tests/hypotests.R
#' Hypothesis tests
#'
#' $Revision: 1.2 $ $Date: 2018/07/21 03:02:20 $
require(spatstat)
local({
hopskel.test(redwood, method="MonteCarlo", nsim=5)
berman.test(spiders, "x")
berman.test(lppm(spiders ~ x), "y")
#' quadrat test - spatial methods
a <- quadrat.test(redwood, 3)
domain(a)
shift(a, c(1,1))
})
#
# tests/imageops.R
#
# $Revision: 1.16 $ $Date: 2019/01/22 03:20:16 $
#
require(spatstat)
local({
AA <- A <- as.im(owin())
BB <- B <- as.im(owin(c(1.1, 1.9), c(0,1)))
Z <- imcov(A, B)
stopifnot(abs(max(Z) - 0.8) < 0.1)
Frame(AA) <- Frame(B)
Frame(BB) <- Frame(A)
## handling images with 1 row or column
ycov <- function(x, y) y
E <- as.im(ycov, owin(), dimyx = c(2,1))
G <- cut(E, 2)
H <- as.tess(G)
E12 <- as.im(ycov, owin(), dimyx = c(1,2))
G12 <- cut(E12, 2)
H12 <- as.tess(G12)
AAA <- as.array(AA)
EEE <- as.array(E)
AAD <- as.double(AA)
EED <- as.double(E)
aaa <- xtfrm(AAA)
eee <- xtfrm(E)
##
d <- distmap(cells, dimyx=32)
Z <- connected(d <= 0.06, method="interpreted")
a <- where.max(d, first=FALSE)
a <- where.min(d, first=FALSE)
dx <- raster.x(d)
dy <- raster.y(d)
dxy <- raster.xy(d)
xyZ <- raster.xy(Z, drop=TRUE)
horosho <- conform.imagelist(cells, list(d, Z))
#' split.im
W <- square(1)
X <- as.im(function(x,y){x}, W)
Y <- dirichlet(runifpoint(7, W))
Z <- split(X, as.im(Y))
## cases of "[.im"
ee <- d[simplenet, drop=FALSE]
eev <- d[simplenet]
Empty <- cells[FALSE]
EmptyFun <- ssf(Empty, numeric(0))
ff <- d[Empty]
ff <- d[EmptyFun]
gg <- d[2,]
gg <- d[,2]
gg <- d[2:4, 3:5]
hh <- d[2:4, 3:5, rescue=TRUE]
if(!is.im(hh)) stop("rectangle was not rescued in [.im")
## cases of "[<-.im"
d[Empty] <- 42
d[EmptyFun] <- 42
## smudge() and rasterfilter()
dd <- smudge(d)
## rgb/hsv options
X <- setcov(owin())
M <- Window(X)
Y <- as.im(function(x,y) x, W=M)
Z <- as.im(function(x,y) y, W=M)
# convert after rescaling
RGBscal <- rgbim(X, Y, Z, autoscale=TRUE, maxColorValue=1)
HSVscal <- hsvim(X, Y, Z, autoscale=TRUE)
#' cases of [.im
Ma <- as.mask(M, dimyx=37)
ZM <- Z[raster=Ma, drop=FALSE]
ZM[solutionset(Y+Z > 0.4)] <- NA
ZF <- cut(ZM, breaks=5)
ZL <- (ZM > 0)
P <- list(x=c(0.511, 0.774, 0.633, 0.248, 0.798),
y=c(0.791, 0.608, 0.337, 0.613, 0.819))
zmp <- ZM[P, drop=TRUE]
zfp <- ZF[P, drop=TRUE]
zlp <- ZL[P, drop=TRUE]
P <- as.ppp(P, owin())
zmp <- ZM[P, drop=TRUE]
zfp <- ZF[P, drop=TRUE]
zlp <- ZL[P, drop=TRUE]
#' miscellaneous
ZZ <- zapsmall(Z, digits=6)
ZZ <- zapsmall(Z)
ZS <- shift(Z, origin="centroid")
ZS <- shift(Z, origin="bottomleft")
plot(Z, ribside="left")
plot(Z, ribside="top")
h <- hist(Z)
plot(h)
#' safelookup (including extrapolation case)
Z <- as.im(function(x,y) { x - y }, letterR)
B <- grow.rectangle(Frame(letterR), 1)
X <- superimpose(runifpoint(10,letterR),
runifpoint(20, setminus.owin(B, letterR)),
W=B)
a <- safelookup(Z, X)
})
#' indices.R
#' Tests of code for understanding index vectors etc
#' $Revision: 1.1 $ $Date: 2018/03/01 03:38:07 $
require(spatstat)
local({
a <- grokIndexVector(c(FALSE,TRUE), 10)
b <- grokIndexVector(rep(c(FALSE,TRUE), 7), 10)
d <- grokIndexVector(c(2,12), 10)
e <- grokIndexVector(letters[4:2], nama=letters)
f <- grokIndexVector(letters[10:1], nama=letters[1:5])
g <- grokIndexVector(-c(2, 5), 10)
h <- grokIndexVector(-c(2, 5, 15), 10)
Nam <- letters[1:10]
j <- positiveIndex(-c(2,5), nama=Nam)
jj <- logicalIndex(-c(2,5), nama=Nam)
k <- positiveIndex(-c(2,5), nama=Nam)
kk <- logicalIndex(-c(2,5), nama=Nam)
mm <- positiveIndex(c(FALSE,TRUE), nama=Nam)
nn <- positiveIndex(FALSE, nama=Nam)
aa <- ppsubset(cells, square(0.1))
})
#' tests/ippm.R
#' Tests of 'ippm' class
#' $Revision: 1.2 $ $Date: 2019/02/02 02:26:02 $
require(spatstat)
local({
# .......... set up example from help file .................
nd <- 10
gamma0 <- 3
delta0 <- 5
POW <- 3
# Terms in intensity
Z <- function(x,y) { -2*y }
f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) }
# True intensity
lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) }
# Simulate realisation
lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0))
set.seed(42)
X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0)
# Partial derivatives of log f
DlogfDgamma <- function(x,y, gamma, delta) {
topbit <- exp(gamma - delta * x^POW)
topbit/(1 + topbit)
}
DlogfDdelta <- function(x,y, gamma, delta) {
topbit <- exp(gamma - delta * x^POW)
- (x^POW) * topbit/(1 + topbit)
}
# irregular score
Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta)
# fit model
fit <- ippm(X ~Z + offset(log(f)),
covariates=list(Z=Z, f=f),
iScore=Dlogf,
start=list(gamma=1, delta=1),
nd=nd)
## ............. test ippm class support ......................
Ar <- model.matrix(fit)
Ai <- model.matrix(fit, irregular=TRUE)
Zr <- model.images(fit)
Zi <- model.images(fit, irregular=TRUE)
## update.ippm
fit2 <- update(fit, . ~ . + I(Z^2))
fit0 <- update(fit,
. ~ . - Z,
start=list(gamma=2, delta=4))
oldfit <- ippm(X,
~Z + offset(log(f)),
covariates=list(Z=Z, f=f),
iScore=Dlogf,
start=list(gamma=1, delta=1),
nd=nd)
oldfit2 <- update(oldfit, . ~ . + I(Z^2))
oldfit0 <- update(oldfit,
. ~ . - Z,
start=list(gamma=2, delta=4))
})
#'
#' tests/Kfuns.R
#'
#' Various K and L functions and pcf
#'
#' $Revision: 1.11 $ $Date: 2019/01/25 03:43:49 $
#'
require(spatstat)
myfun <- function(x,y){(x+1) * y }
local({
#' supporting code
implemented.for.K(c("border", "bord.modif", "translate", "good", "best"),
"polygonal", TRUE)
implemented.for.K(c("border", "bord.modif", "translate", "good", "best"),
"mask", TRUE)
implemented.for.K(c("border", "isotropic"), "mask", FALSE)
#' Kest special code blocks
K <- Kest(cells, var.approx=TRUE, ratio=FALSE)
Z <- distmap(cells) + 1
Kb <- Kest(cells, correction=c("border","bord.modif"),
weights=Z, ratio=TRUE)
Kn <- Kest(cells, correction="none",
weights=Z, ratio=TRUE)
Knb <- Kest(cells, correction=c("border","bord.modif","none"),
weights=Z, ratio=TRUE)
bigint <- 50000 # This is only "big" on a 32-bit system where
# sqrt(.Machine$integer.max) = 46340.9
X <- runifpoint(bigint)
Z <- as.im(1/bigint, owin())
Kb <- Kest(X, correction=c("border","bord.modif"),
rmax=0.02, weights=Z, ratio=TRUE)
Kn <- Kest(X, correction="none",
rmax=0.02, weights=Z, ratio=TRUE)
Knb <- Kest(X, correction=c("border","bord.modif","none"),
rmax=0.02, weights=Z, ratio=TRUE)
#' pcf.ppp special code blocks
pr <- pcf(cells, ratio=TRUE, var.approx=TRUE)
pc <- pcf(cells, domain=square(0.5))
pcr <- pcf(cells, domain=square(0.5), ratio=TRUE)
#' inhomogeneous multitype
fit <- ppm(amacrine ~ marks)
K1 <- Kcross.inhom(amacrine, lambdaX=fit)
K2 <- Kcross.inhom(amacrine, lambdaX=densityfun(amacrine))
K3 <- Kcross.inhom(amacrine, lambdaX=density(amacrine, at="points"))
On <- split(amacrine)$on
Off <- split(amacrine)$off
K4 <- Kcross.inhom(amacrine, lambdaI=ppm(On), lambdaJ=ppm(Off))
K5 <- Kcross.inhom(amacrine, correction="bord.modif")
#' Kmark, markcorr
X <- runifpoint(100) %mark% runif(100)
km <- Kmark(X, f=atan2)
km <- Kmark(X, f1=sin)
km <- Kmark(X, f="myfun")
Y <- X %mark% data.frame(u=runif(100), v=runif(100))
mk <- markcorr(Y)
#'
rr <- rep(0.1, npoints(cells))
eC <- edge.Ripley(cells, rr)
eI <- edge.Ripley(cells, rr, method="interpreted")
if(max(abs(eC-eI)) > 0.1)
stop("Ripley edge correction results do not match")
a <- rmax.Ripley(square(1))
a <- rmax.Rigid(square(1))
a <- rmax.Ripley(as.polygonal(square(1)))
a <- rmax.Rigid(as.polygonal(square(1)))
a <- rmax.Ripley(letterR)
a <- rmax.Rigid(letterR)
#' run slow code for edge correction and compare results
X <- redwood[c(TRUE, FALSE, FALSE)]
Window(X) <- as.polygonal(Window(X))
Eapprox <- edge.Trans(X)
Eexact <- edge.Trans(X, exact=TRUE)
maxrelerr <- max(abs(1 - range(Eapprox/Eexact)))
if(maxrelerr > 0.1)
stop(paste("Exact and approximate algorithms for edge.Trans disagree by",
paste0(round(100*maxrelerr), "%")),
call.=FALSE)
#'
#' directional K functions
#'
a <- Ksector(swedishpines,
-pi/2, pi/2, units="radians",
correction=c("none", "border", "bord.modif", "Ripley", "translate"),
ratio=TRUE)
plot(a)
#'
#' local K functions
#'
fut <- ppm(swedishpines ~ polynom(x,y,2))
Z <- predict(fut)
Lam <- fitted(fut, dataonly=TRUE)
a <- localLinhom(swedishpines, lambda=fut)
a <- localLinhom(swedishpines, lambda=Z)
a <- localLinhom(swedishpines, lambda=Lam)
a <- localLinhom(swedishpines, lambda=Z, correction="none")
a <- localLinhom(swedishpines, lambda=Z, correction="translate")
#'
#' lohboot code blocks
#'
Ared <- lohboot(redwood, block=TRUE, Vcorrection=TRUE, global=FALSE)
Bred <- lohboot(redwood, block=TRUE, basicboot=TRUE, global=FALSE)
X <- runifpoint(100, letterR)
AX <- lohboot(X, block=TRUE, nx=7, ny=10)
#'
#' residual K functions etc
#'
rco <- compareFit(cells, Kcom,
interaction=anylist(P=Poisson(), S=Strauss(0.08)),
same="trans", different="tcom")
})
#
# tests/kppm.R
#
# $Revision: 1.26 $ $Date: 2019/01/25 03:33:40 $
#
# Test functionality of kppm that depends on RandomFields
# Test update.kppm for old style kppm objects
require(spatstat)
local({
fit <- kppm(redwood ~1, "Thomas") # sic
fitx <- update(fit, ~ . + x)
fitM <- update(fit, clusters="MatClust")
fitC <- update(fit, cells)
fitCx <- update(fit, cells ~ x)
#'
Wsub <- owin(c(0, 0.5), c(-0.5, 0))
fitsub <- kppm(redwood ~1, "Thomas", subset=Wsub)
fitsub
#' various methods
ff <- as.fv(fitx)
Y <- simulate(fitx, seed=42)[[1]]
uu <- unitname(fitx)
unitname(fitCx) <- "furlong"
mo <- model.images(fitCx)
# vcov.kppm different algorithms
vc <- vcov(fitx)
vc2 <- vcov(fitx, fast=TRUE)
vc3 <- vcov(fitx, fast=TRUE, splitup=TRUE)
vc4 <- vcov(fitx, splitup=TRUE)
## other code blocks
a <- varcount(fitx, function(x,y){x+1}) # always positive
a <- varcount(fitx, function(x,y){y-1}) # always negative
a <- varcount(fitx, function(x,y){x+y}) # positive or negative
# improve.kppm
fitI <- update(fit, improve.type="quasi")
fitxI <- update(fitx, improve.type="quasi")
# vcov.kppm
vcI <- vcov(fitxI)
# plot.kppm including predict.kppm
fitMC <- kppm(redwood ~ x, "Thomas")
fitCL <- kppm(redwood ~ x, "Thomas", method="c")
fitPA <- kppm(redwood ~ x, "Thomas", method="p")
plot(fitMC)
plot(fitCL)
plot(fitPA)
# fit with composite likelihood method [thanks to Abdollah Jalilian]
fut <- kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8)
kfut <- as.fv(fut)
if(require(RandomFields)) {
fit0 <- kppm(redwood ~1, "LGCP")
is.poisson(fit0)
Y0 <- simulate(fit0)[[1]]
stopifnot(is.ppp(Y0))
## fit LGCP using K function: slow
fit1 <- kppm(redwood ~x, "LGCP",
covmodel=list(model="matern", nu=0.3),
control=list(maxit=3))
Y1 <- simulate(fit1)[[1]]
stopifnot(is.ppp(Y1))
## fit LGCP using pcf
fit1p <- kppm(redwood ~x, "LGCP",
covmodel=list(model="matern", nu=0.3),
statistic="pcf")
Y1p <- simulate(fit1p)[[1]]
stopifnot(is.ppp(Y1p))
## .. and using different fitting methods
fit1pClik <- update(fit1p, method="clik")
fit1pPalm <- update(fit1p, method="palm")
## image covariate (a different code block)
xx <- as.im(function(x,y) x, Window(redwood))
fit1xx <- update(fit1p, . ~ xx, data=solist(xx=xx))
Y1xx <- simulate(fit1xx)[[1]]
stopifnot(is.ppp(Y1xx))
fit1xxVG <- update(fit1xx, clusters="VarGamma", nu=-1/4)
Y1xxVG <- simulate(fit1xxVG)[[1]]
stopifnot(is.ppp(Y1xxVG))
# ... and Abdollah's code
fit2 <- kppm(redwood ~x, cluster="Cauchy", statistic="K")
Y2 <- simulate(fit2)[[1]]
stopifnot(is.ppp(Y2))
}
})
local({
#' various code blocks
fut <- kppm(redwood, ~x)
fet <- update(fut, redwood3)
fot <- update(fut, trend=~y)
fit <- kppm(redwoodfull ~ x)
Y <- simulate(fit, window=redwoodfull.extra$regionII)
gut <- improve.kppm(fit, type="wclik1")
gut <- improve.kppm(fit, vcov=TRUE, fast.vcov=TRUE, save.internals=TRUE)
hut <- kppm(redwood ~ x, method="clik", weightfun=NULL)
hut <- kppm(redwood ~ x, method="palm", weightfun=NULL)
})
local({
#' minimum contrast code
K <- Kest(redwood)
a <- matclust.estK(K)
a <- thomas.estK(K)
a <- cauchy.estK(K)
a <- vargamma.estK(K)
a <- lgcp.estK(K)
print(a)
u <- unitname(a)
g <- pcf(redwood)
a <- matclust.estpcf(g)
a <- thomas.estpcf(g)
a <- cauchy.estpcf(g)
a <- vargamma.estpcf(g)
a <- lgcp.estpcf(g)
#' auxiliary functions
b <- resolve.vargamma.shape(nu.pcf=1.5)
Z <- clusterfield("Thomas", kappa=1, scale=0.2)
aa <- NULL
aa <- accumulateStatus(simpleMessage("Woof"), aa)
aa <- accumulateStatus(simpleMessage("Sit"), aa)
aa <- accumulateStatus(simpleMessage("Woof"), aa)
printStatusList(aa)
RMIN <- 0.01
fit <- kppm(redwood ~ 1, ctrl=list(rmin=RMIN,q=1/2))
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("kppm did not handle parameter 'rmin' in argument 'ctrl' ")
fit <- kppm(redwood ~ 1, ctrl=list(rmin=0,q=1/2), rmin=RMIN)
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("kppm did not handle parameter 'rmin' in argument 'ctrl'")
RMIN <- 2
fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=RMIN,q=1))
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("dppm did not handle parameter 'rmin' in argument 'ctrl'")
fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=0,q=1), rmin=RMIN)
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("dppm did not handle argument 'rmin'")
})
local({
#' experimental
spatstat.options(kppm.canonical=TRUE, kppm.adjusted=TRUE)
futTT1 <- kppm(redwood)
futTT2 <- kppm(redwood, method="palm")
futTT3 <- kppm(redwood, method="clik2")
spatstat.options(kppm.canonical=TRUE, kppm.adjusted=FALSE)
futTF1 <- kppm(redwood)
futTF2 <- kppm(redwood, method="palm")
futTF3 <- kppm(redwood, method="clik2")
spatstat.options(kppm.canonical=FALSE, kppm.adjusted=TRUE)
futFT1 <- kppm(redwood)
futFT2 <- kppm(redwood, method="palm")
futFT3 <- kppm(redwood, method="clik2")
spatstat.options(kppm.canonical=FALSE, kppm.adjusted=FALSE)
futFF1 <- kppm(redwood)
futFF2 <- kppm(redwood, method="palm")
futFF3 <- kppm(redwood, method="clik2")
})
reset.spatstat.options()
|
/tests/testsGtoK.R
|
no_license
|
kasselhingee/spatstat
|
R
| false | false | 16,328 |
r
|
Bred <- lohboot(redwood, block=TRUE, basicboot=TRUE, global=FALSE)
X <- runifpoint(100, letterR)
AX <- lohboot(X, block=TRUE, nx=7, ny=10)
#'
#' residual K functions etc
#'
rco <- compareFit(cells, Kcom,
interaction=anylist(P=Poisson(), S=Strauss(0.08)),
same="trans", different="tcom")
})
#
# tests/kppm.R
#
# $Revision: 1.26 $ $Date: 2019/01/25 03:33:40 $
#
# Test functionality of kppm that depends on RandomFields
# Test update.kppm for old style kppm objects
require(spatstat)
local({
fit <- kppm(redwood ~1, "Thomas") # sic
fitx <- update(fit, ~ . + x)
fitM <- update(fit, clusters="MatClust")
fitC <- update(fit, cells)
fitCx <- update(fit, cells ~ x)
#'
Wsub <- owin(c(0, 0.5), c(-0.5, 0))
fitsub <- kppm(redwood ~1, "Thomas", subset=Wsub)
fitsub
#' various methods
ff <- as.fv(fitx)
Y <- simulate(fitx, seed=42)[[1]]
uu <- unitname(fitx)
unitname(fitCx) <- "furlong"
mo <- model.images(fitCx)
# vcov.kppm different algorithms
vc <- vcov(fitx)
vc2 <- vcov(fitx, fast=TRUE)
vc3 <- vcov(fitx, fast=TRUE, splitup=TRUE)
vc4 <- vcov(fitx, splitup=TRUE)
## other code blocks
a <- varcount(fitx, function(x,y){x+1}) # always positive
a <- varcount(fitx, function(x,y){y-1}) # always negative
a <- varcount(fitx, function(x,y){x+y}) # positive or negative
# improve.kppm
fitI <- update(fit, improve.type="quasi")
fitxI <- update(fitx, improve.type="quasi")
# vcov.kppm
vcI <- vcov(fitxI)
# plot.kppm including predict.kppm
fitMC <- kppm(redwood ~ x, "Thomas")
fitCL <- kppm(redwood ~ x, "Thomas", method="c")
fitPA <- kppm(redwood ~ x, "Thomas", method="p")
plot(fitMC)
plot(fitCL)
plot(fitPA)
# fit with composite likelihood method [thanks to Abdollah Jalilian]
fut <- kppm(redwood ~ x, "VarGamma", method="clik2", nu.ker=-3/8)
kfut <- as.fv(fut)
if(require(RandomFields)) {
fit0 <- kppm(redwood ~1, "LGCP")
is.poisson(fit0)
Y0 <- simulate(fit0)[[1]]
stopifnot(is.ppp(Y0))
## fit LGCP using K function: slow
fit1 <- kppm(redwood ~x, "LGCP",
covmodel=list(model="matern", nu=0.3),
control=list(maxit=3))
Y1 <- simulate(fit1)[[1]]
stopifnot(is.ppp(Y1))
## fit LGCP using pcf
fit1p <- kppm(redwood ~x, "LGCP",
covmodel=list(model="matern", nu=0.3),
statistic="pcf")
Y1p <- simulate(fit1p)[[1]]
stopifnot(is.ppp(Y1p))
## .. and using different fitting methods
fit1pClik <- update(fit1p, method="clik")
fit1pPalm <- update(fit1p, method="palm")
## image covariate (a different code block)
xx <- as.im(function(x,y) x, Window(redwood))
fit1xx <- update(fit1p, . ~ xx, data=solist(xx=xx))
Y1xx <- simulate(fit1xx)[[1]]
stopifnot(is.ppp(Y1xx))
fit1xxVG <- update(fit1xx, clusters="VarGamma", nu=-1/4)
Y1xxVG <- simulate(fit1xxVG)[[1]]
stopifnot(is.ppp(Y1xxVG))
# ... and Abdollah's code
fit2 <- kppm(redwood ~x, cluster="Cauchy", statistic="K")
Y2 <- simulate(fit2)[[1]]
stopifnot(is.ppp(Y2))
}
})
local({
#' various code blocks
fut <- kppm(redwood, ~x)
fet <- update(fut, redwood3)
fot <- update(fut, trend=~y)
fit <- kppm(redwoodfull ~ x)
Y <- simulate(fit, window=redwoodfull.extra$regionII)
gut <- improve.kppm(fit, type="wclik1")
gut <- improve.kppm(fit, vcov=TRUE, fast.vcov=TRUE, save.internals=TRUE)
hut <- kppm(redwood ~ x, method="clik", weightfun=NULL)
hut <- kppm(redwood ~ x, method="palm", weightfun=NULL)
})
local({
#' minimum contrast code
K <- Kest(redwood)
a <- matclust.estK(K)
a <- thomas.estK(K)
a <- cauchy.estK(K)
a <- vargamma.estK(K)
a <- lgcp.estK(K)
print(a)
u <- unitname(a)
g <- pcf(redwood)
a <- matclust.estpcf(g)
a <- thomas.estpcf(g)
a <- cauchy.estpcf(g)
a <- vargamma.estpcf(g)
a <- lgcp.estpcf(g)
#' auxiliary functions
b <- resolve.vargamma.shape(nu.pcf=1.5)
Z <- clusterfield("Thomas", kappa=1, scale=0.2)
aa <- NULL
aa <- accumulateStatus(simpleMessage("Woof"), aa)
aa <- accumulateStatus(simpleMessage("Sit"), aa)
aa <- accumulateStatus(simpleMessage("Woof"), aa)
printStatusList(aa)
RMIN <- 0.01
fit <- kppm(redwood ~ 1, ctrl=list(rmin=RMIN,q=1/2))
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("kppm did not handle parameter 'rmin' in argument 'ctrl' ")
fit <- kppm(redwood ~ 1, ctrl=list(rmin=0,q=1/2), rmin=RMIN)
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("kppm did not handle parameter 'rmin' in argument 'ctrl'")
RMIN <- 2
fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=RMIN,q=1))
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("dppm did not handle parameter 'rmin' in argument 'ctrl'")
fit <- dppm(swedishpines~1, dppGauss(), ctrl=list(rmin=0,q=1), rmin=RMIN)
if(fit$Fit$mcfit$ctrl$rmin != RMIN)
stop("dppm did not handle argument 'rmin'")
})
local({
#' experimental
spatstat.options(kppm.canonical=TRUE, kppm.adjusted=TRUE)
futTT1 <- kppm(redwood)
futTT2 <- kppm(redwood, method="palm")
futTT3 <- kppm(redwood, method="clik2")
spatstat.options(kppm.canonical=TRUE, kppm.adjusted=FALSE)
futTF1 <- kppm(redwood)
futTF2 <- kppm(redwood, method="palm")
futTF3 <- kppm(redwood, method="clik2")
spatstat.options(kppm.canonical=FALSE, kppm.adjusted=TRUE)
futFT1 <- kppm(redwood)
futFT2 <- kppm(redwood, method="palm")
futFT3 <- kppm(redwood, method="clik2")
spatstat.options(kppm.canonical=FALSE, kppm.adjusted=FALSE)
futFF1 <- kppm(redwood)
futFF2 <- kppm(redwood, method="palm")
futFF3 <- kppm(redwood, method="clik2")
})
reset.spatstat.options()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_wmm.R
\name{GetMagneticFieldWMM}
\alias{GetMagneticFieldWMM}
\title{Calculate Expected Magnetic Field from WMM}
\usage{
GetMagneticFieldWMM(lon, lat, height, time, wmmVersion = "derived")
}
\arguments{
\item{lon}{GPS longitude}
\item{lat}{GPS latitude, geodetic}
\item{height}{GPS height in meters above ellipsoid}
\item{time}{Annualized date time. E.g., 2015-02-01 = (2015 + 32/365) = 2015.088; optionally an object (length 1) of class 'POSIXt' or 'Date'}
\item{wmmVersion}{String representing WMM version to use. Must be consistent with \code{time} and one of the following: 'derived', 'WMM2000', 'WMM2005', 'WMM2010', 'WMM2015', 'WMM2015v2', 'WMM2020'. Default 'derived' value will infer the latest WMM version consistent with \code{time}.}
}
\value{
\code{list} of calculated main field and secular variation vector components in nT and nT/yr, resp. The magnetic element intensities (i.e., horizontal and total intensities, h & f) are in nT and the magnetic element angles (i.e., inclination and declination, i & d) are in degrees, with their secular variation in nT/yr and deg/yr, resp.: \code{x}, \code{y}, \code{z}, \code{xDot}, \code{yDot}, \code{zDot}, \code{h}, \code{f}, \code{i}, \code{d}, \code{hDot}, \code{fDot}, \code{iDot}, \code{dDot}
}
\description{
Function that takes a geodetic GPS location and an annualized time, and returns the expected magnetic field from the WMM.
}
\examples{
GetMagneticFieldWMM(
lon = 240,
lat = -80,
height = 1e5,
time = 2022.5,
wmmVersion = 'WMM2020'
)
## Expected output
# x = 5814.9658886215 nT
# y = 14802.9663839328 nT
# z = -49755.3119939183 nT
# xDot = 28.0381961827 nT/yr
# yDot = 1.3970624624 nT/yr
# zDot = 85.6309533031 nT/yr
# h = 15904.1391483373 nT
# f = 52235.3588449608 nT
# i = -72.27367 deg
# d = 68.55389 deg
# hDot = 11.5518244235 nT/yr
# fDot = -78.0481471753 nT/yr
# iDot = 0.04066726 deg/yr
# dDot = -0.09217566 deg/yr
## Calculated output
#$x
#[1] 5814.966
#$y
#[1] 14802.97
#$z
#[1] -49755.31
#$xDot
#[1] 28.0382
#$yDot
#[1] 1.397062
#$zDot
#[1] 85.63095
#$h
#[1] 15904.14
#$f
#[1] 52235.36
#$i
#[1] -72.27367
#$d
#[1] 68.55389
#$hDot
#[1] 11.55182
#$fDot
#[1] -78.04815
#$iDot
#[1] 0.04066726
#$dDot
#[1] -0.09217566
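## Converting a calendar date to the annualized 'time' value by hand.
## Illustrative sketch only: the day-of-year arithmetic mirrors the example
## given for the 'time' argument; lubridate::decimal_date() gives a similar value.
# 2015 + as.numeric(strftime(as.Date("2015-02-01"), "\%j")) / 365  # ~ 2015.088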
}
|
/man/GetMagneticFieldWMM.Rd
|
no_license
|
cran/wmm
|
R
| false | true | 2,403 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pr_stem.R
\name{pr_stem_sentences}
\alias{pr_stem_sentences}
\title{Stem a dataframe containing a column with sentences}
\usage{
pr_stem_sentences(df, col, language = "french")
}
\arguments{
\item{df}{the data.frame containing the text}
\item{col}{the column with the text}
\item{language}{the language of the text. Default is "french". See the SnowballC::getStemLanguages() function for a list of supported languages.}
}
\value{
a tibble
}
\description{
Implementation of the {SnowballC} stemmer. Note that punctuation and capital letters
are removed when processing.
}
\examples{
a <- proustr::laprisonniere[1:10,]
pr_stem_sentences(a, text)
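# For comparison, the underlying stemmer can also be called directly on
# single words (a sketch; assumes the SnowballC package is installed):
# SnowballC::wordStem(c("manger", "mangeait", "mangerons"), language = "french")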
}
|
/man/pr_stem_sentences.Rd
|
no_license
|
ColinFay/proustr
|
R
| false | true | 721 |
rd
|
|
require(foreign)
require(ff)
require(dplyr)
require(lubridate)
require(ggplot2)
require(ggmap)
require(shiny)  # shinyUI()/shinyServer()/shinyApp() below need shiny; dplyr is already loaded above
require(plotly)
require(tidyr)
require(Formula)
require(Hmisc)
require(WDI)
require(XML)
require(acepack)
require(acs)
require(checkmate)
require(choroplethr)
require(choroplethrMaps)
require(htmlTable)
require(zipcode)
#loading data from Markdown file
load("wild07.Rdata")
load("wild15.Rdata")
load("tot_acre07.Rdata")
load("tot_acre15.Rdata")
new <- spread(tot_acres1, key = STATE, value = TOT_ACRE)
new2 <- spread(tot_acres2, key = STATE, value = TOT_ACRE)
row.names(new) <- c("2007")
master <- full_join(new, new2)
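# 'master' should now hold one row per year (2007 and 2015) and one column per
# state, so master[, input$state1] in the server code below returns the yearly
# totals for the chosen state (exact column names depend on the loaded .Rdata files).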
#ShinyApp
# Define UI for the application, which draws a bar graph
ui <- shinyUI (fluidPage(
# Application title
titlePanel("United States Wildfires in 2007 and 2015"),
# Sidebar
sidebarLayout(
sidebarPanel(
helpText("Create a bar graph showing the change in frequency of wildfires for different states."),
checkboxGroupInput("state1",
"Choose which state you'd like to see:",
choices = colnames(master)),
# checkboxGroupInput("state2",
# "Choose which state you'd like to see:",
# choices = c(levels(wild07$STATE))),
hr(),
helpText("Data from U.S. Department of Homeland Security United States Fire
Administration National Fire Data Center")
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot"),
textOutput("text1")
)
)
)
)
# Define server logic required to draw the bar graph
server <- shinyServer(function(input, output) {
output$text1 <- renderText({
paste("You have selected the state",input$state1)})
# output$text2 <- renderText({
# paster("and", input$state2)})
output$distPlot <- renderPlot({
barplot(master[,input$state1] *1000,
main = input$state1,
ylab = "Number of Wildfires",
xlab = "State"
)
})
})
# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
rfeingold18/Final-Project
|
R
| false | false | 2,070 |
r
|
|
get_finalaCRFpages <- function (dsin_crf = NULL, dsin_linkPages = NULL,
domain_list = NULL) {
crf_out_final <- data.frame(domain = character(), sdtm_vars = character(), Variable = character(), page_nbr_concat = character())
for (sel_domain in domain_list) {
print(sel_domain)
dsin_domain <- subset(dsin_crf, domain %in% sel_domain)
variables <- dsin_domain[ , "sdtm_vars"]
vars_list <- unique(variables)
for (var in vars_list) {
## main page numbers
dsin <- subset(dsin_domain, sdtm_vars %in% var)
## add link pages, if exist
if ( any(stringr::str_detect(unique(dsin_linkPages$domain), sel_domain)) ) {
dsin_linkPages_sel <- subset(dsin_linkPages, sdtm_vars %in% var)
## -- remove duplicate values
dsin_linkPages_sel <- dsin_linkPages_sel[!duplicated(dsin_linkPages_sel[, c("sdtm_vars", "page_nbr")]), ]
## combine all page numbers
dsin <- as.data.frame(rbind(dsin, dsin_linkPages_sel))
dsin <- dsin[order(dsin[, "page_nbr"]), ]
}
query_pgNbr <- paste(paste("SELECT A.*, group_concat(page_nbr)", " AS page_nbr_concat ", sep = ""),
"FROM dsin AS A", sep = "")
crf_out_res <- sqldf::sqldf(query_pgNbr)
crf_out_res <- crf_out_res[, !names(crf_out_res) %in% c("page_nbr")]
## concatenate result
crf_out_final <- rbind(crf_out_final, crf_out_res)
}
}
  crf_out_final$page_nbr_concat <- unlist(stringr::str_replace_all(crf_out_final$page_nbr_concat, ",", ", "))
  crf_out_final <- as.data.frame(crf_out_final)
  # message("END mining aCRF: ", date())
return(crf_out_final)
}
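## Minimal usage sketch (hypothetical data, commented out; assumes the sqldf
## and stringr packages are installed and the column names used above):
# crf <- data.frame(domain = "AE", sdtm_vars = "AETERM",
#                   Variable = "Reported Term", page_nbr = c(12, 27))
# lnk <- data.frame(domain = character(), sdtm_vars = character(),
#                   Variable = character(), page_nbr = numeric())
# get_finalaCRFpages(dsin_crf = crf, dsin_linkPages = lnk, domain_list = "AE")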
|
/proj-mini-parse-aCRF-page-master/dev/functions/get_finalaCRFpages_fn - old.R
|
no_license
|
Hw1OCS/aCRFExtractor_GSK-version
|
R
| false | false | 1,757 |
r
|
|
#' zip_state_city
#' @docType data
#'
#' @usage data(zip_state_city)
#'
#' @format A data.frame with combinations of zipcode, city and state names
#'
#' @keywords zip, zipcode, city, state
#'
#' @references \url{www.pier2pier.com/links/files/Countrystate/USA-Zip.xls}
"zip_state_city"
#' get_state_city_zipcode
#' @description This function converts a vector of zipcodes to the corresponding state-city pairs
#' @param zips A vector of zip codes
#' @return A data.frame with first column as state, second column as zipcode,
#' and last column the corresponding city.
#' @export
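#' @examples
#' \dontrun{
#' # Illustrative call only; the zip codes are made up and the result depends
#' # on the bundled zip_state_city lookup table
#' get_state_city_zipcode(c("98101", "61820"))
#' }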
get_state_city_zipcode <- function(zips){
ret <- setNames(data.frame(matrix(ncol = 3, nrow = 0)), c("state", "zip", "city"))
for (n in zips){
r <- zip_state_city[which(zip_state_city$zipcode == n), ]
if (nrow(r) < 1){
print("Zipcode area not found!")
} else {
ret <- rbind(ret, data.frame(state=r$st_abbr, zip=r$zipcode, city=r$city))
}
}
return(ret)
}
|
/BDEEPInfousa/R/zip_state_city.R
|
no_license
|
uiuc-bdeep/InfoUSA_Database
|
R
| false | false | 970 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FCP_TPA.R
\name{FCP_TPA}
\alias{FCP_TPA}
\title{The functional CP-TPA algorithm}
\usage{
FCP_TPA(
X,
K,
penMat,
alphaRange,
verbose = FALSE,
tol = 1e-04,
maxIter = 15,
adaptTol = TRUE
)
}
\arguments{
\item{X}{The data tensor of dimensions \code{N x S1 x S2}.}
\item{K}{The number of eigentensors to be calculated.}
\item{penMat}{A list with entries \code{v} and \code{w}, containing a
roughness penalty matrix for each direction of the image. The algorithm
does not induce smoothness along observations (see Details).}
\item{alphaRange}{A list of length 2 with entries \code{v} and \code{w} ,
containing the range of smoothness parameters to test for each direction.}
\item{verbose}{Logical. If \code{TRUE}, computational details are given on
the standard output during calculation of the FCP_TPA.}
\item{tol}{A numeric value, giving the tolerance for relative error values in
the algorithm. Defaults to \code{1e-4}. It is automatically multiplied by
10 after \code{maxIter} steps, if \code{adaptTol = TRUE}.}
\item{maxIter}{A numeric value, the maximal iteration steps. Can be doubled,
if \code{adaptTol = TRUE}.}
\item{adaptTol}{Logical. If \code{TRUE}, the tolerance is adapted (multiplied
by 10), if the algorithm has not converged after \code{maxIter} steps and
another \code{maxIter} steps are allowed with the increased tolerance, see
Details. Use with caution. Defaults to \code{TRUE}.}
}
\value{
\item{d}{A vector of length \code{K}, containing the numeric weights
\eqn{d_k} in the CP model.} \item{U}{A matrix of dimensions \code{N x K},
containing the eigenvectors \eqn{u_k} in the first dimension.} \item{V}{A
matrix of dimensions \code{S1 x K}, containing the eigenvectors \eqn{v_k}
in the second dimension.} \item{W}{A matrix of dimensions \code{S2 x K},
containing the eigenvectors \eqn{w_k} in the third dimension.}
}
\description{
This function implements the functional CP-TPA (FCP-TPA) algorithm, which
calculates a smooth PCA for 3D tensor data (i.e. \code{N} observations of 2D
images with dimension \code{S1 x S2}). The results are given in a
CANDECOMP/PARAFAC (CP) model format \deqn{X = \sum_{k = 1}^K d_k \cdot u_k
\circ v_k \circ w_k}{X = \sum d_k u_k \%o\% v_k \%o\% w_k} where
\eqn{\circ}{\%o\%} stands for the outer product, \eqn{d_k} is a scalar and
\eqn{u_k, v_k, w_k} are eigenvectors for each direction of the tensor. In
this representation, the outer product \eqn{v_k \circ w_k}{v_k \%o\% w_k} can
be regarded as the \eqn{k}-th eigenimage, while \eqn{d_k \cdot u_k}{d_k u_k}
represents the vector of individual scores for this eigenimage and each
observation.
}
\details{
The smoothness of the eigenvectors \eqn{v_k, w_k} is induced by penalty
matrices for both image directions, which are weighted by smoothing parameters
\eqn{\alpha_{vk}, \alpha_{wk}}. The eigenvectors \eqn{u_k} are not smoothed,
hence the algorithm does not induce smoothness along observations.
Optimal smoothing parameters are found via a nested generalized cross
validation. In each iteration of the TPA (tensor power algorithm), the GCV
criterion is optimized via \code{\link[stats]{optimize}} on the interval
specified via \code{alphaRange$v} (or \code{alphaRange$w}, respectively).
The FCP_TPA algorithm is an iterative algorithm. Convergence is assumed if
the relative differences between the current and the previous values are all
below the tolerance level \code{tol}. The tolerance level is increased
automatically, if the algorithm has not converged after \code{maxIter} steps
and if \code{adaptTol = TRUE}. If the algorithm did not converge after
\code{maxIter} steps (or \code{2 * maxIter} steps, respectively), the function throws a
warning.
}
\examples{
# set.seed(1234)
N <- 100
S1 <- 75
S2 <- 75
# define "true" components
v <- sin(seq(-pi, pi, length.out = S1))
w <- exp(seq(-0.5, 1, length.out = S2))
# simulate tensor data with dimensions N x S1 x S2
X <- rnorm(N, sd = 0.5) \%o\% v \%o\% w
# create penalty matrices (penalize first differences for each dimension)
Pv <- crossprod(diff(diag(S1)))
Pw <- crossprod(diff(diag(S2)))
# estimate one eigentensor
res <- FCP_TPA(X, K = 1, penMat = list(v = Pv, w = Pw),
alphaRange = list(v = c(1e-4, 1e4), w = c(1e-4, 1e4)),
verbose = TRUE)
# plot the results and compare to true values
plot(res$V)
points(v/sqrt(sum(v^2)), pch = 20)
legend("topleft", legend = c("True", "Estimated"), pch = c(20, 1))
plot(res$W)
points(w/sqrt(sum(w^2)), pch = 20)
legend("topleft", legend = c("True", "Estimated"), pch = c(20, 1))
}
\references{
G. I. Allen, "Multi-way Functional Principal Components
Analysis", IEEE International Workshop on Computational Advances in
Multi-Sensor Adaptive Processing, 2013.
}
\seealso{
\code{\link{fcptpaBasis}}
}
|
/man/FCP_TPA.Rd
|
no_license
|
cran/MFPCA
|
R
| false | true | 4,895 |
rd
|
|
###############################################################################
#' @import svDialogs
###############################################################################
#' @rdname AQSysCurve
#' @title This function plots a curve based on the chosen model and its
#' parameters.
#' @description The function returns a plot after using the parameters and
#' model given by the user.
#' @details The function has a predefined set of equations, listed below, which
#' must be used, with adequate parameters,
#' to return a plot which represents the chosen model.
#' @export AQSysCurve
#' @param modelName Equation to be used: merchuk, murugesan [type:string]
#' @param modelPars Model's parameters [type:data.frame]
#' @param xlbl Plot's Horizontal axis label.
#' @param ylbl Plot's Vertical axis label.
#'
#' @param col Legacy from plot package. For more details,
#' see \code{\link{plot.default}}
#' @param cex Legacy from plot package. For more details,
#' see \code{\link{plot.default}}
#' @param cexlab Legacy from plot package. For more details,
#' see \code{\link{plot.default}}
#' @param cexaxis Legacy from plot package. For more details,
#' see \code{\link{plot.default}}
#' @param cexmain Legacy from plot package. For more details,
#' see \code{\link{plot.default}}
#' @param cexsub Legacy from plot package. For more details,
#' see \code{\link{plot.default}}
#'
#' @param xmax Maximum value for the Horizontal axis' value
#' (bottom-rich component) [type:double]
#' @param NP Number of points used to build the fitted curve.
#' Default is 100. [type:Integer]
#' @param save Save the generated plot in the disk using path and filename
#' provided by the user. [type:Logical]
#' @param HR Adjust Plot's text to be compatible with High Resolution
#' size [type:Logical]
#' @param seriesNames A list of sequential names which will identify each system
#' provided by the user in the dataSET variable. [type:List]
#' @param filename Filename provided by the user to save a given
#' plot. [type:String]
#' @param wdir The directory in which the plot file will be
#' saved. [type:String]
#' @param silent save plot file without actually showing it to the
#' user. [type:Logical]
# ' @param maxiter - A positive integer specifying the maximum number of
# iterations allowed.
#' @inheritParams graphics::plot.default
#' @return A plot using the input model within the chosen interval and the
#' curve's raw XY data.
#' If no interval is selected, the default xmax = 35 is used (matching the function signature).
#' @examples
#' \dontrun{
#' AQSysCurve("murugesan", data.frame(90.389, -34.897, 2.924), col = "red")
#' }
###############################################################################
AQSysCurve <- function(
modelName,
modelPars,
seriesNames = NULL,
xlbl = "",
ylbl = "",
col = "black",
type = "p",
cex = 1,
cexlab = 1,
cexaxis = 1,
cexmain = 1,
cexsub = 1,
xmax = 35,
HR = FALSE,
NP = 100,
filename = NULL,
wdir = NULL,
save = FALSE,
silent = FALSE,
...
)
{
#
nSys <- nrow(modelPars)
models_npars <- AQSysList(TRUE)
#
if ((ncol(modelPars) == models_npars[[modelName]]) && (nSys >= 1 )) {
    # mass fraction range of the bottom-rich component (in %, m/m; from 0.1 up to xmax)
x <- sort(runif(NP, 0.1, xmax))
# select which model will be used to generate the plot
# if user selects an option not available, it triggers an error
# (check AQSys.err.R for details)
    if (modelName %in% names(models_npars)) {
      Fn <- AQSys.mathDesc(modelName)
    } else {
      AQSys.err("0")
    }
#
if (is.null(seriesNames) || !(length(seriesNames) == nSys)) {
cat(
paste(
"\nThe array seriesNames must have", nSys,
"element(s). Default names will be used instead.\n\n"
)
)
seriesNames <- sapply(seq(1, nSys), function(x) paste("Series", x))
} else {
SysNames <- TRUE
}
# CREATE A LIST WHICH WILL HOLD EACH SYSTEM'S DATA
SysList <- list()
for (i in seq(1, nSys)) {
# unlist and convert parameters to double
model_pars <- as.double(unlist(modelPars[i, ]))
SysList[[i]] <- unname(data.frame(x, Fn(model_pars, x)))
names(SysList[[i]]) <- c("X", "Y")
SysList[[i]]["System"] <- seriesNames[i]
}
# BIND ALL DATA FROM SEVERAL SYSTEMS
output_data <- bind_rows(SysList)
# PLOT DATA
output_plot <- BLOPlot(output_data, xlbl, ylbl)
#
saveConfig(output_plot, save, HR, filename, wdir, silent)
# make available data from fitted curve to user. Function returns it
# silently but user can get data using simple assign '<-'
if (silent == FALSE) {
print(output_plot)
invisible(output_data)
} else {
invisible(list("data" = output_data, "plot" = output_plot))
}
}
else{
AQSys.err("9")
}
}
BLOPlot <- function(dataSET, xlbl = "", ylbl = "") {
  # Round the X and Y maxima up to a multiple of five after inflating the data
  # maximum by roughly 8% (dividing by 0.92), leaving head room at the top of each axis
xmax <- ceiling(round(max(dataSET$X) / 0.92, 1) / 5) * 5
ymax <- ceiling(round(max(dataSET$Y) / 0.92, 1) / 5) * 5
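  # e.g. max(dataSET$X) = 30  ->  30 / 0.92 = 32.6  ->  ceiling(32.6 / 5) * 5 = 35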
# Plot data
output_plot <- ggplot(dataSET, aes_string(x = "X", y = "Y", color = "System",
shape = "System")) +
geom_line(size = 1) +
geom_point(size = 2) +
theme_light() +
scale_color_llsr(palette = "mixed") +
xlab(paste(xlbl, "(%, m/m)")) +
ylab(paste(ylbl, "(%, m/m)")) +
theme(
validate = FALSE,
plot.margin = unit(c(1, 1, 1, 1), "cm"),
text = element_text(size = 16),
legend.position = "top",
axis.title.y = element_text(vjust = 5),
axis.title.x = element_text(vjust = -2),
panel.grid.major = element_line(size = .70, colour = "black"),
panel.grid.minor = element_line(size = .70),
panel.border = element_rect(size = .5, colour = "white"),
axis.text.x = element_text(size = 15),
axis.text.y = element_text(size = 15),
axis.line = element_line(colour = 'black', size = 1.25),
legend.title = element_blank(),
legend.text = element_text(
colour = "black",
size = 12,
face = "plain"
)
) +
scale_y_continuous(
expand = c(0, 0),
limits = c(-2.5, ymax),
breaks = seq(0, ymax, by = 5),
labels = seq(0, ymax, by = 5)
) +
scale_x_continuous(
expand = c(0, 0),
limits = c(-0.5, xmax),
breaks = seq(0, xmax, by = xmax / 10),
labels = seq(0, xmax, by = xmax / 10)
)
return(output_plot)
}
|
/R/AQSysCurve.R
|
no_license
|
cran/LLSR
|
R
| false | false | 6,700 |
r
|
|
library(readr)
Abstract_Catalogados <- read_csv("Abstract Catalogados.csv")
View(Abstract_Catalogados)
library(dplyr)
library(RTextTools)  # create_container()/classify_model() used below (may also be loaded by Entrenamiento_Svm_1.R)
names(Abstract_Catalogados)
Abstract_Catalogados=mutate(Abstract_Catalogados, AbstractYRevista= paste(Abstract,Titulo_Revista,sep=" "))
Abstract_Catalogados=filter(Abstract_Catalogados,Disciplina!="")
source("Entrenamiento_Svm_1.R")
EntrenarSVMTexto(Abstract_Catalogados$Disciplina,Abstract_Catalogados$AbstractYRevista)
modelo<-readRDS("ModeloEntrenado.rds")
Abstract_Catalogados <- read_csv("Testeo Tesis Experimento 3.csv")
Abstract_Catalogados=mutate(Abstract_Catalogados, AbstractYRevista= paste(Abstract,Titulo_Revista,sep=" "))
Abstract_Catalogados=filter(Abstract_Catalogados,Abstract!="")
MATRIZ_CUERPO=modelo$MATRIZ
PREDICCION_CUERPO <- crear_matriz(Abstract_Catalogados$AbstractYRevista
,language='english'
,removeNumbers=TRUE
,stemWords=TRUE
,toLower = TRUE
,stripWhitespace = TRUE
,removeStopwords = TRUE
,originalMatrix=MATRIZ_CUERPO)
largo_prediccion_CUERPO=nrow(PREDICCION_CUERPO);
predictionContainer_CUERPO<- create_container(PREDICCION_CUERPO,
labels=rep(0,largo_prediccion_CUERPO)
,testSize=1:largo_prediccion_CUERPO, virgin=FALSE)
modelo_entrenado=modelo$MODELO
scores <- classify_model(predictionContainer_CUERPO,modelo_entrenado)
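# 'scores' is expected to hold one row per abstract, with the predicted label
# in SVM_LABEL and its probability in SVM_PROB (used in the two lines below)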
Abstract_Catalogados$Etiqueta<-scores$SVM_LABEL
Abstract_Catalogados$Probabilidad<-scores$SVM_PROB
View(Abstract_Catalogados)
write.csv(Abstract_Catalogados,"Resultados_SVM_1.csv")
Resultados_Grafico <- read_csv("Resultados_Grafico.csv")
library(ggplot2)
p <- ggplot(Resultados_Grafico, aes(Ano, Cantidad))
p + geom_point(aes(colour = factor(Etiqueta)))
ggplot(Resultados_Grafico, aes(Ano, Cantidad)) +
geom_point() +
facet_grid(Etiqueta ~ ., scales = "free", space = "free") +
theme(strip.text.y = element_text(angle = 0))
|
/RunSVM.R
|
no_license
|
claudiasolervicens/Tesis-prueba2
|
R
| false | false | 2,160 |
r
|
|
library(ape)
testtree <- read.tree("1639_19.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1639_19_unrooted.txt")
|
/codeml_files/newick_trees_processed/1639_19/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 137 |
r
|
|
# Sidebar with a slider input for number of bins
sidebarLayout(
fluidRow(
box(status="primary", title="Test App 1",
sliderInput("bins1",
"Number of bins:",
min = 1,
max = 50,
value = 30)
),
# Show a plot of the generated distribution
box(status="warning", title="Histogram 1",
plotOutput("dPlot")
)
),
fluidRow(
box(collapsible=T,
dataTableOutput("tbl")
)
)
)
|
/apps/app1/ui.R
|
no_license
|
maryskalicky/shinydashboard
|
R
| false | false | 494 |
r
|
|
###############################################################################
# OVERVIEW:
# Code to create a cleaned person table from the combined
# King County Housing Authority and Seattle Housing Authority data sets
# Aim is to have a single row per contiguous time in a house per person
#
# STEPS:
# 01 - Process raw KCHA data and load to SQL database
# 02 - Process raw SHA data and load to SQL database
# 03 - Bring in individual PHA datasets and combine into a single file
# 04 - Deduplicate data and tidy up via matching process
# 05 - Recode race and other demographics ### (THIS CODE) ###
# 06 - Clean up addresses
# 06a - Geocode addresses
# 07 - Consolidate data rows
# 08 - Add in final data elements and set up analyses
# 09 - Join with Medicaid eligibility data
# 10 - Set up joint housing/Medicaid analyses
#
# Alastair Matheson (PHSKC-APDE)
# alastair.matheson@kingcounty.gov
# 2016-05-13, split into separate files 2017-10
#
###############################################################################
#### Set up global parameters and call in libraries ####
options(max.print = 350, tibble.print_max = 50, scipen = 999)
library(tidyverse) # Used to manipulate data
library(RJSONIO)
library(RCurl)
script <- RCurl::getURL("https://raw.githubusercontent.com/jmhernan/Housing/uw_test/processing/metadata/set_data_env.r")
eval(parse(text = script))
METADATA = RJSONIO::fromJSON("//home/ubuntu/data/metadata/metadata.json")
set_data_envr(METADATA,"combined")
#### Bring in data ####
pha_clean <- readRDS(file = paste0(housing_path, pha_clean_fn))
#### Race ####
# Recode race variables and make numeric
# Note: Because of typos and other errors, this process will overestimate
# the number of people with multiple races
pha_recoded <- pha_clean %>%
mutate_at(vars(r_white:r_nhpi),
funs(new = car::recode(., "'Y' = 1; '1' = 1; 'N' = 0; '0' = 0; 'NULL' = NA; else = NA",
as.numeric.result = TRUE, as.factor.result = FALSE
))
) %>%
# Make r_hisp new for now, need to check recode eventually
mutate(r_hisp_new = ifelse(r_hisp == 2 & !is.na(r_hisp), 0, r_hisp),
         # Propagate collapsed race code from SHA HCV data
r_white_new = ifelse(race == 1 & !is.na(race), 1, r_white_new),
r_black_new = ifelse(race == 2 & !is.na(race), 1, r_black_new),
r_aian_new = ifelse(race == 3 & !is.na(race), 1, r_aian_new),
r_asian_new = ifelse(race == 4 & !is.na(race), 1, r_asian_new),
r_nhpi_new = ifelse(race == 5 & !is.na(race), 1, r_nhpi_new)
)
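# Illustration of the recode above (values are examples, not project data):
#   car::recode(c("Y", "1", "N", "0", "NULL", "?"),
#               "'Y' = 1; '1' = 1; 'N' = 0; '0' = 0; 'NULL' = NA; else = NA",
#               as.numeric.result = TRUE, as.factor.result = FALSE)
#   #> 1 1 0 0 NA NA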
# Identify individuals with contradictory race values and set to Y
pha_recoded <- pha_recoded %>%
group_by(pid) %>%
mutate_at(vars(r_white_new:r_hisp_new), funs(tot = sum(., na.rm = TRUE))) %>%
ungroup() %>%
mutate_at(vars(r_white_new_tot:r_hisp_new_tot),
funs(replace(., which(. > 0), 1))) %>%
mutate(r_white_new = ifelse(r_white_new_tot == 1, 1, 0),
r_black_new = ifelse(r_black_new_tot == 1, 1, 0),
r_aian_new = ifelse(r_aian_new_tot == 1, 1, 0),
r_asian_new = ifelse(r_asian_new_tot == 1, 1, 0),
r_nhpi_new = ifelse(r_nhpi_new_tot == 1, 1, 0),
r_hisp_new = ifelse(r_hisp_new_tot == 1, 1, 0),
# Find people with multiple races
r_multi_new = rowSums(cbind(r_white_new_tot, r_black_new_tot,
r_aian_new_tot, r_asian_new_tot,
r_nhpi_new_tot), na.rm = TRUE),
r_multi_new = ifelse(r_multi_new > 1, 1, 0)) %>%
# make new variable to look at people with one race only
mutate_at(vars(r_white_new:r_nhpi_new),
funs(alone = ifelse(r_multi_new == 1, 0, .))) %>%
# make single race variable
mutate(race_new = case_when(
r_white_new_alone == 1 ~ "White only",
r_black_new_alone == 1 ~ "Black only",
r_aian_new_alone == 1 ~ "AIAN only",
r_asian_new_alone == 1 ~ "Asian only",
r_nhpi_new_alone == 1 ~ "NHPI only",
r_multi_new == 1 ~ "Multiple race",
TRUE ~ ""
)) %>%
# Drop earlier race variables
select(-r_white, -r_black, -r_aian, -r_asian, -r_nhpi,
-race, -contains("_new_tot"), -contains("_alone"), -r_multi_new)
### Fill in missing gender information (won't work if all are missing, also
# will not fill in any initial NAs)
pha_recoded <- pha_recoded %>%
group_by(pid) %>%
mutate_at(vars(gender_new_m6), funs(zoo::na.locf(., na.rm = F))) %>%
ungroup()
#### Add other recodes later ####
#### Save point ####
saveRDS(pha_recoded, file = paste0(housing_path,
pha_recoded_fn))
#### Clean up ####
rm(pha_clean)
gc()
|
/processing/05_pha_recodes.R
|
permissive
|
jmhernan/Housing
|
R
| false | false | 4,710 |
r
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# How to invoke:
# mt-tablet-test-graph.R <tsvfile> <testname>
# This script takes as input a TSV file that contains the timing results
# from running mt-tablet-test, as parsed out by graph-metrics.py
# The file needs to have the following header:
# memrowset_kb updated scanned time num_rowsets inserted
# Three PNGs are generated:
# - Insert rate as data is inserted
# - Scan rate as data is inserted
# - Multiple plots, where x is time, and y shows a variety of different
# progressions like the number of rowsets over time.
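#
# For illustration only, a tab-separated data row under that header might contain
# made-up values such as: 2048  1000  250000  12.5  3  100000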
library(ggplot2)
library(reshape)
library(Cairo)
newpng<- function(filename = "img.png", width = 400, height = 400) {
CairoPNG(filename, width, height)
}
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2) {
  stop("usage: mt-tablet-test-graph.R <tsvfile> <testname>")
}
filename = args[1]
testname = args[2]
source("si_vec.R")
newpng(paste(testname, "-1.png", sep = ""))
print(c("Using file ", filename))
d <- read.table(file=filename, header=T)
d$insert_rate = c(0, diff(d$inserted)/diff(d$time))
if (exists("scanned", where=d)) {
d$scan_rate = c(0, diff(d$scanned)/diff(d$time))
d <- subset(d, select = -c(scanned))
}
if (!is.null(d$updated)) {
d$update_rate = c(0, diff(d$updated)/diff(d$time))
d <- subset(d, select = -c(updated))
}
# Put memrowset usage in bytes
d$memrowset_bytes <- d$memrowset * 1024
d <- subset(d, select = -c(memrowset_kb))
print(ggplot(d, aes(inserted, insert_rate)) +
geom_point(alpha=0.5) +
scale_x_continuous(labels=si_vec) +
scale_y_log10(labels=si_vec))
if (exists("scan_rate", where=d)) {
newpng(paste(testname, "-2.png", sep = ""))
print(ggplot(d, aes(inserted, scan_rate)) +
geom_point(alpha=0.5) +
scale_x_continuous(labels=si_vec) +
scale_y_log10(labels=si_vec))
}
newpng(paste(testname, "-3.png", sep = ""))
d <- rename(d, c(
insert_rate="Insert rate (rows/sec)",
memrowset="Memstore Memory Usage"))
if (exists("scan_rate", where=d)) {
d <- rename(d, c(
scan_rate="Scan int col (rows/sec)"))
}
# set span to 5 seconds worth of data
span = 5.0/max(d$time)
d.melted = melt(d, id="time")
print(qplot(time, value, data=d.melted, geom="line", group = variable)
+ scale_y_continuous(labels=si_vec)
+ facet_grid(variable~., scale = "free_y")
+ stat_smooth())
|
/src/kudu/scripts/mt-tablet-test-graph.R
|
permissive
|
apache/kudu
|
R
| false | false | 3,193 |
r
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# How to invoke:
# mt-tablet-test-graph.R <tsvfile> <testname>
# This script takes as input a TSV file that contains the timing results
# from running mt-tablet-test, as parsed out by graph-metrics.py
# The file needs to have the following header:
# memrowset_kb updated scanned time num_rowsets inserted
# Three PNGs are generated:
# - Insert rate as data is inserted
# - Scan rate as data is inserted
# - Multiple plots, where x is time, and y shows a variety of different
# progressions like the number of rowsets over time.
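#
# For illustration only, a tab-separated data row under that header might contain
# made-up values such as: 2048  1000  250000  12.5  3  100000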
library(ggplot2)
library(reshape)
library(Cairo)
newpng<- function(filename = "img.png", width = 400, height = 400) {
CairoPNG(filename, width, height)
}
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 2) {
  stop("usage: mt-tablet-test-graph.R <tsvfile> <testname>")
}
filename = args[1]
testname = args[2]
source("si_vec.R")
newpng(paste(testname, "-1.png", sep = ""))
print(c("Using file ", filename))
d <- read.table(file=filename, header=T)
d$insert_rate = c(0, diff(d$inserted)/diff(d$time))
if (exists("scanned", where=d)) {
d$scan_rate = c(0, diff(d$scanned)/diff(d$time))
d <- subset(d, select = -c(scanned))
}
if (!is.null(d$updated)) {
d$update_rate = c(0, diff(d$updated)/diff(d$time))
d <- subset(d, select = -c(updated))
}
# Put memrowset usage in bytes
d$memrowset_bytes <- d$memrowset * 1024
d <- subset(d, select = -c(memrowset_kb))
print(ggplot(d, aes(inserted, insert_rate)) +
geom_point(alpha=0.5) +
scale_x_continuous(labels=si_vec) +
scale_y_log10(labels=si_vec))
if (exists("scan_rate", where=d)) {
newpng(paste(testname, "-2.png", sep = ""))
print(ggplot(d, aes(inserted, scan_rate)) +
geom_point(alpha=0.5) +
scale_x_continuous(labels=si_vec) +
scale_y_log10(labels=si_vec))
}
newpng(paste(testname, "-3.png", sep = ""))
d <- rename(d, c(
insert_rate="Insert rate (rows/sec)",
memrowset="Memstore Memory Usage"))
if (exists("scan_rate", where=d)) {
d <- rename(d, c(
scan_rate="Scan int col (rows/sec)"))
}
# set span to 5 seconds worth of data
span = 5.0/max(d$time)
d.melted = melt(d, id="time")
print(qplot(time, value, data=d.melted, geom="line", group = variable)
+ scale_y_continuous(labels=si_vec)
+ facet_grid(variable~., scale = "free_y")
+ stat_smooth())
|
# Permutation test for simple linear regression
library(lmPerm)
set.seed(1234)
fit <- lmp(weight~height,data=women,perm = "Prob")
summary(fit)
# Permutation test for polynomial regression
fit <- lmp(weight~height + I(height^2), data=women,perm = "Prob")
summary(fit)
# Multiple regression
states <- as.data.frame(state.x77)
fit <- lmp(Murder~Population + Illiteracy + Income + Frost,data=states,perm="Prob")
# Permutation test for one-way ANOVA
library(multcomp)
set.seed(1234)
fit <- aovp(response~trt, data=cholesterol,perm = "Prob")
summary(fit)
fit <- aovp(weight~gesttime + dose, data=litter,perm = "Prob")
summary(fit)
# Permutation test for two-way ANOVA
set.seed(1234)
fit <- aovp(len~supp*dose,data=ToothGrowth,perm = "Prob")
summary(fit)
# Bootstrapping with the boot package
# Bootstrapping a single statistic
rsq <- function(formula, data, indices) {
d <- data[indices,]
fit <- lm(formula, data = d)
return(summary(fit)$r.square)
}
library(boot)
set.seed(1234)
results <- boot(data=mtcars,statistic = rsq,R=1000,formula=mpg~wt+disp)
print(results)
plot(results)
boot.ci(results,type=c("perc","bca"))
# Bootstrapping several statistics
bs <- function(formula, data, indices) {
d <- data[indices,]
fit <- lm(formula, data=d)
return(coef(fit))
}
set.seed(1234)
results <- boot(data=mtcars, statistic = bs,
R=1000,formula=mpg~wt+disp)
print(results)
plot(results,index = 2)
boot.ci(results,type="bca",index=2)
boot.ci(results,type="bca",index=3)
boot.ci(boot.out = results,type="bca",index=2)
|
/9.10/sample03.R
|
no_license
|
weidaoming/R
|
R
| false | false | 1,561 |
r
|
# Permutation test for simple linear regression
library(lmPerm)
set.seed(1234)
fit <- lmp(weight~height,data=women,perm = "Prob")
summary(fit)
# Permutation test for polynomial regression
fit <- lmp(weight~height + I(height^2), data=women,perm = "Prob")
summary(fit)
# Multiple regression
states <- as.data.frame(state.x77)
fit <- lmp(Murder~Population + Illiteracy + Income + Frost,data=states,perm="Prob")
# Permutation test for one-way ANOVA
library(multcomp)
set.seed(1234)
fit <- aovp(response~trt, data=cholesterol,perm = "Prob")
summary(fit)
fit <- aovp(weight~gesttime + dose, data=litter,perm = "Prob")
summary(fit)
# Permutation test for two-way ANOVA
set.seed(1234)
fit <- aovp(len~supp*dose,data=ToothGrowth,perm = "Prob")
summary(fit)
# Bootstrapping with the boot package
# Bootstrapping a single statistic
rsq <- function(formula, data, indices) {
d <- data[indices,]
fit <- lm(formula, data = d)
return(summary(fit)$r.square)
}
library(boot)
set.seed(1234)
results <- boot(data=mtcars,statistic = rsq,R=1000,formula=mpg~wt+disp)
print(results)
plot(results)
boot.ci(results,type=c("perc","bca"))
# Bootstrapping several statistics
bs <- function(formula, data, indices) {
d <- data[indices,]
fit <- lm(formula, data=d)
return(coef(fit))
}
set.seed(1234)
results <- boot(data=mtcars, statistic = bs,
R=1000,formula=mpg~wt+disp)
print(results)
plot(results,index = 2)
boot.ci(results,type="bca",index=2)
boot.ci(results,type="bca",index=3)
boot.ci(boot.out = results,type="bca",index=2)
|
#' Create a dataframe formatted using the Actual and Forecast Table Schema (AFTS)
#'
#' Joins a table containing time series actuals (given in the TSTS format) and a
#' table containing forecasts (given in the FTS format) to create a table containing both
#' actuals and forecasts using the Actual and Forecast Table Schema (AFTS) format.
#'
#' @aliases createAFTS
#' @param ts dataframe containing time series actuals formatted using the Time Series
#' Table Schema (TSTS), use \code{showTSTS()} to display schema specification details.
#' @param fc dataframe containing forecasts formatted using the Forecast Table Schema (FTS),
#' use \code{showFTS()} to display schema specification details.
#' @return dataframe in the AFTS format, use \code{showAFTS()} to display schema specification details.
#' @details Takes all records contained in \code{fc} and matches with values from \code{ts}.
#' If no matching value is found, the record is not included in the result.
#' @author Cuong Sai, Andrey Davydenko, and Maxim Shcherbakov.
#' @seealso \code{\link{showTSTS}}, \code{\link{showFTS}}, \code{\link{example1_ts}}, \code{\link{example1_fc}}
#' @keywords datasets
#' @examples
#' af <- createAFTS(example1_ts, example1_fc)
#' head(af)
#'
#' @export
createAFTS <- function(ts, fc) {
# Error handling
# For TSTS schema
if (!is.data.frame(ts)){
stop("Argument ts should be a data frame.")
}
if (!sum(is.element(c("series_id", "timestamp"), colnames(ts))) == 2) {
    stop("Check the column names of input data frame ts. The input data ts must be
a data frame containing columns named 'series_id' and 'timestamp'.")
}
# For FTS schema
if (!is.data.frame(fc)){
stop("Argument fc should be a data frame.")
}
if (!sum(is.element(c("series_id", "timestamp"), colnames(fc))) == 2) {
    stop("Check the column names of input data frame fc. The input data fc must be
a data frame containing columns named 'series_id' and 'timestamp'.")
}
#
df <- dplyr::inner_join(ts, fc, by = c("series_id", "timestamp"))
df <- dplyr::arrange(df, origin_timestamp, horizon)
return(df)
}
|
/R/createAFTS.R
|
no_license
|
forvis/forvision
|
R
| false | false | 2,158 |
r
|
#' Create a dataframe formatted using the Actual and Forecast Table Schema (AFTS)
#'
#' Joins a table containing time series actuals (given in the TSTS format) and a
#' table containing forecasts (given in the FTS format) to create a table containing both
#' actuals and forecasts using the Actual and Forecast Table Schema (AFTS) format.
#'
#' @aliases createAFTS
#' @param ts dataframe containing time series actuals formatted using the Time Series
#' Table Schema (TSTS), use \code{showTSTS()} to display schema specification details.
#' @param fc dataframe containing forecasts formatted using the Forecast Table Schema (FTS),
#' use \code{showFTS()} to display schema specification details.
#' @return dataframe in the AFTS format, use \code{showAFTS()} to display schema specification details.
#' @details Takes all records contained in \code{fc} and matches with values from \code{ts}.
#' If no matching value is found, the record is not included in the result.
#' @author Cuong Sai, Andrey Davydenko, and Maxim Shcherbakov.
#' @seealso \code{\link{showTSTS}}, \code{\link{showFTS}}, \code{\link{example1_ts}}, \code{\link{example1_fc}}
#' @keywords datasets
#' @examples
#' af <- createAFTS(example1_ts, example1_fc)
#' head(af)
#'
#' @export
createAFTS <- function(ts, fc) {
# Error handling
# For TSTS schema
if (!is.data.frame(ts)){
stop("Argument ts should be a data frame.")
}
if (!sum(is.element(c("series_id", "timestamp"), colnames(ts))) == 2) {
    stop("Check the column names of input data frame ts. The input data ts must be
a data frame containing columns named 'series_id' and 'timestamp'.")
}
# For FTS schema
if (!is.data.frame(fc)){
stop("Argument fc should be a data frame.")
}
if (!sum(is.element(c("series_id", "timestamp"), colnames(fc))) == 2) {
    stop("Check the column names of input data frame fc. The input data fc must be
a data frame containing columns named 'series_id' and 'timestamp'.")
}
#
df <- dplyr::inner_join(ts, fc, by = c("series_id", "timestamp"))
df <- dplyr::arrange(df, origin_timestamp, horizon)
return(df)
}
|
#' @title Update posterior latent class assignment \code{C} for each subject.
#'
#' @description This uses Bayes' theorem to update posterior latent class assignment. The posterior probability for class \code{k} is the product of each subject's likelihood contribution under class \code{k} and the prior probability of belonging to class \code{k}, divided by the likelihood contribution marginalized over latent classes. The function accommodates the case of \code{K} equal to 2 or greater than 2.
#' @return A list of 2 elements. An \code{n} by 1 vector of latent class assignments called \code{C}. An \code{n} by \code{K} matrix of posterior probabilities of latent class assignment.
update_C_fast <- function(K, Z, C, betaObs, bSub, betaSub, Sigma, Psi, phi, tau, Omega, lambda, kappa, Theta, Y, XRe, XObs, XSub, D, UObs, URe, M, Mvec, VObs, VRe, subjectIDY, subjectID, subjectIDM, modelVisit, modelResponse){
### Number of subjects
n <- length(C)
### Prior probability of class assignment
priorPikReorder <- matrix(NA, nrow = n, ncol = K)
if (K == 2) {
# K = 2:
priorPikReorder[ , K] <- pnorm(Z)
priorPikReorder[ , 1] <- 1 - priorPikReorder[ , K]
} else {
# K > 2:
# Construct original matrix R from Z based parameterization
#We have been working with the differenced parameterization such that the coefficients represent a difference from baseline class K, where after a simple reparameterization, class K is switched to class 0 (the reference group moves from the Kth column to the first column)
#If we assume the original parameterization is represented by R, then
#Z_{i1} = R_{i1} - R_{iK}; Z_{i2} = R_{i2} - R_{iK}, and so forth
#R_{iK} \sim N(0, 1) since \delta_K and \theta_K are fixed to 0
#R is an n x K matrix, where the K^th column is that with \delta_k fixed to 0 (the K^th column is the reference column)
R <- matrix(NA, nrow = n, ncol = K)
R[ , K] <- rnorm(n, mean = 0, sd = 1)
for (k in 1:(K - 1)) {
R[ , k] <- Z[, k] + R[ , K]
}
# Covariance matrix
VarCovP <- matrix(1, nrow = (K - 1), ncol = (K - 1))
diag(VarCovP) <- 2
# Values where distribution is evaluated
vals <- matrix(0, nrow = n, ncol = (K - 1))
for (k in 1:K) {
mur_diffk <- R[ , -k] - R[ , k]
priorPikReorder[ , k] <- mnormt::pmnorm(vals, mean = mur_diffk, varcov = VarCovP)
}
priorPikReorder <- as.matrix(unname(priorPikReorder))
}
### Construct likelihood contribution for subject i to each class k
J <- ncol(Y)
N <- nrow(Y)
q <- ncol(XRe)
# For Y
  # Placeholder for the likelihood contributions from Y_i, D_i, M_i
# Factorization Pr(C_i = k \ | \ b_i, Y_i) \propto MVN_q(b_{1i}, \ | \ C, \Psi_1, betaSub_1) MVN_q(b_{2i}, \ | \ C, \Psi_2, betaSub_2) \prod_{t = 1}^{n_i} MVN_J(Y_{1i}, Y_{2i}, \ | \ b_{1i}, b_{2i}, \Sigma_2, C_i)
# Log scale \log MVN_q(b_{1i}, \ | \ C, \Psi_1, betaSub_1) + \log MVN_q(b_{2i}, \ | \ C, \Psi_2, betaSub_2) + \sum_{t = 1}^{n_i} \log MVN_J(Y_{1i}, Y_{2i}, \ | \ b_{1i}, b_{2i}, \Sigma_2, C_i)
likY <- matrix(NA, nrow = n, ncol = K)
if (modelVisit == TRUE) {
likD <- matrix(NA, nrow = n, ncol = K)
# For D random effects piece
#sp_URe_sub <- lapply(split(as.data.frame(URe), subjectID, drop = TRUE), as.matrix)
#convert_URe_sub <- as.matrix(Matrix::bdiag(sp_URe_sub))
}
if (modelResponse == TRUE) {
likM <- matrix(NA, nrow = n, ncol = K)
# For M random effects piece
#sp_VRe_sub <- lapply(split(as.data.frame(VRe), subjectIDM, drop = TRUE), as.matrix)
#convert_VRe_sub <- as.matrix(Matrix::bdiag(sp_VRe_sub))
}
for (k in 1:K) {
# Storage that changes by latent class
muObs <- matrix(NA, nrow = N, ncol = J)
muSub <- matrix(NA, nrow = n, ncol = J)
likSub <- matrix(NA, nrow = n, ncol = J)
    # For f(Y | b, C) in likObs, construct muObs = E[Y | b, C], which will be N x J
    #Y: N x J matrix of longitudinal outcomes for each outcome Y_j
    #muObs: N x J matrix of means for latent class k
    #Sigma: J x J variance-covariance specific to latent class k
    #likObs: N x 1 vector
for (j in 1:J) {
# Random effects contribution to observation level
bSubjm <- matrix(bSub[[j]], nrow = n, ncol = q, byrow = TRUE)
bSubjCont <- unlist(sapply(unique(subjectIDY), function(x) {
selObs <- which(subjectIDY == x)
XReObs <- as.matrix(XRe)[selObs, ]
values <- as.matrix(XReObs) %*% c(t(as.matrix(bSubjm[which(unique(subjectIDY) == x), ])))
return(values)
}))
# Observation level mean
muObs[ , j] <- XObs %*% betaObs[[j]][ , k] + c(bSubjCont)
}
likObs <- sapply(1:nrow(Y), function(x) {
mvtnorm::dmvnorm(Y[x, ], mean = muObs[x, ], sigma = Sigma[ , , k], log = FALSE)
})
    #Conditional on random effects b, observations are independent, so combine them by taking the product of their likelihoods within subject
prodlikObs <- as.vector(tapply(likObs, subjectIDY, FUN = prod))
# Random effects contribution
#_{j} indexes Y_j
# f(b_{1i}, b_{2i} | C_i, \Psi_1, \Psi_2) = f(b_{1i} | C_i, betaSub_1, \Psi_1) f(b_{2i}| C, betaSub_2, \Psi_2)
for (j in 1:J) {
# Subject level equation
bSubm <- matrix(bSub[[j]], nrow = n, ncol = q, byrow = TRUE)
muSub <- XSub %*% betaSub[[j]][ , , k]
likSub[ , j] <- sapply(1:nrow(bSubm), function(x) {
mvtnorm::dmvnorm(bSubm[x, ], mean = muSub[x, ], sigma = as.matrix(Psi[[j]][ , , k]), log = FALSE)
})
}
    # Multiply the random effects contributions across outcomes for subject i
    prodlikSub <- apply(likSub, 1, prod)
    # Likelihood contribution of Y to class k
likY[ , k] <- prodlikSub * prodlikObs
    # Likelihood contribution of visit process D to class k
if (modelVisit == TRUE) {
# Observation level contribution
taum <- matrix(tau, nrow = n, ncol = q, byrow = TRUE)
# EDITS here
# Yields T x n
tauCont <- sapply(unique(subjectID), function(x) {
selObs <- which(subjectID == x)
UReObs <- as.matrix(URe)[selObs, ]
values <- as.matrix(UReObs) %*% c(t(as.matrix(taum[which(unique(subjectID) == x), ])))
return(values)
})
# Analogous to bSub area, c(tauCont) will yield n*T x 1
muObs <- UObs %*% as.matrix(phi[ , k]) + c(tauCont)
likObs <- dbinom(D, size = 1, prob = pnorm(muObs), log = FALSE)
prodlikObs <- as.vector(tapply(likObs, subjectID, FUN = prod))
# Subject level contribution
muSub <- matrix(rep(0, n*q), nrow = n, ncol = q)
likSub <- sapply(1:nrow(taum), function(x) {
mvtnorm::dmvnorm(taum[x, ], mean = muSub[x, ], sigma = as.matrix(Omega[ , , k]), log = FALSE)
})
likD[ , k] <- prodlikObs * likSub
}
    # Likelihood contribution from subject i for the response model
    # Assume that f(M_{1i}, M_{2i},...M_{Jstar,i} | D = 1, C; ...) = f(M_{1i} | D = 1, C; ...)f(M_{2i} | D = 1, C; ...)...f(M_{Jstar,i} | D = 1, C; ...)
if (modelResponse == TRUE) {
#Jstar <- ncol(M)
Jstar <- length(Mvec)
### Running product
likMk <- 1
for (j in 1:Jstar) {
      # For a given latent class, get the likelihood from outcome j at the observation level. Given Theta, contributions across responses combine as a product
# Get the random effect contribution for j
kappajm <- matrix(kappa[[j]], nrow = n, ncol = q, byrow = TRUE)
kappajCont <- unlist(sapply(unique(subjectIDM), function(x) {
selObs <- which(subjectIDM == x)
VReObs <- as.matrix(VRe)[selObs, ]
values <- as.matrix(VReObs) %*% c(t(as.matrix(kappajm[which(unique(subjectIDM) == x), ])))
return(values)
}))
# Observation level likelihood
muObsj <- VObs %*% lambda[[j]][ , k] + c(kappajCont)
likObsj <- dbinom(M[ , Mvec[j]], size = 1, prob = pnorm(muObsj), log = FALSE)
prodlikObsj <- as.vector(tapply(likObsj, subjectIDM, FUN = prod))
# Subject level likelihood
muSubj <- matrix(rep(0, n*q), nrow = n, ncol = q)
likSubj <- sapply(1:nrow(kappajm), function(x) {
mvtnorm::dmvnorm(kappajm[x, ], mean = muSubj[x, ], sigma = as.matrix(Theta[[j]][ , , k]), log = FALSE)
})
      # Likelihood contribution for response j
      likMj <- prodlikObsj * likSubj
      # Multiply the above into the running product over j responses in latent class k
likMk <- likMk * likMj
}
likM[ , k] <- likMk
}
}
# Reordering is necessary K > 2 but not for K = 2
if (K > 2) {
# Reorder latent classes to be on original parameterization
#Because the McCulloch and Rossi parameterization effectively switches latent class K to class 0, and because the prior pik are computed on the original un-differenced scale, we need to make sure that priorPik and llik_y_pik are consistently ordered.
    #In llik_y, the reference group in first column is returned to the last column to be consistent with the original parameterization
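    #For example, with K = 3 the cbind below maps likY columns (1, 2, 3) to (2, 3, 1), so the reference class in column 1 ends up last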
likYReorder <- cbind(likY[, 2:K], likY[, 1])
if (modelVisit == TRUE & modelResponse == TRUE) {
likDReorder <- cbind(likD[, 2:K], likD[, 1])
likMReorder <- cbind(likM[, 2:K], likM[, 1])
pik_num <- priorPikReorder * likYReorder * likDReorder * likMReorder
} else if (modelVisit == TRUE & modelResponse == FALSE) {
likDReorder <- cbind(likD[, 2:K], likD[, 1])
pik_num <- priorPikReorder * likYReorder * likDReorder
} else if (modelVisit == FALSE & modelResponse == FALSE) {
pik_num <- priorPikReorder * likYReorder
}
# Posterior probability of subject i belonging in each class
pikReorder <- t(apply(pik_num, 1, function(x) x / sum(x)))
# Draw C from multinomial, first returning pik to the formulation in McCulloch and Rossi that is the basis for latent normal bounds updating. This means that column K moves to column 1 to be the reference class. Reordering pik will funnel through to C.
pik <- cbind(pikReorder[ , K], pikReorder[ , 1:(K - 1)])
# Replace NA's with 1/K
sel <- apply(pik, 1, function(x) sum(is.na(x)))
pik[sel > 0, ] <- rep(rep(1 / K, times = K), times = sum(sel > 0))
} else if (K == 2) {
# K = 2
# No reordering is necessary
priorPik <- priorPikReorder
if (modelVisit == TRUE & modelResponse == TRUE) {
# Numerator
pik_num <- priorPik * likY * likD * likM
} else if (modelVisit == TRUE & modelResponse == FALSE) {
# Numerator
pik_num <- priorPik * likY * likD
} else if (modelVisit == FALSE & modelResponse == FALSE) {
pik_num <- priorPik * likY
}
# Posterior probability of subject i belonging in each class
pik <- t(apply(pik_num, 1, function(x) x / sum(x)))
sel <- apply(pik, 1, function(x) sum(is.na(x)))
pik[sel > 0, ] <- rep(rep(1 / K, times = K), times = sum(sel > 0))
}
pik <- as.matrix(unname(pik))
C <- Hmisc::rMultinom(pik, 1)
list(C = C, pik = pik)
}
|
/R/update_C_fast.R
|
permissive
|
anthopolos/EHRMiss
|
R
| false | false | 10,911 |
r
|
#' @title Update posterior latent class assignment \code{C} for each subject.
#'
#' @description This uses Bayes' theorem to update posterior latent class assignment. The posterior probability for class \code{k} is the product of each subject's likelihood contribution under class \code{k} and the prior probability of belonging to class \code{k}, divided by the likelihood contribution marginalized over latent classes. The function accommodates the case of \code{K} equal to 2 or greater than 2.
#' @return A list of 2 elements. An \code{n} by 1 vector of latent class assignments called \code{C}. An \code{n} by \code{K} matrix of posterior probabilities of latent class assignment.
update_C_fast <- function(K, Z, C, betaObs, bSub, betaSub, Sigma, Psi, phi, tau, Omega, lambda, kappa, Theta, Y, XRe, XObs, XSub, D, UObs, URe, M, Mvec, VObs, VRe, subjectIDY, subjectID, subjectIDM, modelVisit, modelResponse){
### Number of subjects
n <- length(C)
### Prior probability of class assignment
priorPikReorder <- matrix(NA, nrow = n, ncol = K)
if (K == 2) {
# K = 2:
priorPikReorder[ , K] <- pnorm(Z)
priorPikReorder[ , 1] <- 1 - priorPikReorder[ , K]
} else {
# K > 2:
# Construct original matrix R from Z based parameterization
#We have been working with the differenced parameterization such that the coefficients represent a difference from baseline class K, where after a simple reparameterization, class K is switched to class 0 (the reference group moves from the Kth column to the first column)
#If we assume the original parameterization is represented by R, then
#Z_{i1} = R_{i1} - R_{iK}; Z_{i2} = R_{i2} - R_{iK}, and so forth
#R_{iK} \sim N(0, 1) since \delta_K and \theta_K are fixed to 0
#R is an n x K matrix, where the K^th column is that with \delta_k fixed to 0 (the K^th column is the reference column)
R <- matrix(NA, nrow = n, ncol = K)
R[ , K] <- rnorm(n, mean = 0, sd = 1)
for (k in 1:(K - 1)) {
R[ , k] <- Z[, k] + R[ , K]
}
# Covariance matrix
VarCovP <- matrix(1, nrow = (K - 1), ncol = (K - 1))
diag(VarCovP) <- 2
# Values where distribution is evaluated
vals <- matrix(0, nrow = n, ncol = (K - 1))
for (k in 1:K) {
mur_diffk <- R[ , -k] - R[ , k]
priorPikReorder[ , k] <- mnormt::pmnorm(vals, mean = mur_diffk, varcov = VarCovP)
}
priorPikReorder <- as.matrix(unname(priorPikReorder))
}
### Construct likelihood contribution for subject i to each class k
J <- ncol(Y)
N <- nrow(Y)
q <- ncol(XRe)
# For Y
  # Placeholder for the likelihood contributions from Y_i, D_i, M_i
# Factorization Pr(C_i = k \ | \ b_i, Y_i) \propto MVN_q(b_{1i}, \ | \ C, \Psi_1, betaSub_1) MVN_q(b_{2i}, \ | \ C, \Psi_2, betaSub_2) \prod_{t = 1}^{n_i} MVN_J(Y_{1i}, Y_{2i}, \ | \ b_{1i}, b_{2i}, \Sigma_2, C_i)
# Log scale \log MVN_q(b_{1i}, \ | \ C, \Psi_1, betaSub_1) + \log MVN_q(b_{2i}, \ | \ C, \Psi_2, betaSub_2) + \sum_{t = 1}^{n_i} \log MVN_J(Y_{1i}, Y_{2i}, \ | \ b_{1i}, b_{2i}, \Sigma_2, C_i)
likY <- matrix(NA, nrow = n, ncol = K)
if (modelVisit == TRUE) {
likD <- matrix(NA, nrow = n, ncol = K)
# For D random effects piece
#sp_URe_sub <- lapply(split(as.data.frame(URe), subjectID, drop = TRUE), as.matrix)
#convert_URe_sub <- as.matrix(Matrix::bdiag(sp_URe_sub))
}
if (modelResponse == TRUE) {
likM <- matrix(NA, nrow = n, ncol = K)
# For M random effects piece
#sp_VRe_sub <- lapply(split(as.data.frame(VRe), subjectIDM, drop = TRUE), as.matrix)
#convert_VRe_sub <- as.matrix(Matrix::bdiag(sp_VRe_sub))
}
for (k in 1:K) {
# Storage that changes by latent class
muObs <- matrix(NA, nrow = N, ncol = J)
muSub <- matrix(NA, nrow = n, ncol = J)
likSub <- matrix(NA, nrow = n, ncol = J)
    # For f(Y | b, C) in likObs, construct muObs = E[Y | b, C], which will be N x J
    #Y: N x J matrix of longitudinal outcomes for each outcome Y_j
    #muObs: N x J matrix of means for latent class k
    #Sigma: J x J variance-covariance specific to latent class k
    #likObs: N x 1 vector
for (j in 1:J) {
# Random effects contribution to observation level
bSubjm <- matrix(bSub[[j]], nrow = n, ncol = q, byrow = TRUE)
bSubjCont <- unlist(sapply(unique(subjectIDY), function(x) {
selObs <- which(subjectIDY == x)
XReObs <- as.matrix(XRe)[selObs, ]
values <- as.matrix(XReObs) %*% c(t(as.matrix(bSubjm[which(unique(subjectIDY) == x), ])))
return(values)
}))
# Observation level mean
muObs[ , j] <- XObs %*% betaObs[[j]][ , k] + c(bSubjCont)
}
likObs <- sapply(1:nrow(Y), function(x) {
mvtnorm::dmvnorm(Y[x, ], mean = muObs[x, ], sigma = Sigma[ , , k], log = FALSE)
})
    #Conditional on random effects b, observations are independent, so combine them by taking the product of their likelihoods within subject
prodlikObs <- as.vector(tapply(likObs, subjectIDY, FUN = prod))
# Random effects contribution
#_{j} indexes Y_j
# f(b_{1i}, b_{2i} | C_i, \Psi_1, \Psi_2) = f(b_{1i} | C_i, betaSub_1, \Psi_1) f(b_{2i}| C, betaSub_2, \Psi_2)
for (j in 1:J) {
# Subject level equation
bSubm <- matrix(bSub[[j]], nrow = n, ncol = q, byrow = TRUE)
muSub <- XSub %*% betaSub[[j]][ , , k]
likSub[ , j] <- sapply(1:nrow(bSubm), function(x) {
mvtnorm::dmvnorm(bSubm[x, ], mean = muSub[x, ], sigma = as.matrix(Psi[[j]][ , , k]), log = FALSE)
})
}
    # Multiply the random effects contributions across outcomes for subject i
    prodlikSub <- apply(likSub, 1, prod)
    # Likelihood contribution of Y to class k
likY[ , k] <- prodlikSub * prodlikObs
    # Likelihood contribution of visit process D to class k
if (modelVisit == TRUE) {
# Observation level contribution
taum <- matrix(tau, nrow = n, ncol = q, byrow = TRUE)
# EDITS here
# Yields T x n
tauCont <- sapply(unique(subjectID), function(x) {
selObs <- which(subjectID == x)
UReObs <- as.matrix(URe)[selObs, ]
values <- as.matrix(UReObs) %*% c(t(as.matrix(taum[which(unique(subjectID) == x), ])))
return(values)
})
# Analogous to bSub area, c(tauCont) will yield n*T x 1
muObs <- UObs %*% as.matrix(phi[ , k]) + c(tauCont)
likObs <- dbinom(D, size = 1, prob = pnorm(muObs), log = FALSE)
prodlikObs <- as.vector(tapply(likObs, subjectID, FUN = prod))
# Subject level contribution
muSub <- matrix(rep(0, n*q), nrow = n, ncol = q)
likSub <- sapply(1:nrow(taum), function(x) {
mvtnorm::dmvnorm(taum[x, ], mean = muSub[x, ], sigma = as.matrix(Omega[ , , k]), log = FALSE)
})
likD[ , k] <- prodlikObs * likSub
}
    # Likelihood contribution from subject i for the response model
    # Assume that f(M_{1i}, M_{2i},...M_{Jstar,i} | D = 1, C; ...) = f(M_{1i} | D = 1, C; ...)f(M_{2i} | D = 1, C; ...)...f(M_{Jstar,i} | D = 1, C; ...)
if (modelResponse == TRUE) {
#Jstar <- ncol(M)
Jstar <- length(Mvec)
### Running product
likMk <- 1
for (j in 1:Jstar) {
      # For a given latent class, get the likelihood from outcome j at the observation level. Given Theta, contributions across responses combine as a product
# Get the random effect contribution for j
kappajm <- matrix(kappa[[j]], nrow = n, ncol = q, byrow = TRUE)
kappajCont <- unlist(sapply(unique(subjectIDM), function(x) {
selObs <- which(subjectIDM == x)
VReObs <- as.matrix(VRe)[selObs, ]
values <- as.matrix(VReObs) %*% c(t(as.matrix(kappajm[which(unique(subjectIDM) == x), ])))
return(values)
}))
# Observation level likelihood
muObsj <- VObs %*% lambda[[j]][ , k] + c(kappajCont)
likObsj <- dbinom(M[ , Mvec[j]], size = 1, prob = pnorm(muObsj), log = FALSE)
prodlikObsj <- as.vector(tapply(likObsj, subjectIDM, FUN = prod))
# Subject level likelihood
muSubj <- matrix(rep(0, n*q), nrow = n, ncol = q)
likSubj <- sapply(1:nrow(kappajm), function(x) {
mvtnorm::dmvnorm(kappajm[x, ], mean = muSubj[x, ], sigma = as.matrix(Theta[[j]][ , , k]), log = FALSE)
})
      # Likelihood contribution for response j
      likMj <- prodlikObsj * likSubj
      # Multiply the above into the running product over j responses in latent class k
likMk <- likMk * likMj
}
likM[ , k] <- likMk
}
}
# Reordering is necessary K > 2 but not for K = 2
if (K > 2) {
# Reorder latent classes to be on original parameterization
#Because the McCulloch and Rossi parameterization effectively switches latent class K to class 0, and because the prior pik are computed on the original un-differenced scale, we need to make sure that priorPik and llik_y_pik are consistently ordered.
    #In llik_y, the reference group in first column is returned to the last column to be consistent with the original parameterization
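    #For example, with K = 3 the cbind below maps likY columns (1, 2, 3) to (2, 3, 1), so the reference class in column 1 ends up last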
likYReorder <- cbind(likY[, 2:K], likY[, 1])
if (modelVisit == TRUE & modelResponse == TRUE) {
likDReorder <- cbind(likD[, 2:K], likD[, 1])
likMReorder <- cbind(likM[, 2:K], likM[, 1])
pik_num <- priorPikReorder * likYReorder * likDReorder * likMReorder
} else if (modelVisit == TRUE & modelResponse == FALSE) {
likDReorder <- cbind(likD[, 2:K], likD[, 1])
pik_num <- priorPikReorder * likYReorder * likDReorder
} else if (modelVisit == FALSE & modelResponse == FALSE) {
pik_num <- priorPikReorder * likYReorder
}
# Posterior probability of subject i belonging in each class
pikReorder <- t(apply(pik_num, 1, function(x) x / sum(x)))
# Draw C from multinomial, first returning pik to the formulation in McCulloch and Rossi that is the basis for latent normal bounds updating. This means that column K moves to column 1 to be the reference class. Reordering pik will funnel through to C.
pik <- cbind(pikReorder[ , K], pikReorder[ , 1:(K - 1)])
# Replace NA's with 1/K
sel <- apply(pik, 1, function(x) sum(is.na(x)))
pik[sel > 0, ] <- rep(rep(1 / K, times = K), times = sum(sel > 0))
} else if (K == 2) {
# K = 2
# No reordering is necessary
priorPik <- priorPikReorder
if (modelVisit == TRUE & modelResponse == TRUE) {
# Numerator
pik_num <- priorPik * likY * likD * likM
} else if (modelVisit == TRUE & modelResponse == FALSE) {
# Numerator
pik_num <- priorPik * likY * likD
} else if (modelVisit == FALSE & modelResponse == FALSE) {
pik_num <- priorPik * likY
}
# Posterior probability of subject i belonging in each class
pik <- t(apply(pik_num, 1, function(x) x / sum(x)))
sel <- apply(pik, 1, function(x) sum(is.na(x)))
pik[sel > 0, ] <- rep(rep(1 / K, times = K), times = sum(sel > 0))
}
pik <- as.matrix(unname(pik))
C <- Hmisc::rMultinom(pik, 1)
list(C = C, pik = pik)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampleAges.R
\name{sampleAges}
\alias{sampleAges}
\title{Get sample ages from a set of Bchron calibrated dates}
\usage{
sampleAges(CalDates, n_samp = 10000)
}
\arguments{
\item{CalDates}{A list created from either \code{\link{BchronCalibrate}}.}
\item{n_samp}{The desired number of samples}
}
\value{
A vector of length \code{n_samp} containing sample ages for the specified date
}
\description{
A function for extracting sample ages from Bchron calibrated dates
}
\details{
Sometimes it is useful to have a set of sample calendar ages for your calibrated dates. For example the samples might be required to create a credible/confidence interval, or to create another non-trivial function of calibrated dates, such as differences. By default the \code{\link{BchronCalibrate}} function provides a grid of ages and an associated density, similar to OxCal. This function extracts that information and uses the \code{\link{sample}} function to output the desired number of samples
}
\examples{
# Calibrate multiple ages and summarise them
ages = BchronCalibrate(ages=c(3445,11553,7456),ageSds=c(50,230,110),
calCurves=c('intcal13','intcal13','shcal13'))
# Get samples
age_samples = sampleAges(ages)
# Create a credible interval and the median for each date
apply(age_samples, 2, quantile, probs = c(0.05, 0.5, 0.95))
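# A sketch of the 'differences' use mentioned in Details (assumes the column
# order of age_samples follows the dates supplied above)
age_diff = age_samples[,2] - age_samples[,1]
quantile(age_diff, probs = c(0.05, 0.5, 0.95))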
}
\seealso{
\code{\link{BchronCalibrate}}
}
|
/man/sampleAges.Rd
|
no_license
|
allisonstegner/Bchron
|
R
| false | true | 1,459 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampleAges.R
\name{sampleAges}
\alias{sampleAges}
\title{Get sample ages from a set of Bchron calibrated dates}
\usage{
sampleAges(CalDates, n_samp = 10000)
}
\arguments{
\item{CalDates}{A list created from either \code{\link{BchronCalibrate}}.}
\item{n_samp}{The desired number of samples}
}
\value{
A vector of length \code{n_samp} containing sample ages for the specified date
}
\description{
A function for extracting sample ages from Bchron calibrated dates
}
\details{
Sometimes it is useful to have a set of sample calendar ages for your calibrated dates. For example the samples might be required to create a credible/confidence interval, or to create another non-trivial function of calibrated dates, such as differences. By default the \code{\link{BchronCalibrate}} function provides a grid of ages and an associated density, similar to OxCal. This function extracts that information and uses the \code{\link{sample}} function to output the desired number of samples
}
\examples{
# Calibrate multiple ages and summarise them
ages = BchronCalibrate(ages=c(3445,11553,7456),ageSds=c(50,230,110),
calCurves=c('intcal13','intcal13','shcal13'))
# Get samples
age_samples = sampleAges(ages)
# Create a credible interval and the median for each date
apply(age_samples, 2, quantile, probs = c(0.05, 0.5, 0.95))
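# A sketch of the 'differences' use mentioned in Details (assumes the column
# order of age_samples follows the dates supplied above)
age_diff = age_samples[,2] - age_samples[,1]
quantile(age_diff, probs = c(0.05, 0.5, 0.95))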
}
\seealso{
\code{\link{BchronCalibrate}}
}
|
#' Generates grouped violin plots based on peptide signal intensities with t-test options
#'
#' Takes in the scaled dataset from krsa_scaleModel() and plots violin figures using ggplot2
#'
#' @param data the scaled dataset from krsa_scaleModel
#' @param peptides vector of peptides
#' @param grp_comp list of group comparison names
#' @param groups (optional) a vector of group names
#' @param test perform two group test
#' @param test_method type of test (default is wilcox.test)
#' @param violin add violin layer
#' @param dots add dotplot layer
#' @param lines add lines layer
#' @param avg_line draw averaged line across the two groups
#' @param ... arguments passed to ggsignif
#'
#'
#' @return ggplot figure
#'
#' @family plots
#'
#' @export
#'
#' @examples
#' TRUE
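#' # A minimal illustrative call (not run): `scaled_df` and `peps` are assumed
#' # placeholders for krsa_scaleModel() output and a vector of peptide names
#' # krsa_violin_plot_grouped(scaled_df, peps,
#' #                          grp_comp = list(c("Control", "Treatment")),
#' #                          groups = c("Control", "Treatment"))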
krsa_violin_plot_grouped <- function(data, peptides,grp_comp,groups = NULL,test = T, test_method = "wilcox.test",
violin = TRUE, dots = TRUE, lines = T, avg_line = F, ...) {
data %>%
dplyr::filter(Peptide %in% peptides) %>%
dplyr::filter(!is.na(slope)) %>%
{if(!is.null(groups)) dplyr::filter(.,Group %in% groups) else .} -> data
data %>%
ggplot2::ggplot(ggplot2::aes(Group, slope)) -> gg
if(violin) {
gg <- gg + ggplot2::geom_violin(ggplot2::aes(fill = Group), show.legend = F, trim = F, width=0.4)
}
gg <- gg + ggplot2::geom_boxplot(ggplot2::aes(fill = Group),width=0.1)
if(dots) {
gg <- gg + ggplot2::geom_dotplot(binaxis='y', stackdir='center', dotsize=1, alpha = 1/2)
}
if(lines) {
gg <- gg + ggplot2::geom_line(ggplot2::aes(group = Peptide), alpha = 1/2)
}
if(test) {
gg <- gg + ggsignif::geom_signif(
comparisons = grp_comp,
test = test_method,
...
)
}
if(avg_line) {
data %>%
      dplyr::group_by(Group) %>%
      dplyr::summarise(slopeM = mean(slope)) -> avg_data
gg <- gg + ggplot2::geom_line(data = avg_data, ggplot2::aes(Group, slopeM, group = 1),
color = "black", size = 3)
}
gg +
ggplot2::labs(
x = "",
y = "Signal Intensity"
) +
ggplot2::theme_bw()
}
|
/R/krsa_violin_plot_grouped.R
|
permissive
|
AliSajid/KRSA
|
R
| false | false | 2,119 |
r
|
#' Generates grouped violin plots based on peptide signal intensities with t-test options
#'
#' Takes in the scaled dataset from krsa_scaleModel() and plots violin figures using ggplot2
#'
#' @param data the scaled dataset from krsa_scaleModel
#' @param peptides vector of peptides
#' @param grp_comp list of group comparison names
#' @param groups (optional) a vector of group names
#' @param test perform two group test
#' @param test_method type of test (default is wilcox.test)
#' @param violin add violin layer
#' @param dots add dotplot layer
#' @param lines add lines layer
#' @param avg_line draw averaged line across the two groups
#' @param ... arguments passed to ggsignif
#'
#'
#' @return ggplot figure
#'
#' @family plots
#'
#' @export
#'
#' @examples
#' TRUE
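#' # A minimal illustrative call (not run): `scaled_df` and `peps` are assumed
#' # placeholders for krsa_scaleModel() output and a vector of peptide names
#' # krsa_violin_plot_grouped(scaled_df, peps,
#' #                          grp_comp = list(c("Control", "Treatment")),
#' #                          groups = c("Control", "Treatment"))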
krsa_violin_plot_grouped <- function(data, peptides,grp_comp,groups = NULL,test = T, test_method = "wilcox.test",
violin = TRUE, dots = TRUE, lines = T, avg_line = F, ...) {
data %>%
dplyr::filter(Peptide %in% peptides) %>%
dplyr::filter(!is.na(slope)) %>%
{if(!is.null(groups)) dplyr::filter(.,Group %in% groups) else .} -> data
data %>%
ggplot2::ggplot(ggplot2::aes(Group, slope)) -> gg
if(violin) {
gg <- gg + ggplot2::geom_violin(ggplot2::aes(fill = Group), show.legend = F, trim = F, width=0.4)
}
gg <- gg + ggplot2::geom_boxplot(ggplot2::aes(fill = Group),width=0.1)
if(dots) {
gg <- gg + ggplot2::geom_dotplot(binaxis='y', stackdir='center', dotsize=1, alpha = 1/2)
}
if(lines) {
gg <- gg + ggplot2::geom_line(ggplot2::aes(group = Peptide), alpha = 1/2)
}
if(test) {
gg <- gg + ggsignif::geom_signif(
comparisons = grp_comp,
test = test_method,
...
)
}
if(avg_line) {
data %>%
      dplyr::group_by(Group) %>%
      dplyr::summarise(slopeM = mean(slope)) -> avg_data
gg <- gg + ggplot2::geom_line(data = avg_data, ggplot2::aes(Group, slopeM, group = 1),
color = "black", size = 3)
}
gg +
ggplot2::labs(
x = "",
y = "Signal Intensity"
) +
ggplot2::theme_bw()
}
|
#' @include facet-.R
NULL
#' Facet specification: a single panel.
#'
#' @inheritParams facet_grid
#' @keywords internal
#' @export
#' @examples
#' # facet_null is the default faceting specification if you
#' # don't override it with facet_grid or facet_wrap
#' ggplot(mtcars, aes(mpg, wt)) + geom_point()
facet_null <- function(shrink = TRUE) {
ggproto(NULL, FacetNull,
shrink = shrink
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
FacetNull <- ggproto("FacetNull", Facet,
shrink = TRUE,
compute_layout = function(data, params) {
layout_null()
},
map_data = function(data, layout, params) {
# Need the is.waive check for special case where no data, but aesthetics
# are mapped to vectors
if (is.waive(data))
return(data_frame0(PANEL = factor()))
if (empty(data))
return(data_frame0(data, PANEL = factor()))
# Needs to be a factor to be consistent with other facet types
data$PANEL <- factor(1)
data
},
draw_panels = function(panels, layout, x_scales, y_scales, ranges, coord, data, theme, params) {
range <- ranges[[1]]
# Figure out aspect ratio
aspect_ratio <- theme$aspect.ratio %||% coord$aspect(range)
if (is.null(aspect_ratio)) {
aspect_ratio <- 1
respect <- FALSE
} else {
respect <- TRUE
}
axis_h <- coord$render_axis_h(range, theme)
axis_v <- coord$render_axis_v(range, theme)
all <- matrix(list(
zeroGrob(), axis_h$top, zeroGrob(),
axis_v$left, panels[[1]], axis_v$right,
zeroGrob(), axis_h$bottom, zeroGrob()
), ncol = 3, byrow = TRUE)
z_matrix <- matrix(c(5, 6, 4, 7, 1, 8, 3, 9, 2), ncol = 3, byrow = TRUE)
grob_widths <- unit.c(grobWidth(axis_v$left), unit(1, "null"), grobWidth(axis_v$right))
grob_heights <- unit.c(grobHeight(axis_h$top), unit(abs(aspect_ratio), "null"), grobHeight(axis_h$bottom))
grob_names <- c("spacer", "axis-l", "spacer", "axis-t", "panel", "axis-b", "spacer", "axis-r", "spacer")
grob_clip <- c("off", "off", "off", "off", coord$clip, "off", "off", "off", "off")
layout <- gtable_matrix("layout", all,
widths = grob_widths, heights = grob_heights,
respect = respect, clip = grob_clip,
z = z_matrix
)
layout$layout$name <- grob_names
layout
}
)
|
/R/facet-null.R
|
no_license
|
cran/ggplot2
|
R
| false | false | 2,400 |
r
|
#' @include facet-.R
NULL
#' Facet specification: a single panel.
#'
#' @inheritParams facet_grid
#' @keywords internal
#' @export
#' @examples
#' # facet_null is the default faceting specification if you
#' # don't override it with facet_grid or facet_wrap
#' ggplot(mtcars, aes(mpg, wt)) + geom_point()
facet_null <- function(shrink = TRUE) {
ggproto(NULL, FacetNull,
shrink = shrink
)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
FacetNull <- ggproto("FacetNull", Facet,
shrink = TRUE,
compute_layout = function(data, params) {
layout_null()
},
map_data = function(data, layout, params) {
# Need the is.waive check for special case where no data, but aesthetics
# are mapped to vectors
if (is.waive(data))
return(data_frame0(PANEL = factor()))
if (empty(data))
return(data_frame0(data, PANEL = factor()))
# Needs to be a factor to be consistent with other facet types
data$PANEL <- factor(1)
data
},
draw_panels = function(panels, layout, x_scales, y_scales, ranges, coord, data, theme, params) {
range <- ranges[[1]]
# Figure out aspect ratio
aspect_ratio <- theme$aspect.ratio %||% coord$aspect(range)
if (is.null(aspect_ratio)) {
aspect_ratio <- 1
respect <- FALSE
} else {
respect <- TRUE
}
axis_h <- coord$render_axis_h(range, theme)
axis_v <- coord$render_axis_v(range, theme)
all <- matrix(list(
zeroGrob(), axis_h$top, zeroGrob(),
axis_v$left, panels[[1]], axis_v$right,
zeroGrob(), axis_h$bottom, zeroGrob()
), ncol = 3, byrow = TRUE)
z_matrix <- matrix(c(5, 6, 4, 7, 1, 8, 3, 9, 2), ncol = 3, byrow = TRUE)
grob_widths <- unit.c(grobWidth(axis_v$left), unit(1, "null"), grobWidth(axis_v$right))
grob_heights <- unit.c(grobHeight(axis_h$top), unit(abs(aspect_ratio), "null"), grobHeight(axis_h$bottom))
grob_names <- c("spacer", "axis-l", "spacer", "axis-t", "panel", "axis-b", "spacer", "axis-r", "spacer")
grob_clip <- c("off", "off", "off", "off", coord$clip, "off", "off", "off", "off")
layout <- gtable_matrix("layout", all,
widths = grob_widths, heights = grob_heights,
respect = respect, clip = grob_clip,
z = z_matrix
)
layout$layout$name <- grob_names
layout
}
)
|
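# Read a CSV, split each ORIGINAL_TEXT value on '.', ',' and '_', drop the tokens
# "MODE" and "TO", and write the remaining tokens tab-separated, one input row per line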
src <- read.csv(file = "C:/Users/Sam/Desktop/source.csv")
splitted <- strsplit(as.character(src$ORIGINAL_TEXT),"[.,_]")
for(i in 1:length(splitted))
{
temp <- unlist(splitted[i])
temp <- temp[temp !="MODE"]
temp <- temp[temp !="TO"]
splitted[i] <- list(temp)
}
zz <- file("C:/Users/Sam/Desktop/target.txt","w")
for(i in 1:length(splitted))
{
temp <- unlist(splitted[i])
writeLines(temp,con = zz,sep="\t")
writeLines("",con=zz)
}
close(zz)
|
/wf_file.r
|
no_license
|
SamJake/test
|
R
| false | false | 452 |
r
|
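# Read a CSV, split each ORIGINAL_TEXT value on '.', ',' and '_', drop the tokens
# "MODE" and "TO", and write the remaining tokens tab-separated, one input row per line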
src <- read.csv(file = "C:/Users/Sam/Desktop/source.csv")
splitted <- strsplit(as.character(src$ORIGINAL_TEXT),"[.,_]")
for(i in 1:length(splitted))
{
temp <- unlist(splitted[i])
temp <- temp[temp !="MODE"]
temp <- temp[temp !="TO"]
splitted[i] <- list(temp)
}
zz <- file("C:/Users/Sam/Desktop/target.txt","w")
for(i in 1:length(splitted))
{
temp <- unlist(splitted[i])
writeLines(temp,con = zz,sep="\t")
writeLines("",con=zz)
}
close(zz)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docker_class.R
\docType{data}
\name{docker}
\alias{docker}
\title{docker}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
docker
}
\description{
docker
}
\keyword{datasets}
|
/man/docker.Rd
|
no_license
|
favstats/dockeR
|
R
| false | true | 277 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docker_class.R
\docType{data}
\name{docker}
\alias{docker}
\title{docker}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
docker
}
\description{
docker
}
\keyword{datasets}
|
test.occuMulti.fit.simple.1 <- function() {
y <- list(matrix(rep(1,10),5,2),
matrix(rep(1,10),5,2))
umf <- unmarkedFrameOccuMulti(y = y)
fm <- occuMulti(detformulas=rep("~1",2),
stateformulas=rep("~1",3), data = umf, se=FALSE)
#Probably should not be calling predict here b/c unit test
#but complicated to get actual occupancy prob otherwise
occ <- predict(fm,'state')$Predicted[1,1]
checkEqualsNumeric(occ,1, tolerance = 1e-4)
detlist <- predict(fm,'det')
det <- sapply(detlist,function(x) x[1,1])
checkEqualsNumeric(det, rep(1,length(detlist)), tolerance= 1e-4)
#Check fitList
fl <- fitList(fm, fm)
checkEquals(class(fl)[1],"unmarkedFitList")
checkEqualsNumeric(length(fl@fits), 2)
}
test.occuMulti.fit.simple.0 <- function() {
y <- list(matrix(rep(0,10),5,2),
matrix(rep(0,10),5,2))
umf <- unmarkedFrameOccuMulti(y = y)
fm <- occuMulti(detformulas=rep("~1",2),
stateformulas=rep("~1",3), data = umf, se=FALSE)
occ <- predict(fm,'state')$Predicted[1,1]
checkEqualsNumeric(occ,0, tolerance = 1e-4)
detlist <- predict(fm,'det')
det <- sapply(detlist,function(x) x[1,1])
checkEqualsNumeric(det, rep(0,length(detlist)), tolerance= 1e-4)
}
test.occuMulti.fit.covs <- function() {
y <- list(matrix(rep(0:1,10),5,2),
matrix(rep(0:1,10),5,2))
set.seed(123)
N <- dim(y[[1]])[1]
J <- dim(y[[1]])[2]
occ_covs <- as.data.frame(matrix(rnorm(N * 3),ncol=3))
names(occ_covs) <- paste('occ_cov',1:3,sep='')
det_covs <- as.data.frame(matrix(rnorm(N*J*2),ncol=2))
names(det_covs) <- paste('det_cov',1:2,sep='')
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covs)
stateformulas <- c('~occ_cov1','~occ_cov2','~occ_cov3')
detformulas <- c('~det_cov1','~det_cov2')
fm <- occuMulti(detformulas, stateformulas, data = umf, se=FALSE)
occ <- fm['state']
det <- fm['det']
checkEqualsNumeric(coef(occ), c(5.36630,0.79876,5.45492,-0.868451,9.21242,1.14561),
tolerance = 1e-4)
checkEqualsNumeric(coef(det), c(-0.27586,-0.81837,-0.09537,0.42334), tolerance = 1e-4)
fit <- fitted(fm)
checkEqualsNumeric(length(fit),2)
checkEqualsNumeric(sapply(fit,function(x) x[1,1]),c(0.14954,0.30801), tol = 1e-4)
res <- residuals(fm)
checkEqualsNumeric(length(res),2)
checkEqualsNumeric(sapply(res,function(x) x[1,1]),c(-0.14954,-0.30801), tol= 1e-4)
#Check site cov can be used in detection formula
detformulas <- c('~occ_cov1','~det_cov2')
fm <- occuMulti(detformulas, stateformulas, data = umf, se=FALSE)
checkEqualsNumeric(coef(fm,'det')[2],3.355328e-05, tol=1e-4)
}
test.occuMulti.fit.NA <- function() {
y <- list(matrix(rep(0:1,10),5,2),
matrix(rep(0:1,10),5,2))
set.seed(456)
N <- dim(y[[1]])[1]
J <- dim(y[[1]])[2]
occ_covs <- as.data.frame(matrix(rnorm(N * 3),ncol=3))
names(occ_covs) <- paste('occ_cov',1:3,sep='')
det_covs <- as.data.frame(matrix(rnorm(N*J*2),ncol=2))
names(det_covs) <- paste('det_cov',1:2,sep='')
stateformulas <- c('~occ_cov1','~occ_cov2','~occ_cov3')
detformulas <- c('~det_cov1','~det_cov2')
#Check error thrown when missing site covariates
occ_covsNA <- occ_covs
occ_covsNA[1,1] <- NA
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covsNA, obsCovs = det_covs)
checkException(occuMulti(detformulas, stateformulas, data=umf, se=FALSE))
#Check for warning when missing detection
yna <- y
yna[[1]][1,1] <- NA
umf <- unmarkedFrameOccuMulti(y = yna, siteCovs = occ_covs, obsCovs = det_covs)
options(warn=2)
checkException(occuMulti(detformulas, stateformulas, data=umf, se=FALSE))
options(warn=1)
#Check correct answer given when missing detection
fm <- occuMulti(detformulas, stateformulas, data = umf, se=FALSE)
checkEqualsNumeric(coef(fm)[c(1,7)], c(6.63207,0.35323), tol= 1e-4)
fit <- fitted(fm)
checkTrue(is.na(fit[[1]][1,1]))
res <- residuals(fm)
checkTrue(is.na(res[[1]][1,1]))
#Check error thrown when all detections are missing
yna[[1]][1,] <- NA
umf <- unmarkedFrameOccuMulti(y = yna, siteCovs = occ_covs, obsCovs = det_covs)
checkException(occuMulti(detformulas, stateformulas, data=umf, se=FALSE))
#Check warning when missing covariate value on detection
det_covsNA <- det_covs
det_covsNA[1,1] <- NA
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covsNA)
options(warn=2)
checkException(occuMulti(detformulas,stateformulas,data=umf, se=FALSE))
options(warn=1)
}
test.occuMulti.fit.fixed0 <- function(){
y <- list(matrix(rep(0:1,10),5,2),
matrix(rep(0:1,10),5,2))
set.seed(123)
N <- dim(y[[1]])[1]
J <- dim(y[[1]])[2]
occ_covs <- as.data.frame(matrix(rnorm(N * 3),ncol=3))
names(occ_covs) <- paste('occ_cov',1:3,sep='')
det_covs <- as.data.frame(matrix(rnorm(N*J*2),ncol=2))
names(det_covs) <- paste('det_cov',1:2,sep='')
stateformulas <- c('~occ_cov1','~occ_cov2','0')
detformulas <- c('~det_cov1','~det_cov2')
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covs)
fm <- occuMulti(detformulas, stateformulas, data = umf, se=FALSE)
occ <- fm['state']
checkEqualsNumeric(length(coef(occ)),4)
checkEqualsNumeric(coef(occ),c(12.26043,0.61183,12.41110,0.18764),tol=1e-4)
stateformulas <- c('~occ_cov1','~occ_cov2')
fm2 <- occuMulti(detformulas, stateformulas, data = umf, maxOrder=1,se=FALSE)
occ <- fm2['state']
checkEqualsNumeric(length(coef(occ)),4)
checkEqualsNumeric(coef(occ),c(12.26043,0.61183,12.41110,0.18764),tol=1e-4)
}
test.occuMulti.predict <- function(){
set.seed(123)
y <- list(matrix(rbinom(40,1,0.2),20,2),
matrix(rbinom(40,1,0.3),20,2))
N <- dim(y[[1]])[1]
J <- dim(y[[1]])[2]
occ_covs <- as.data.frame(matrix(rnorm(N * 3),ncol=3))
names(occ_covs) <- paste('occ_cov',1:3,sep='')
det_covs <- as.data.frame(matrix(rnorm(N*J*2),ncol=2))
names(det_covs) <- paste('det_cov',1:2,sep='')
stateformulas <- c('~occ_cov1','~occ_cov2','0')
detformulas <- c('~det_cov1','~det_cov2')
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covs)
fm <- occuMulti(detformulas, stateformulas, data = umf)
prState <- predict(fm, type='state')
checkEqualsNumeric(sapply(prState,function(x) x[1,1]),
c(0.30807707,0.23924385,0.02382635,0.85377734),tol=1e-4)
prDet <- predict(fm, type='det')
checkEqualsNumeric(as.numeric(prDet$sp2[1,]),
c(0.190485,0.0945992,0.00507,0.37589566), tol=1e-4)
#Check with newdata
nd <- siteCovs(umf)[1:2,]
pr_nd <- predict(fm, type='state', newdata=nd)$Predicted
checkEqualsNumeric(pr_nd[,1],c(0.3080771,0.3196486), tol=1e-4)
nd <- siteCovs(umf)[1:2,]
pr_nd <- predict(fm, type='state', newdata=nd, species=1, cond=2)$Predicted
checkEqualsNumeric(pr_nd,c(0.3858233,0.5402935), tol=1e-4)
#Make sure it works with newdata having only one row
nd <- siteCovs(umf)[1,]
pr_nd <- predict(fm, type='state', newdata=nd)$Predicted
checkEqualsNumeric(pr_nd[,1],c(0.3080771), tol=1e-4)
pr_nd <- predict(fm, type='state', newdata=nd, species=1, cond=2)$Predicted
checkEqualsNumeric(pr_nd,c(0.3858233), tol=1e-4)
stateformulas <- c('~1','~1','0')
detformulas <- c('~1','~det_cov2')
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covs)
fm <- occuMulti(detformulas, stateformulas, data = umf)
prState <- predict(fm, type='state')
checkEqualsNumeric(sapply(prState,function(x) x[1,1]),
c(0.475928,0.24416,0.01807069,0.846532),tol=1e-4)
prDet <- predict(fm, type='det')
checkEqualsNumeric(as.numeric(prDet$sp2[1,]),
c(0.20494,0.17175,-0.13168,0.541579), tol=1e-4)
#Check predicting co-occurrence
nd <- siteCovs(umf)[1:2,]
pr_all <- predict(fm, type='state', se=F)$Predicted[1:2,1]
pr_nd <- predict(fm, type='state', newdata=nd, species=c(1,2))$Predicted
checkEqualsNumeric(pr_nd,pr_all, tol=1e-4)
}
test.occuMulti.predict.NA <- function(){
set.seed(123)
y <- list(matrix(rbinom(40,1,0.2),20,2),
matrix(rbinom(40,1,0.3),20,2))
N <- dim(y[[1]])[1]
J <- dim(y[[1]])[2]
occ_covs <- as.data.frame(matrix(rnorm(N * 3),ncol=3))
names(occ_covs) <- paste('occ_cov',1:3,sep='')
det_covs <- as.data.frame(matrix(rnorm(N*J*2),ncol=2))
names(det_covs) <- paste('det_cov',1:2,sep='')
det_covs[1,1] <- NA
stateformulas <- c('~occ_cov1','~occ_cov2','0')
detformulas <- c('~det_cov1','~det_cov2')
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covs)
fm <- occuMulti(detformulas, stateformulas, data = umf)
prDet <- predict(fm, type='det')
checkTrue(all(is.na(prDet$sp1[1,])))
checkEqualsNumeric(as.numeric(prDet$sp1[2,]),
c(0.49781,0.243148,0.021250,0.974375), tol=1e-4)
#Check that you can predict with NAs in siteCovs
newdata <- siteCovs(umf)
newdata[1,1] <- NA
prOcc <- predict(fm, type='state', newdata=newdata)
checkTrue(all(is.na(prOcc$Predicted[1,])))
checkTrue(all(!is.na(sapply(prOcc,`[`,2,1))))
prOcc_sp <- predict(fm, type='state', species=1, newdata=newdata)
checkTrue(all(is.na(prOcc_sp[1,])))
checkTrue(all(!is.na(prOcc_sp[2,])))
checkEqualsNumeric(prOcc_sp$Predicted[2],0.4731427, tol=1e-4)
prOcc_cond <- predict(fm, type='state', species=1, cond=2, newdata=newdata)
checkTrue(all(is.na(prOcc_cond[1,])))
checkTrue(all(!is.na(prOcc_cond[2,])))
checkEqualsNumeric(prOcc_sp$Predicted[2],0.4731427, tol=1e-4)
}
test.occuMulti.predict.complexFormulas <- function(){
#Check scale(), etc
set.seed(123)
y <- list(matrix(rbinom(40,1,0.2),20,2),
matrix(rbinom(40,1,0.3),20,2))
N <- dim(y[[1]])[1]
J <- dim(y[[1]])[2]
occ_covs <- as.data.frame(matrix(rnorm(N * 3, mean=2),ncol=3))
names(occ_covs) <- paste('occ_cov',1:3,sep='')
det_covs <- as.data.frame(matrix(rnorm(N*J*2, mean=3),ncol=2))
names(det_covs) <- paste('det_cov',1:2,sep='')
stateformulas <- c('~scale(occ_cov1)','~1','0')
detformulas <- c('~scale(det_cov1)','~1')
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covs)
fm <- occuMulti(detformulas, stateformulas, data = umf)
#Check with newdata; contents of newdata should not
#affect resulting predictions (scale should be based on
#original data)
nd <- siteCovs(umf)[1:5,]
pr_nd <- predict(fm, type='state', newdata=nd, se=F)$Predicted
nd <- siteCovs(umf)[1:2,]
pr_nd2 <- predict(fm, type='state', newdata=nd, se=F)$Predicted
nd <- siteCovs(umf)[c(1,1),]
pr_nd3 <- predict(fm, type='state', newdata=nd, se=F)$Predicted
checkEqualsNumeric(pr_nd[1:2,], pr_nd2)
checkEqualsNumeric(pr_nd[c(1,1),], pr_nd3)
#Check for factor level handling
occ_covs$occ_fac <- factor(sample(c('a','b','c'),N,replace=T))
umf <- unmarkedFrameOccuMulti(y = y, siteCovs = occ_covs, obsCovs = det_covs)
stateformulas <- c('~occ_fac','~1','~1')
fm <- occuMulti(detformulas, stateformulas, data = umf)
nd <- siteCovs(umf)[1:2,]
pr_nd <- predict(fm, type='state', newdata=nd, se=F)$Predicted
nd2 <- data.frame(occ_fac=factor(c('a','b'),levels=c('a','b','c')))
pr_nd2 <- predict(fm, type='state', newdata=nd2, se=F)$Predicted
checkEqualsNumeric(pr_nd, pr_nd2[c(2,1),])
nd3 <- data.frame(occ_fac=c('a','b'))
pr_nd3 <- predict(fm, type='state', newdata=nd3, se=F)$Predicted
checkEqualsNumeric(pr_nd, pr_nd3[c(2,1),])
nd4 <- data.frame(occ_fac=factor(c('a','d'),levels=c('a','d')))
checkException(predict(fm, type='state', newdata=nd4, se=F))
#Check that predicting detection also works
nd5 <- data.frame(det_cov1 = rnorm(5))
pr_nd5 <- predict(fm, type='det', newdata=nd5)
checkEqualsNumeric(sapply(pr_nd5, nrow), c(5,5))
checkEqualsNumeric(pr_nd5$sp1$Predicted[1], 0.1680881)
}
|
/fuzzedpackages/unmarked/inst/unitTests/runit.occuMulti.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 11,814 |
r
|
Define your own operators:
`%+%` <- function(e1, e2) {
e1[is.na(e1)] <- 0; e2[is.na(e2)] <- 0; return(e1 + e2)}
`%-%` <- function(e1, e2) {
e1[is.na(e1)] <- 0; e2[is.na(e2)] <- 0; return(e1 - e2)}
within(df, e <- a %-% b %+% c)
a b c e
1 1 0 9 10
2 2 1 10 11
3 3 NA 11 14
4 4 3 NA 1
5 5 4 13 14
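A minimal sketch of the assumed input, reconstructed from the output shown above (so treat the exact values as illustrative):
df <- data.frame(a = 1:5, b = c(0, 1, NA, 3, 4), c = c(9, 10, 11, NA, 13))
Note that all user-defined %op% operators share a single precedence level and group left to right, so a %-% b %+% c evaluates as (a %-% b) %+% c.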
|
/define_your_own_operators.R
|
no_license
|
pstessel/useful_R
|
R
| false | false | 312 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/knapsack_dynamic.R
\name{knapsack_dynamic}
\alias{knapsack_dynamic}
\title{0/1 knapsack problem}
\usage{
knapsack_dynamic(x, W)
}
\arguments{
\item{x}{a dataframe consists of two variables \code{w} and \code{v}. Both variables contain only positive values.
\itemize{
\item w: weight of each element in the knapsack
\item v: value of each element in the knapsack
}}
\item{W}{a numeric value of the knapsack size.}
}
\value{
A list with the maximum knapsack value and the elements that can be added to the knapsack.
}
\description{
Solves the 0/1 knapsack problem using a dynamic-programming approach.
}
\references{
\url{https://en.wikipedia.org/wiki/Knapsack_problem#0.2F1_knapsack_problem}
}
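% Editor's sketch: an \examples{} section was not part of the generated file; the
% call below is hypothetical and only assumes the signature and value documented above.
\examples{
\dontrun{
x <- data.frame(w = c(1, 2, 3), v = c(6, 10, 12)) # item weights and values
knapsack_dynamic(x, W = 3) # returns the best achievable value and the chosen elements
}
}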
|
/knapsack/man/knapsack_dynamic.Rd
|
no_license
|
ClaraSchartner/lab6
|
R
| false | false | 783 |
rd
|
transmute(
flights,
dep_min = (dep_time %/% 100) * 60 + dep_time %% 100,
sched_dep_min = (sched_dep_time %/% 100) * 60 + sched_dep_time %% 100,
)
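# Worked example (hypothetical value, not taken from the flights data): dep_time 517
# means 5:17 a.m., so 517 %/% 100 = 5 hours and 517 %% 100 = 17 minutes,
# giving dep_min = 5 * 60 + 17 = 317 minutes after midnight.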
|
/cap05/transmute03.R
|
permissive
|
vcwild/r4ds
|
R
| false | false | 157 |
r
|
## In this script there are two functions: the first one caches a matrix together
## with its inverse, and the second one computes the inverse, reusing the cache
## when the matrix has not changed.
## This function builds the caching object (stores the matrix and its inverse)
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the matrix held by the object above,
## returning the cached inverse when one is already available.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
## Return a matrix that is the inverse of 'x'
m
}
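## Editor's usage sketch (not part of the original assignment file); kept as
## comments so that sourcing this file still only defines the two functions.
## The 2x2 matrix is just an illustrative value:
# cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(cm) # computes solve() on the matrix and stores the result
# cacheSolve(cm) # prints "getting cached data" and returns the cached inverse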
|
/ProgrammingAssignment2/cachematrix.R
|
no_license
|
BRodas/datasciencecoursera
|
R
| false | false | 790 |
r
|
\name{nexml_write}
\alias{nexml_write}
\alias{write.nexml}
\title{Write nexml files}
\usage{
nexml_write(x = new("nexml"), file = NULL, trees = NULL,
characters = NULL, meta = NULL, ...)
}
\arguments{
\item{x}{a nexml object, or any phylogeny object (e.g.
phylo, phylo4) that can be coerced into one. Can also be
omitted, in which case a new nexml object will be
constructed with the additional parameters specified.}
\item{file}{the name of the file to write out}
\item{trees}{phylogenetic trees to add to the nexml file
(if not already given in x) see \code{\link{add_trees}}
for details.}
\item{characters}{additional characters}
\item{meta}{A meta element or list of meta elements, see
\code{\link{add_meta}}}
\item{...}{additional arguments to add_meta, such as the
namespaces. See \code{\link{add_meta}}.}
}
\value{
Writes out a nexml file
}
\description{
Write nexml files
}
\examples{
## Write an ape tree to nexml, analogous to write.nexus:
library(ape); data(bird.orders)
write.nexml(bird.orders, file="example.xml")
## Assemble a nexml section by section and then write to file:
library(geiger)
data(geospiza)
nexml <- add_trees(geospiza$phy) # creates new nexml
nexml <- add_characters(geospiza$dat, nexml) # pass the nexml obj to append character data
nexml <- add_basic_meta(nexml, title="my title", creator = "Carl Boettiger")
nexml <- add_meta(meta("prism:modificationDate", format(Sys.Date())), nexml)
write.nexml(nexml, file="example.xml")
## As above, but in one call (except for add_meta() call).
write.nexml(trees = geospiza$phy,
characters = geospiza$dat,
title = "My title",
creator = "Carl Boettiger",
file = "example.xml")
## Mix and match: identical to the section by section:
nexml <- add_meta(meta("prism:modificationDate", format(Sys.Date())))
write.nexml(x = nexml,
trees = geospiza$phy,
characters = geospiza$dat,
title = "My title",
creator = "Carl Boettiger",
file = "example.xml")
}
\seealso{
\code{\link{add_trees}} \code{\link{add_characters}}
\code{\link{add_meta}} \code{\link{nexml_read}}
}
|
/man/nexml_write.Rd
|
permissive
|
craigcitro/RNeXML
|
R
| false | false | 2,199 |
rd
|
\name{dizzysNewInfec-package}
\alias{dizzysNewInfec-package}
\alias{dizzysNewInfec}
\docType{package}
\title{
\packageTitle{dizzysNewInfec}
}
\description{
\packageDescription{dizzysNewInfec}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{dizzysNewInfec}
\packageIndices{dizzysNewInfec}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
\packageAuthor{dizzysNewInfec}
Maintainer: \packageMaintainer{dizzysNewInfec}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~
~~ the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
/CODE_dizzys/refreshDIZZYS_2015_10_23/lan2/dizzysNewInfec/man/dizzysNewInfec-package.Rd
|
no_license
|
ttcgiang/THESE_GitHub
|
R
| false | false | 862 |
rd
|
te=c(27, 20, 7, 4, 1,1)
ye=c("European", "Indian", "Chinese", "West Asian", "Korean", "Japanese" )
df=data.frame(cbind(ye,te))
colnames(df)=c("origin", "count")
df$count=as.numeric(as.character(df$count))
df$percentage= round(df$count/sum(te)*100,1)
df$pop=c(197.3, 3.18, 3.79, 10.5, 1.7, 1.3 )
totalpop=sum(df$pop[c(2,3,5,6)])
df$biomed.pop=c(.58,.34*1.8/totalpop,.34*5.2/totalpop, .025, .34*1.67/totalpop, .34*1.3/totalpop)*69000
df$cheaters.ppm=df$count/df$pop
df$cheaters.p1000=df$count/df$biomed.pop*1000
#tabulate
knitr::kable(df[,c(1,2,3,5,7)],format = "pandoc")
xtable::xtable(df[,c(1,2,3,5,7)])
#upper-tail binomial probability of observing more than 20 such cases in a group of
#4236, given the overall rate of 60/69000
pbinom(q=20,size = 4236, prob = 60/69000,lower.tail = F)
#ratio of this group's rate per 1000 to the rate per 1000 among the rest of the
#biomedical workforce
4.7219854/((60-20)/(69000-4236)*1000)
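# Editor's note: pbinom(q, size, prob, lower.tail = FALSE) is P(X > q) for
# X ~ Binomial(size, prob), so 1 - pbinom(20, 4236, 60/69000) gives the same value.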
|
/scripts/cheaters.R
|
no_license
|
somasushma/R-code
|
R
| false | false | 809 |
r
|
#' Normalised sequence read coverage ratios for wild type S.cerevisiae W303
#'
#' Sequence read coverage ratios for wild type sample
#' (T7107 strain). The cells were stained with DNA dye and sorted
#' based on DNA content into S or G2/M phase fractions. Extracted
#' DNA was sequenced and mapped to sacCer3 genome. Unique reads
#' for replicating (S) and non-replicating (G2/M) samples were
#' calculated in 1 kb genomic bins. The ratio was created by
#' dividing 'score' values from replicating sample by non-
#' replicating sample 'score' values, adjusted by total number
#' of reads. The ratio values were further adjusted by multiplying
#' them by 1.41 to put the values onto biologically relevant
#' relative copy number scale from 1 to 2.
#'
#' @docType data
#'
#' @usage data(W303norm)
#'
#' @format A data frame with 11340 rows and 7 variables:
#' \describe{
#' \item{chrom}{short chromosome name}
#' \item{chromStart}{left chromosome coordinate}
#' \item{chromEnd}{right chromosome coordinate}
#' \item{name.rep}{replicating sample name}
#' \item{name.nonRep}{non-replicating sample name}
#' \item{ratio}{ratio value in the current bin}
#' \item{ratioFactor}{adjustment factor used for the current ratio}
#' }
#'
#' @keywords datasets sortSeq ratio replication
#'
#' @references Natsume et al. (2013) Mol Cell 50(5):661-74
#' (\href{https://pubmed.ncbi.nlm.nih.gov/23746350}{PubMed})
#'
#' @source S phase sample: \href{https://www.ncbi.nlm.nih.gov/sra/SRX204358}{SRA};
#' G2 sample: \href{https://www.ncbi.nlm.nih.gov/sra/SRX204357}{SRA}
#'
#' @examples
#' data(W303norm)
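#' # Editor's sketch (not from the package): the ratio described above is, in
#' # essence, the following, shown here with made-up illustrative numbers:
#' # score_rep <- 120; total_rep <- 2.0e7       # reads in one 1 kb bin / total reads, S phase
#' # score_nonRep <- 100; total_nonRep <- 2.2e7 # the same for the G2/M sample
#' # (score_rep / total_rep) / (score_nonRep / total_nonRep) * 1.41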
"W303norm"
|
/R/W303norm.R
|
no_license
|
cran/Repliscope
|
R
| false | false | 1,640 |
r
|
world_map_title_www <- function(p) {
print_debug_info(p)
if (!is_world_map_www(p)) return(p)
p_return <- p # Don't pass the modified p (e.g. with swap) on to next function (bit of hack)
x_position_left <- p$labels_margin_left
if (!is_set(p$world_map_value)) {
title_width <- p$width
} else {
title_width <- strwidth(p$title, cex = p$world_map_title_font_size, units = "inches", family = p$font) * cm(1) + 2 * x_position_left
}
# Correct for logo
if (is_yes(p$logo_show)) vshift <- p$logo_height / p$height else vshift <- 0
# COLOR TITLE BG
h <- .85
dy <- (1 - h) / (1 + vshift)
delta_dy <- (1 - h) - dy
r <- title_width / p$width
ybot <- h - vshift + delta_dy
ytop <- h - vshift + (1 - h) - delta_dy
rect(0, ybot, r, ytop, col = get_col_from_p(p, p$world_map_title_bg_col), border = NA)
rect(r, ybot, 1, ytop, col = get_col_from_p(p, p$world_map_value_bg_col), border = NA)
text(x_position_left / p$width, (1+h)/2 - vshift, labels = p$title, adj = c(0, 0.5), cex = p$world_map_title_font_size, col = get_col_from_p(p, p$world_map_title_col), family = p$font)
if (is_set(p$world_map_value)) {
sgn <- if (0 < p$world_map_value) "+" else ""
value <- fix_numbers(p$world_map_value, n_decimals = p$world_map_value_n_decimals, p$decimal_mark, big_mark = p$big_mark)
text((1+r)/2, (1+h)/2 - vshift, labels = paste0(sgn, value, p$world_map_value_symbol), adj = c(.5, 0.5), cex = p$world_map_value_font_size, col = get_col_from_p(p, p$world_map_value_col), family = p$font)
}
p_return
}
|
/R/world-map-title-www.R
|
permissive
|
data-science-made-easy/james
|
R
| false | false | 1,580 |
r
|
#' @include internal.R
NULL
#' Get coordinate reference system
#'
#' Extract the coordinate reference system from an object.
#'
#' @param x [sf::st_sf()], [terra::rast()], [Spatial-class], or
#' [raster::raster()] object.
#'
#' @return A [sf::st_crs()] object.
#'
#' @noRd
NULL
get_crs <- function(x) {
assert_required(x)
UseMethod("get_crs")
}
get_crs.sf <- function(x) sf::st_crs(x)
.S3method("get_crs", "sf", get_crs.sf)
get_crs.Spatial <- function(x) sf::st_crs(x@proj4string)
.S3method("get_crs", "Spatial", get_crs.Spatial)
get_crs.Raster <- function(x) sf::st_crs(x@crs)
.S3method("get_crs", "Raster", get_crs.Raster)
get_crs.ZonesRaster <- function(x) get_crs(x[[1]])
.S3method("get_crs", "ZonesRaster", get_crs.ZonesRaster)
get_crs.SpatRaster <- function(x) {
x_crs <- terra::crs(x)
if (nzchar(x_crs)) {
return(sf::st_crs(x_crs))
} else {
return(sf::st_crs(NA))
}
}
.S3method("get_crs", "SpatRaster", get_crs.SpatRaster)
get_crs.ZonesSpatRaster <- function(x) get_crs(x[[1]])
.S3method("get_crs", "ZonesSpatRaster", get_crs.ZonesSpatRaster)
na_crs <- "ENGCRS[\"Undefined Cartesian SRS\",\n EDATUM[\"\"],\n CS[Cartesian,2],\n AXIS[\"(E)\",east,\n ORDER[1],\n LENGTHUNIT[\"Meter\",1]],\n AXIS[\"(N)\",north,\n ORDER[2],\n LENGTHUNIT[\"Meter\",1]]]"
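## Editor's usage sketch (not part of the package source); it assumes the sf
## package is installed and uses the North Carolina shapefile bundled with sf.
## Kept as comments because top-level code in R/ would be evaluated when the
## package is built/installed:
# nc <- sf::st_read(system.file("shape/nc.shp", package = "sf"), quiet = TRUE)
# get_crs(nc) # dispatches to get_crs.sf() and returns an sf::st_crs() object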
|
/R/get_crs.R
|
no_license
|
prioritizr/prioritizr
|
R
| false | false | 1,357 |
r
|
# In this script we relate the presence of domestic species
# (cows, sheep, dogs, horses) to ecological integrity (ROBIN)
library(Hmisc)
library(rgdal)
library(sp)
library(raster)
library(dplyr)
# connection to the database (snmb)
PASS_SNMB = Sys.getenv("PASS_SNMB")
base_input <- src_postgres(dbname = "snmb", host = "dbms", user = "snmb",
password = PASS_SNMB)
# To obtain the integrity value corresponding to each INFyS cluster we use the
# integrity map and extract the value at the coordinates of the cluster's
# "Centro" site (recorded in the field)
# find site coordinates
conglomerado <- tbl(base_input, "conglomerado_muestra") %>%
collect() %>%
filter(id != 638 & id != 632) %>% # remove duplicated clusters (cgls)
# filter(nombre == "110296" | nombre == "158970" )
select(conglomerado_muestra_id = id, nombre)
sitio <- collect(tbl(base_input, "sitio_muestra")) %>%
filter(sitio_numero == "Centro") %>%
inner_join(conglomerado, by = "conglomerado_muestra_id") %>%
mutate(
lat = lat_grado + lat_min/60 + lat_seg/3600,
lon = ifelse(lon_grado > 0, lon_grado + lon_min/60 + lon_seg/3600,
-(lon_grado - lon_min/60 - lon_seg/3600)),
lon = -lon,
cgl = as.numeric(nombre)
) %>%
dplyr::select(cgl, conglomerado_muestra_id, lon, lat)
# copy the projection from this shapefile
malla_real <- readOGR("/Volumes/ARCHIVOS_C/Mac_Pro/SNMB/datos/malla_real",
"infys_2cWGS")
malla_real@proj4string
# and copy the projection of the integrity raster in order to reproject to LCC
raster_ei <- raster("/Volumes/ARCHIVOS_MAC/Dropbox/Datos Redes Bayesianas/EI_maps/Stage_3/Final_net_Scores.tif")
sitio_shape <- as.data.frame(sitio)
coordinates(sitio_shape) <- ~ lon + lat
sitio_shape@proj4string <- malla_real@proj4string
sitio_lcc <- spTransform(sitio_shape, projection(raster_ei))
plot(raster_ei)
points(sitio_lcc)
sitio$ei <- raster::extract(raster_ei, sitio_lcc)
sitio$id <- as.character(sitio$cgl)
sitio$x_s <- sitio_lcc@coords[, 1]
sitio$y_s <- sitio_lcc@coords[, 2]
# We now use the following function to look for domestic animals in the
# clusters
### IMPORTANT: there is an ad-hoc filter on the conglomerado table
especieInv <- function(noms){
# noms: string giving the names to group, separated by pipes, e.g.
# "Bos|taurus|vaca"
conglomerado <- tbl(base_input, "conglomerado_muestra") %>%
collect() %>%
filter(id != 638 & id != 632) %>% # quitamos cgls repetidos
select(conglomerado_muestra_id = id, nombre, estado, municipio,
uso = uso_suelo_tipo)
sitio <- tbl(base_input, "sitio_muestra") %>%
collect() %>%
select(conglomerado_muestra_id, sitio_muestra_id = id) %>%
inner_join(conglomerado, by = "conglomerado_muestra_id") %>%
select(sitio_muestra_id, conglomerado_muestra_id, nombre)
tr_ei <- tbl(base_input, "transecto_especies_invasoras_muestra") %>%
collect() %>%
select(transecto_especies_invasoras_id = id, conglomerado_muestra_id) %>%
left_join(conglomerado, by = "conglomerado_muestra_id")
ei <- tbl(base_input, "especie_invasora") %>%
collect() %>%
mutate(
ei_esp = grepl(noms, nombre_comun, ignore.case = TRUE) |
grepl(noms, nombre_cientifico, ignore.case = TRUE)
) %>%
select(transecto_especies_invasoras_id, ei_esp) %>%
left_join(tr_ei, by = "transecto_especies_invasoras_id") %>%
group_by(nombre) %>%
summarise(
ei_esp = sum(ei_esp, na.rm = TRUE)
) %>%
select(nombre, ei_esp)
ei_ex <- tbl(base_input, "especie_invasora_extra") %>%
collect() %>%
mutate(
ei_ex_esp = grepl(noms, nombre_comun, ignore.case = TRUE) |
grepl(noms, nombre_cientifico, ignore.case = TRUE)
) %>%
select(conglomerado_muestra_id, ei_ex_esp) %>%
right_join(conglomerado, by = "conglomerado_muestra_id") %>%
group_by(nombre) %>%
summarise(
ei_ex_esp = sum(ei_ex_esp, na.rm = TRUE)
)
er_ex <- tbl(base_input, "especimen_restos_extra") %>%
collect() %>%
mutate(
er_ex_esp = grepl(noms, nombre_comun, ignore.case = TRUE) |
grepl(noms, nombre_cientifico, ignore.case = TRUE)
) %>%
select(conglomerado_muestra_id, er_ex_esp) %>%
right_join(conglomerado, by = "conglomerado_muestra_id") %>%
group_by(nombre) %>%
summarise(
er_ex_esp = sum(er_ex_esp, na.rm = TRUE)
)
tr_he <- tbl(base_input, "transecto_huellas_excretas_muestra") %>%
collect() %>%
select(transecto_huellas_excretas_id = id, conglomerado_muestra_id) %>%
left_join(conglomerado, by = "conglomerado_muestra_id")
he <- tbl(base_input, "huella_excreta") %>%
collect() %>%
mutate(
he_esp = grepl(noms, nombre_comun, ignore.case = TRUE) |
grepl(noms, nombre_cientifico, ignore.case = TRUE)
) %>%
select(transecto_huellas_excretas_id, he_esp) %>%
left_join(tr_he, by = "transecto_huellas_excretas_id") %>%
group_by(nombre) %>%
summarise(
he_esp = sum(he_esp, na.rm = TRUE)
) %>%
select(nombre, he_esp)
he_ex <- tbl(base_input, "huella_excreta_extra") %>%
collect() %>%
mutate(
he_ex_esp = grepl(noms, nombre_comun, ignore.case = TRUE) |
grepl(noms, nombre_cientifico, ignore.case = TRUE)
) %>%
select(conglomerado_muestra_id, he_ex_esp) %>%
right_join(conglomerado, by = "conglomerado_muestra_id") %>%
group_by(nombre) %>%
summarise(
he_ex_esp = sum(he_ex_esp, na.rm = TRUE)
)
camara <- tbl(base_input, "camara") %>%
collect() %>%
select(camara_id = id, sitio_muestra_id) %>%
left_join(sitio, by = "sitio_muestra_id")
ar_camara <- tbl(base_input, "archivo_camara") %>%
collect() %>%
mutate(
camara_esp = grepl(noms, nombre_comun, ignore.case = TRUE) |
grepl(noms, nombre_cientifico, ignore.case = TRUE)
) %>%
select(camara_id, camara_esp) %>%
left_join(camara, by = "camara_id") %>%
group_by(nombre) %>%
summarise(
camara_esp = sum(camara_esp, na.rm = TRUE)
)
naZero <- function(x){
ifelse(is.na(x), 0, (x > 0)*1)
}
desagregado <- conglomerado %>%
left_join(ei) %>%
left_join(he) %>%
left_join(ei_ex) %>%
left_join(he_ex) %>%
left_join(er_ex) %>%
left_join(ar_camara) %>%
mutate_each(funs(naZero), contains("esp"))
presencia <- desagregado %>%
mutate(pres = (ei_esp + he_esp + ei_ex_esp + he_ex_esp + er_ex_esp +
camara_esp) > 0) %>%
select(id = nombre, pres)
list(desagregado = desagregado, presencia = presencia)
}
# extract the clusters with domestic animals
vacas <- especieInv("bos|taurus|vaca|equus|caballo|mula|perro|borrego")
vacas$desagregado
sum(vacas$presencia$pres)
# read the cluster/integrity table produced by Julián
# we favour this table over the one built above, because the manually entered
# coordinates contain errors and Julián used a theoretical grid
ie <- read.csv("congs_snmb_ie.csv")
head(ie)
head(sitio)
ggplot(ie, aes(x = ie)) + geom_histogram()
# create a categorical integrity variable
ie_vacas <- ie %>%
mutate(
id = as.character(Cgl)
) %>%
right_join(vacas$presencia) %>%
left_join(select(sitio, id, ei, x_s, y_s)) %>%
mutate(
# if ie (Julián's table) is NA we take the ei value from sitio
ie_join = ifelse(is.na(ie), ei, ie),
ie_class = cut2(ie_join, cuts = c(0, 0.5, 0.75, 1)),
x = ifelse(is.na(x), x_s, x),
y = ifelse(is.na(y), y_s, y)
)
shape_domesticos <- select(ie_vacas, x, y, pres)
coordinates(shape_domesticos) <- ~ x + y
shape_domesticos@proj4string <- sitio_lcc@proj4string
writeOGR(shape_domesticos, "./shapes_domesticos", "domesticos",
driver = "ESRI Shapefile", verbose = FALSE, overwrite_layer = TRUE)
# look at the proportion of cows in each integrity class
ie_vacas %>%
group_by(ie_class) %>%
summarise(
n = n(),
num_vacas = sum(pres, na.rm = TRUE),
prop_vacas = round(mean(pres, na.rm = TRUE) * 100, 1),
se = round(sd(pres, na.rm = TRUE) / sqrt(n) * 100, 1)
)
|
/integrity_domestic_animals/integrity_domestic.R
|
no_license
|
tereom/robin-snmb
|
R
| false | false | 8,141 |
r
|
teasel<-matrix(
c(
0, 0, 0, 0, 0, 322.388,
0.966, 0, 0, 0, 0, 0,
0.013, 0.01, 0.125, 0, 0, 3.488,
0.007, 0, 0.125, 0.238, 0, 30.170,
0.008, 0, 0.038, 0.245, 0.167, 0.862,
0, 0, 0, 0.023, 0.750, 0
),
nrow=6, byrow=TRUE,
dimnames=list(c("seed1", "seed2", "small", "medium", "large", "flowering"),
c("seed1", "seed2", "small", "medium", "large", "flowering")))
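## Editor's aside (not in the original data file, kept as comments so that
## data("teasel") only creates the matrix): the dominant eigenvalue of a
## projection matrix gives the asymptotic population growth rate.
# lambda <- Re(eigen(teasel)$values[1]) # eigen() orders eigenvalues by decreasing modulus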
|
/data/teasel.R
|
no_license
|
ashander/popbio
|
R
| false | false | 445 |
r
|
## Put comments here that give an overall description of what your
## functions do
# The following functions calculate the inverse of a matrix and store it in a cache.
# If the matrix has not changed, R does not recompute the inverse and instead returns
# the inverse stored in the cache.
## Write a short comment describing this function
# makeCacheMatrix function creates an object that caches the value of inverse of a matrix
makeCacheMatrix <- function(x = matrix()) {
matrix <- NULL
setMatrix <- function(y){
x <<- y
matrix <<- NULL
}
getMatrix <- function() x
setInv <- function(inv) matrix <<- inv
getInv <- function() matrix
list(setMatrix = setMatrix, getMatrix = getMatrix, setInv = setInv, getInv = getInv )
}
## Write a short comment describing this function
# The cacheSolve function computes the inverse of the matrix wrapped by makeCacheMatrix;
# if the inverse has already been calculated it retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
matrix <- x$getInv()
if (!is.null(matrix)) {
message("getting cache data")
return(matrix)
}
data <- x$getMatrix()
inv <- solve(data,...)
x$setInv(inv)
inv # return the newly computed inverse so the caller sees it
}
|
/cachematrix.R
|
no_license
|
saurabh2086/ProgrammingAssignment2
|
R
| false | false | 1,279 |
r
|
#assignment 1.5
#1. Create an m x n matrix with replicate(m, rnorm(n)) with m=10 column vectors of n=10 elements each,
#constructed with rnorm(n), which creates random normal numbers.
#Then we transform it into a dataframe (thus 10 observations of 10 variables) and perform an algebraic
#operation on each element using a nested for loop: at each iteration, every element referred by the two
#indexes is incremented by a sinusoidal function, compare the vectorized and non-vectorized form of creating
#the solution and report the system time differences.
#Answer 1
#Vectorized form
set.seed(42)
#create matrix
mat_1= replicate(10,rnorm(10))
#transform into data frame
df_1= data.frame(mat_1)
df_1= df_1 + 10*sin(0.75*pi)
#non-vectorized form
set.seed(42)
#create matrix
mat_1= replicate(10,rnorm(10))
#transform into data frame
df_1= data.frame(mat_1)
for(i in 1:10){
for(j in 1:10){
df_1[i,j]<- df_1[i,j] + 10*sin(0.75*pi)
}
}
print(df_1) # print the result once, after the loop, rather than at every iteration
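# Editor's sketch (not part of the original submission): to actually report the
# system time differences the exercise asks for, each form can be timed with
# system.time(), starting both from the same mat_1 defined above:
df_vec <- data.frame(mat_1)
t_vec <- system.time(df_vec <- df_vec + 10*sin(0.75*pi))
df_loop <- data.frame(mat_1)
t_loop <- system.time(
for(i in 1:10){
for(j in 1:10){
df_loop[i,j] <- df_loop[i,j] + 10*sin(0.75*pi)
}
}
)
t_vec
t_loop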
|
/assignment 1.5.R
|
no_license
|
Farhanaaz522/assignment-1.5
|
R
| false | false | 993 |
r
|
## Project: NYSG R/SHH-18 part 1.
## Purpose of study: investigate the effect of pre-growth condition and
## strain diversity on the nisin efficacy against Listeria monocytogenes
## on cold-smoked salmon.
## Purpose of script:
# i) post hoc analysis for the salmon lme model.
# ii) making figures associated with the salmon lme model.
library(tidyverse);library(emmeans);library(lme4);library(plyr);library(lmerTest)
library(car);library(effects) # packages used below (lmer, emmeans, ddply, effect, ggplot)
### Read in data.
pregrowth_salmon <- read.csv("pregrowth_salmon_raw.csv")
pregrowth_salmon$Day <- as.factor(pregrowth_salmon$Day)
### Build salmon lme model.
pregrowth_salmon_lme <- lmer(Log_CFU ~ Nisin*Condition*Serotype + Nisin*Condition*Day +
Nisin*Serotype*Source +
(1|AgeGroup), data = pregrowth_salmon, REML = FALSE)
# Anova table for the salmon lme model.
anova(pregrowth_salmon_lme)
### Make Figure 2.
# Calculate compact numbers on figure 2.
salmon_all_var.emm <- emmeans(pregrowth_salmon_lme, ~Condition+Serotype+Source+Day+Nisin)
nis_day_by_con_ser_sou.cld1 <- cld(salmon_all_var.emm, single = c("Nisin", "Day"), by = c("Condition", "Serotype", "Source"), Letters = LETTERS)
nis_day_by_con_ser_sou.cld1$letters <-trimws(nis_day_by_con_ser_sou.cld1$.group)
nis_day_by_con_ser_sou.cld1_df <- as.data.frame(nis_day_by_con_ser_sou.cld1)
nis_day_by_con_ser_sou.cld1_df %>% arrange(Condition, Serotype, Source, Day, Nisin) -> nis_day_by_con_ser_sou.cld1_df
# Add a strain variable to nis_day_by_con_ser_sou.cld1_df.
fg2_strainvec <- rep(NA, nrow(nis_day_by_con_ser_sou.cld1_df))
fg2_strainvec[which(nis_day_by_con_ser_sou.cld1_df$Serotype=="1/2a"&nis_day_by_con_ser_sou.cld1_df$Source=="Environment")] <- "FSL L4-0396"
fg2_strainvec[which(nis_day_by_con_ser_sou.cld1_df$Serotype=="1/2a"&nis_day_by_con_ser_sou.cld1_df$Source=="Salmon")] <- "FSL F2-0237"
fg2_strainvec[which(nis_day_by_con_ser_sou.cld1_df$Serotype=="1/2b"&nis_day_by_con_ser_sou.cld1_df$Source=="Environment")] <- "FSL L4-0060"
fg2_strainvec[which(nis_day_by_con_ser_sou.cld1_df$Serotype=="1/2b"&nis_day_by_con_ser_sou.cld1_df$Source=="Salmon")] <- "FSL L3-0051"
fg2_strainvec[which(nis_day_by_con_ser_sou.cld1_df$Serotype=="4b"&nis_day_by_con_ser_sou.cld1_df$Source=="Environment")] <- "FSL N1-0061"
fg2_strainvec[which(nis_day_by_con_ser_sou.cld1_df$Serotype=="4b"&nis_day_by_con_ser_sou.cld1_df$Source=="Salmon")] <- "FSL F2-0310"
nis_day_by_con_ser_sou.cld1_df$Strain <- fg2_strainvec
nis_day_by_con_ser_sou.cld1_df <- nis_day_by_con_ser_sou.cld1_df %>% mutate(Strain=factor(Strain, levels = c("FSL L4-0396", "FSL F2-0237", "FSL L4-0060",
                                                                                                "FSL L3-0051", "FSL N1-0061", "FSL F2-0310"))) %>% arrange(Strain)
# Plot figure 2.
high_inoc_fg2 <- ggplot(nis_day_by_con_ser_sou.cld1_df, aes(x=Day, y=emmean, group=Nisin,color=Nisin)) +
scale_fill_manual(values=c("deepskyblue2", "springgreen2")) +
guides(fill=FALSE) +
scale_color_manual(values=c("deepskyblue2", "springgreen2"),name="Nisin Treatment",
breaks=c("Minus", "Plus"),
labels=c("Untreated", "Nisin-treated")) +
facet_grid(Strain~Condition) +
geom_point(position = position_dodge(.2)) +
geom_line(position = position_dodge(.2)) +
geom_text(aes(label=letters, group=Nisin), vjust=0.5, hjust=2, color = "black", size = 3, position = position_dodge(.2)) +
geom_point(data = pregrowth_salmon, aes(x=Day, y=Log_CFU, fill=Nisin, group=AgeGroup),
position = position_dodge(0.1), size=1, color="grey10", shape=23, alpha=.6) +
labs(x="Storage Day", y="Log CFU/g") +
theme(panel.background = element_rect(fill = "grey93", colour = "grey93", size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "white"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "white"))
### Make figure 3.
salmon_lme_ef1 <- effect(term="Nisin*Condition*Day", mod=pregrowth_salmon_lme)
salmon_lme_ef1_df <-as.data.frame(salmon_lme_ef1)
high_inoc_fg3 <- ggplot(data=salmon_lme_ef1_df, aes(x=Day, y=fit, group=Nisin)) +
geom_point(aes(color=Nisin)) +
geom_line(aes(color=Nisin)) +
scale_x_discrete(name="Storage Day",labels = c("1", "15", "30")) +
scale_y_continuous(name = "Log CFU/g") +
geom_ribbon(aes(ymin=fit-se,ymax=fit+se, fill=Nisin),alpha=0.3) +
facet_wrap(~Condition) +
theme(axis.text.x=element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.x=element_text(face='bold', size=14),
axis.title.y=element_text(face='bold', size=14)) +
scale_fill_manual(values=c("deepskyblue2", "springgreen2")) +
guides(fill=FALSE) +
scale_color_manual(values=c("deepskyblue2", "springgreen2"),name="Nisin Treatment",
breaks=c("Minus", "Plus"),
labels=c("Untreated", "Nisin-treated")) +
theme(panel.background = element_rect(fill = "grey98", colour = "grey93", size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "grey93"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "grey93"))
### Post hoc analysis for Nisin:Condition:Day.
### Make figure 4.
nis_con_by_day.emm <- emmeans(pregrowth_salmon_lme, ~Condition+Nisin|Day)
nis_con_by_day.ctr1 <- contrast(nis_con_by_day.emm, interaction = "pairwise",
simple = "Nisin", combine = TRUE, adjust = "tukey")
nis_con_by_day.ctr2_dun <- emmeans(nis_con_by_day.ctr1, specs = trt.vs.ctrl ~ Condition|Day)
nis_con_by_day.ctr2_dun$contrasts
# Manually assign significance label.
# 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1.
nis_con_by_day.ctr1_df <- as.data.frame(nis_con_by_day.ctr1)
nis_con_by_day.ctr1_df$significance <- c("", "*", "", "",
"", "", "***", "*",
"", "**", "***", "***")
# Read in log reduction dataset for plotting.
pregrowth_salmon_LR <- read.csv("pregrowth_salmon_raw_LR.csv")
pregrowth_salmon_LR$Day <- as.factor(pregrowth_salmon_LR$Day)
# Summarise raw data
pregrowth_salmon_LR_con_day <- ddply(pregrowth_salmon_LR, c("Condition", "Day"), summarise,
N = sum(!is.na(Log_RD)),
mean = mean(Log_RD, na.rm = TRUE),
sd = sd(Log_RD, na.rm = TRUE),
se = sd/sqrt(N)
)
# Make annotation dataframe for cond_nis_day plot
fg4_anno_df <- data.frame("Day"=c("1","15","15","30","30","30"), x1 = c(1,1,1,1,1,1), x2 = c(2,3,4,2,3,4),
y1 = c(1.5,2.0,2.0,1.7,1.7,1.7), y2 = c(2.20,2.10,2.20,1.75,1.85,1.95), y3 = c(2.0,1.25,1.5,1.5,1.3,1.1), xstar = c(1.5,2,2.5,1.5,2,2.5),
ystar = c(2.25,2.15,2.25,1.8,1.9,2.0), lab = c("*","***","*","**","***","***"))
# Panel label.
fg4_anno_panel <- data.frame("xpos" = c(.8,.8,.8), "ypos" = c(2.2,2.2,2.2), "Label" = c("A", "B", "C"), "Day" = c("1", "15", "30"))
# Plot figure 4.
high_inoc_fg4 <- ggplot(nis_con_by_day.ctr1_df) +
geom_bar(aes(x=Condition, y=estimate, group=Condition,
fill=Condition), stat = "identity", width = 0.7, alpha=0.6, color="black") +
facet_grid(.~Day) +
scale_x_discrete(name="Pre-growth Condition", labels=c("BHI", "NaCl", "pH", "Quat")) +
scale_y_continuous(name="Log Reduction (Untreated vs Nisin-treated)",
breaks = seq(0,2,.5), limits = c(0,2.3)) +
scale_fill_manual(values=c("limegreen", "deepskyblue2", "coral", "orchid1"),name="Pre-growth Condition") +
theme(axis.text.x=element_text(size=14),
axis.text.y=element_text(size=14),
axis.title.x=element_text(face='bold', size=16),
axis.title.y=element_text(face='bold', size=16)) +
theme(panel.background = element_rect(fill = "grey93", colour = "grey93", size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "white"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "white")) +
geom_pointrange(data = pregrowth_salmon_LR_con_day, aes(x=Condition, y=mean, ymin=mean-se, ymax=mean+se,
group=Condition, fill=Condition),shape=23,size=.3,color="black") +
geom_segment(data = fg4_anno_df, aes(x = x1, xend = x1, y = y1, yend = y2), colour = "black") +
geom_segment(data = fg4_anno_df, aes(x = x2, xend = x2, y = y3, yend = y2), colour = "black") +
geom_segment(data = fg4_anno_df, aes(x = x1, xend = x2, y = y2, yend = y2), colour = "black") +
geom_text(data = fg4_anno_df, aes(x = xstar, y = ystar, label = lab), color = "red") +
geom_text(data = fg4_anno_panel, aes(x=xpos, y=ypos, label=Label), color="black", size=6)
### Make figure 5.
salmon_lme_ef3 <- effect(term="Nisin*Serotype*Source", mod=pregrowth_salmon_lme)
salmon_lme_ef3_df <-as.data.frame(salmon_lme_ef3)
source.labs <- c("Environment", "Finished Product")
names(source.labs) <- c("Environment", "Salmon")
high_inoc_fg5 <- ggplot(data=salmon_lme_ef3_df, aes(x=Serotype, y=fit, group=Nisin)) +
geom_point(aes(color=Nisin)) +
geom_line(aes(color=Nisin)) +
scale_x_discrete(name="Strain (Serotype)", labels = c("(1/2a)", "(1/2b)", "(4b)")) +
scale_y_continuous(name = "Log CFU/g") +
geom_ribbon(aes(ymin=fit-se,ymax=fit+se, fill=Nisin),alpha=0.3) +
facet_wrap(~Source, labeller = labeller(Source = source.labs)) +
theme(axis.text.x=element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.x=element_text(face='bold', size=14),
axis.title.y=element_text(face='bold', size=14)) +
scale_fill_manual(values=c("deepskyblue2", "springgreen2")) +
guides(fill=FALSE) +
scale_color_manual(values=c("deepskyblue2", "springgreen2"),name="Nisin Treatment",
breaks=c("Minus", "Plus"),
labels=c("Untreated", "Nisin-treated")) +
theme(panel.background = element_rect(fill = "grey98", colour = "grey93", size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "grey93"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "grey93"))
### Post hoc analysis for Nisin:Serotype:Source.
### Make table 6.
nis_ser_by_sou.emm <- emmeans(pregrowth_salmon_lme, ~Serotype+Nisin|Source)
nis_ser_by_sou.ctr <- contrast(nis_ser_by_sou.emm, interaction = "pairwise",
simple = "Nisin", combine = TRUE, adjust = "tukey")
nis_ser_by_sou.ctr2 <- contrast(nis_ser_by_sou.ctr, interaction = "pairwise",
simple = "Serotype", combine = TRUE, adjust = "tukey")
### Make figure 6.
salmon_lme_ef2 <- effect(term="Nisin*Condition*Serotype", mod=pregrowth_salmon_lme)
salmon_lme_ef2_df <-as.data.frame(salmon_lme_ef2)
high_inoc_fg6 <- ggplot(data=salmon_lme_ef2_df, aes(x=Condition, y=fit, group=Nisin)) +
geom_point(aes(color=Nisin)) +
geom_line(aes(color=Nisin)) +
scale_x_discrete(name="Pre-growth Condition",labels = c("BHI", "NaCl", "pH", "Quat")) +
scale_y_continuous(name = "Log CFU/g") +
geom_ribbon(aes(ymin=fit-se,ymax=fit+se, fill=Nisin),alpha=0.3) +
facet_wrap(~Serotype) +
theme(axis.text.x=element_text(size=12),
axis.text.y=element_text(size=12),
axis.title.x=element_text(face='bold', size=14),
axis.title.y=element_text(face='bold', size=14)) +
scale_fill_manual(values=c("deepskyblue2", "springgreen2")) +
guides(fill=FALSE) +
scale_color_manual(values=c("deepskyblue2", "springgreen2"),name="Nisin Treatment",
breaks=c("Minus", "Plus"),
labels=c("Untreated", "Nisin-treated")) +
theme(panel.background = element_rect(fill = "grey98", colour = "grey93", size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid',
colour = "grey93"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid',
colour = "grey93")) +
theme(text = element_text(family = "Times New Roman"))
### Post hoc analysis for Nisin:Serotype:Condition.
### Make figure 7.
# Read in raw log reduction data for plotting.
pregrowth_salmon_LR <- read.csv("pregrowth_salmon_raw_LR.csv")
pregrowth_salmon_LR$Day <- as.factor(pregrowth_salmon_LR$Day)
# Summarise raw data for plotting.
pregrowth_salmon_LR_con_sero <- ddply(pregrowth_salmon_LR, c("Condition", "Serotype"), summarise,
N = sum(!is.na(Log_RD)),
mean = mean(Log_RD, na.rm = TRUE),
sd = sd(Log_RD, na.rm = TRUE),
se = sd/sqrt(N)
)
# Post hoc analysis for Nisin:Condition:Serotype
nis_con_by_ser.emm <- emmeans(pregrowth_salmon_lme, ~Nisin+Condition|Serotype)
nis_con_by_ser.emm_df <- as.data.frame(nis_con_by_ser.emm)
nis_con_by_ser.ctr1 <- contrast(nis_con_by_ser.emm, interaction="pairwise", simple="Nisin",
combine=TRUE, adjust="tukey")
nis_con_by_ser.ctr1_df <- as.data.frame(nis_con_by_ser.ctr1)
nis_con_by_ser.ctr2_dun <- emmeans(nis_con_by_ser.ctr1, specs = trt.vs.ctrl ~ Condition|Serotype)
nis_con_by_ser.ctr2_dun$contrasts
# Manually assign the significance level.
# 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1.
nis_con_by_ser.ctr1_df$Significance <- c("","","**","",
"","","","*",
"","","***","*")
# Make a dataframe to denote significance (asterisks)
high_inoc_fg7_anno_df <- data.frame("Serotype"=c("1/2a","1/2b","4b","4b"), x1 = c(1,1,1,1), x2 = c(3,4,3,4),
y1 = c(1.6,1.8,1.7,1.7), y2 = c(1.65,1.85,1.75,1.85), y3 = c(0.8,1.25,1.1,1.1), xstar = c(2,2.5,2,2.5),
ystar = c(1.7,1.9,1.8,1.9), lab = c("**","*","***","*"))
high_inoc_fg7_anno_panel <- data.frame("xpos" = c(.8,.8,.8), "ypos" = c(2,2,2), "Label" = c("A", "B", "C"), "Serotype" = c("1/2a", "1/2b", "4b"))
high_inoc_fg7 <- ggplot(nis_con_by_ser.ctr1_df) +
geom_bar(stat = "identity", aes(x=Condition, y=estimate, group=Condition,
fill=Condition), width = 0.7, alpha=0.6, color="black") +
facet_grid(.~Serotype) +
scale_x_discrete(name="Pre-growth Condition", labels=c("BHI", "NaCl", "pH", "Quat")) +
scale_y_continuous(name="Log Reduction (Untreated vs Nisin-treated)",
breaks = seq(0,2,.5), limits = c(0,2)) +
scale_fill_manual(values=c("limegreen", "deepskyblue2", "coral", "orchid1"),name="Pre-growth Condition") +
theme(axis.text.x=element_text(size=14), axis.text.y=element_text(size=14),
axis.title.x=element_text(face='bold', size=16), axis.title.y=element_text(face='bold', size=16)) +
theme(panel.background = element_rect(fill = "grey93", colour = "grey93", size = 0.5, linetype = "solid"),
panel.grid.major = element_line(size = 0.5, linetype = 'solid', colour = "white"),
panel.grid.minor = element_line(size = 0.25, linetype = 'solid', colour = "white")) +
geom_pointrange(data = pregrowth_salmon_LR_con_sero, aes(x=Condition, y=mean, ymin=mean-se, ymax=mean+se,
group=Condition, fill=Condition),shape=23,size=.3,color="black") +
geom_segment(data = high_inoc_fg7_anno_df, aes(x = x1, xend = x1, y = y1, yend = y2), colour = "black") +
geom_segment(data = high_inoc_fg7_anno_df, aes(x = x2, xend = x2, y = y3, yend = y2), colour = "black") +
geom_segment(data = high_inoc_fg7_anno_df, aes(x = x1, xend = x2, y = y2, yend = y2), colour = "black") +
geom_text(data = high_inoc_fg7_anno_df, aes(x = xstar, y = ystar, label = lab), color = "red") +
geom_text(data = high_inoc_fg7_anno_panel, aes(x=xpos, y=ypos, label=Label), color="black", size=6)
### Make supplementary table 2.
salmon_all_var.emm <- emmeans(pregrowth_salmon_lme, ~Serotype+Source+Condition+Day+Nisin)
nis_by_con_ser_sou_day.ctr <- contrast(salmon_all_var.emm, interaction = "pairwise",
simple = "Nisin", combine = TRUE, adjust = "tukey")
nis_by_con_ser_sou_day.ctr_df <- as.data.frame(nis_by_con_ser_sou_day.ctr) %>% arrange(Condition, Serotype, Source, Day)
# Adding a strain variable to nis_by_con_ser_sou_day.ctr_df.
supp_tb2_strainvec <- rep(NA, nrow(nis_by_con_ser_sou_day.ctr_df))
supp_tb2_strainvec[which(nis_by_con_ser_sou_day.ctr_df$Serotype=="1/2a"&nis_by_con_ser_sou_day.ctr_df$Source=="Environment")] <- "FSL L4-0396"
supp_tb2_strainvec[which(nis_by_con_ser_sou_day.ctr_df$Serotype=="1/2a"&nis_by_con_ser_sou_day.ctr_df$Source=="Salmon")] <- "FSL F2-0237"
supp_tb2_strainvec[which(nis_by_con_ser_sou_day.ctr_df$Serotype=="1/2b"&nis_by_con_ser_sou_day.ctr_df$Source=="Environment")] <- "FSL L4-0060"
supp_tb2_strainvec[which(nis_by_con_ser_sou_day.ctr_df$Serotype=="1/2b"&nis_by_con_ser_sou_day.ctr_df$Source=="Salmon")] <- "FSL L3-0051"
supp_tb2_strainvec[which(nis_by_con_ser_sou_day.ctr_df$Serotype=="4b"&nis_by_con_ser_sou_day.ctr_df$Source=="Environment")] <- "FSL N1-0061"
supp_tb2_strainvec[which(nis_by_con_ser_sou_day.ctr_df$Serotype=="4b"&nis_by_con_ser_sou_day.ctr_df$Source=="Salmon")] <- "FSL F2-0310"
# Attach Strain column to nis_by_con_ser_sou_day.ctr_df
nis_by_con_ser_sou_day.ctr_df$Strain <- supp_tb2_strainvec
nis_by_con_ser_sou_day.ctr_df <- nis_by_con_ser_sou_day.ctr_df[,-1]
# Arrange nis_by_con_ser_sou_day.ctr_df to make supplementary table 2.
nis_by_con_ser_sou_day.ctr_df_subset <- nis_by_con_ser_sou_day.ctr_df[c("Condition", "Day", "Strain", "estimate", "SE")]
nis_by_con_ser_sou_day.ctr_df_subset %>% arrange(Condition, Day, Strain) -> nis_by_con_ser_sou_day.ctr_df_subset
nis_by_con_ser_sou_day.ctr_df_subset$estimate <- round(nis_by_con_ser_sou_day.ctr_df_subset$estimate, digits = 2)
nis_by_con_ser_sou_day.ctr_df_subset$SE <- round(nis_by_con_ser_sou_day.ctr_df_subset$SE, digits = 2)
high_inoc_supp_table2 <- data.frame(matrix(nrow = 6, ncol = 12))
rownames(high_inoc_supp_table2) <- c("FSL F2-0237", "FSL F2-0310", "FSL L3-0051", "FSL L4-0060", "FSL L4-0396", "FSL N1-0061")
condition_vec <- c("BHI", "NaCl", "pH", "Quat")
day_vec <- c(1,15,30)
for (i in 1:4) {
for (j in 1:3) {
colnames(high_inoc_supp_table2)[3*(i-1)+j] <- paste(condition_vec[i],day_vec[j],sep = "_D")
}
}
for (i in 1:ncol(high_inoc_supp_table2)) {
high_inoc_supp_table2[,i] <- nis_by_con_ser_sou_day.ctr_df_subset$estimate[(6*(i-1)+1):(6*(i-1)+6)]
}
high_inoc_supp_table2 <- high_inoc_supp_table2[c(5,1,4,3,6,2),]
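# The figure objects built above (high_inoc_fg2 through high_inoc_fg7) are never written to
# disk in this script. A minimal, optional export step using ggplot2::ggsave() is sketched
# below; the file names and dimensions are illustrative assumptions, not part of the original
# analysis, so the block is disabled by default.
save_figures <- FALSE
if (save_figures) {
  ggsave("high_inoc_fg2.png", plot = high_inoc_fg2, width = 8, height = 10, dpi = 300)
  ggsave("high_inoc_fg4.png", plot = high_inoc_fg4, width = 8, height = 5, dpi = 300)
}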
# End.
|
/hi_salmon_figures.R
|
no_license
|
FSL-MQIP/PregrowthListeria_Nisin
|
R
| false | false | 19,266 |
r
|
# Sum by person, condition, across event
all_pers <- all_events %>%
group_by(.subgrps., DUPERSID, VARSTR, VARPSU, PERWT.yy.F, Condition, count) %>%
summarize_at(vars(SF.yy.X, PR.yy.X, MR.yy.X, MD.yy.X, OZ.yy.X, XP.yy.X),sum) %>% ungroup
PERSdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = all_pers,
nest = TRUE)
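# Sketch (not part of the original script): with the person-level design defined, condition-level
# estimates would typically come from survey::svyby() with svymean()/svytotal(). The outcome
# variable XP.yy.X follows the .yy. templating used above, so the calls stay commented out here
# and should be read as an illustrative assumption rather than the project's actual tabulation step.
# totEXP  <- svyby(~XP.yy.X, by = ~Condition, design = PERSdsgn, FUN = svytotal)
# meanEXP <- svyby(~XP.yy.X, by = ~Condition, design = PERSdsgn, FUN = svymean)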
|
/build_hc_tables/code/r/dsgn/cond_PERS.R
|
permissive
|
RandomCriticalAnalysis/MEPS-summary-tables
|
R
| false | false | 366 |
r
|
misoMatrices <- list.files("/Users/larssteenhuis/miso/matrices/stimuli_events/")
dexSeqMatrices <- "/Users/larssteenhuis/DEXSEQ/matrices/"
misoDexIntersect <- "/Users/larssteenhuis/intersectMisoDex/"
lapply(misoMatrices,function(x){
x <- "Rhizopus_oryzae_24h"
matrixFile <- paste(misoDexIntersect, "intersect_",x ,sep = "")
if (file.exists(matrixFile)){
file.remove(matrixFile)
}
  # loads dexseq results object, refactors to DF, removes NA from subscript
file.create(matrixFile)
stimuli <- substr(x,1,nchar(x)-16)
dexSeq <- read.table(paste("/Users/larssteenhuis/DEXSEQ/matrices/Rhizopus_oryzae.24h.matrix" ,sep = ""), header=T, sep="\t")
miso <- read.table("/Users/larssteenhuis/miso/matrices/stimuli_events/Rhizopus_oryzae_24h_event_names.tsv", header=T, sep="\t")
intersect.misDex <- intersect(dexSeq$EnsemblId, miso$EnsemblId)
#miso$GeneName[miso$EnsemblId == dexSeq$EnsemblId]
lapply(intersect.misDex, function(x){
# gene <- miso[miso$EnsemblId == x,5]
geneName = as.character(miso$GeneName[miso$EnsemblId == x])[[1]]
printLine = paste(x,geneName, sep="\t")
write(printLine,matrixFile, sep = "", append = T)
})
#print(intersect.misDex, miso$GeneName[miso$EnsemblId == dexSeq$EnsemblId])
})
|
/dexseq/scripts/R/makeIntersectionFiles.R
|
no_license
|
Lsteenhuis/visualizingDEStimulatedExons
|
R
| false | false | 1,251 |
r
|
#' Remove duplicates.
#'
#' @section Aesthetics:
#' \Sexpr[results=rd,stage=build]{animint2:::rd_aesthetics("a_stat", "unique")}
#'
#' @export
#' @inheritParams a_layer
#' @inheritParams a_geom_point
#' @examples
#' a_plot(mtcars, a_aes(vs, am)) + a_geom_point(alpha = 0.1)
#' a_plot(mtcars, a_aes(vs, am)) + a_geom_point(alpha = 0.1, a_stat="unique")
a_stat_unique <- function(mapping = NULL, data = NULL,
a_geom = "point", a_position = "identity",
...,
na.rm = FALSE,
show.legend = NA,
inherit.a_aes = TRUE) {
a_layer(
data = data,
mapping = mapping,
a_stat = a_StatUnique,
a_geom = a_geom,
a_position = a_position,
show.legend = show.legend,
inherit.a_aes = inherit.a_aes,
params = list(
na.rm = na.rm,
...
)
)
}
#' @rdname animint2-ggproto
#' @format NULL
#' @usage NULL
#' @export
a_StatUnique <- a_ggproto("a_StatUnique", a_Stat,
compute_panel = function(data, scales) unique(data)
)
|
/R/stat-unique.r
|
no_license
|
vivekktiwari/animint2
|
R
| false | false | 1,104 |
r
|
##Download zip file and save it in a folder on working directory:
if (!file.exists("./Project2")) {dir.create("./Project2")}
site <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(site, "./Project2/EPA.zip", method = "curl")
##Unzip file in the new directory:
unzip("./Project2/EPA.zip", exdir = "./Project2")
##Read both files and check their features:
NEI <- readRDS("./Project2/summarySCC_PM25.rds")
SCC <- readRDS("./Project2/Source_Classification_Code.rds")
str(NEI)
str(SCC)
##Aggregate emissions per year:
year_emissions <- aggregate(Emissions ~ year, NEI, FUN = sum)
##Plot emissions and save into a .png file:
png("plot1.png") ##Default size
with(year_emissions, barplot(height = Emissions/1000, names.arg = year, col = unclass(year),
xlab = "Year", ylab = "PM2.5 in Ktons", main = "Annual Emissions PM2.5 from 1999 to 2008"))
dev.off()
|
/plot1.R
|
no_license
|
alroru95/Exploratory-Data-Analysis-Course-project-2
|
R
| false | false | 915 |
r
|
## app.R ##
library(shiny)
library(tidyverse)
library(leaflet)
library(markdown)
citydata <- read_csv("500_Cities__City-level_Data__GIS_Friendly_Format___2018_release.csv")
citydata <- tidyr::separate(data=citydata,
col=Geolocation,
into=c("Latitude", "Longitude"),
sep=",",
remove=FALSE)
citydata$Latitude <- stringr::str_replace_all(citydata$Latitude, "[(]", "")
citydata$Longitude <- stringr::str_replace_all(citydata$Longitude, "[)]", "")
citydata$Latitude <- as.numeric(citydata$Latitude)
citydata$Longitude <- as.numeric(citydata$Longitude)
citydata$color <- ""
citydata$rank <- 1
##########
ui <- bootstrapPage(
tags$style(
type = "text/css", "html, body {width:100%;height:100%}"
),
leafletOutput(
"mymap", width = "100%", height = "100%"
),
absolutePanel(width = 300,top = 5, right = 5,style="padding: 8px; border: 1px solid #CCC; background: #EEE; opacity: .95",draggable=TRUE,
wellPanel(
HTML(markdownToHTML(fragment.only=TRUE, text=c(
"Welcome! <br> <br> This app lets you view a selection of the
most populous cities in the United States
and how they rank among each other in
several different health conditions. <br><br>
Markers are according to percentile."
),style="background: #FFF")),
HTML(markdownToHTML(fragment.only=TRUE, text=c(
"Green: <75th percentile<br> Orange: >75th percentile<br> Red: >90th percentile"
),style="background: #FFF; float: left; text-align: right"))),
selectInput("healthstat","Select a health statistic:"
,choices=c("Arthritis"="ARTHRITIS_AdjPrev","Binge Drinking"="BINGE_AdjPrev","High Blood Pressure"="BPHIGH_AdjPrev"
,"Cancer"="CANCER_AdjPrev","Current Asthma"="CASTHMA_AdjPrev", "Coronary Heart Disease"="CHD_AdjPrev"
,"Smoking"="CSMOKING_AdjPrev","Diabetes"="DIABETES_AdjPrev","Obesity"="OBESITY_AdjPrev","Stroke"="STROKE_AdjPrev"
)
,multiple=FALSE, selectize=TRUE),
sliderInput("poprange","Population range of visible cities:",40000,3000000,value=c(40000,3000000),step=20000,width=250),
wellPanel(
HTML(markdownToHTML(fragment.only=TRUE, text=c(
"This project utilizes data from <br> <a href=https://chronicdata.cdc.gov/500-Cities/500-Cities-City-level-Data-GIS-Friendly-Format-201/dxpw-cm5u>the CDC's 500 Cities Project</a>")
)
)
)
)
)
server <- function(input,output){
#render map
output$mymap <- renderLeaflet({
leaflet(citydata) %>%
setMaxBounds(lng1 = -162, lat1 = 64, lng2 = -44, lat2 = 17) %>%
addProviderTiles(providers$Esri.WorldImagery,options = providerTileOptions(noWrap = TRUE, minZoom = 4, maxZoom = 8)) %>%
addProviderTiles(providers$Stamen.TonerLines,options = providerTileOptions(noWrap = TRUE, minZoom = 4, maxZoom = 8)) %>%
addProviderTiles(providers$Stamen.TonerLabels,options = providerTileOptions(noWrap = TRUE, minZoom = 4, maxZoom = 8))
})
#Observer to update map markers when a new statistic is selected
observe({
proxy <- leafletProxy("mymap", data = citydata)
#Sort and set marker colors
citydata <<- arrange(citydata,desc(get(input$healthstat)))
citydata[1:50,]$color <<- c("red")
citydata[51:125,]$color <<- c("orange")
citydata[126:500,]$color <<- c("green")
citydata$rank <<- 1:nrow(citydata)
citydata$rank <<- (100 - (round(citydata$rank /5, digits = 0)))
citydata$Population2010 <<- ifelse((citydata$Population2010 > 2999999), 2999999, citydata$Population2010)
citydatasub <<- subset(citydata,(Population2010 > input$poprange[1]))
citydatasub <<- subset(citydatasub,(Population2010 < input$poprange[2]))
#Assign colors to markers
icons <- awesomeIcons(
icon = 'ios-close',
markerColor = citydatasub$color
)
#Set popup text
popups <- paste(
"City: ", citydatasub$PlaceName,", ",citydatasub$StateAbbr, "<br>",
"Prevalence: ", citydatasub[[input$healthstat]],"%","<br>",
"Percentile: ", citydatasub$rank,"%",sep=""
)
#Clear existing markers and draw new ones
proxy %>% clearMarkers()
proxy %>% addAwesomeMarkers(citydatasub$Longitude, citydatasub$Latitude, icon=icons, popup=popups)
})
}
shinyApp(ui, server)
|
/app.R
|
no_license
|
mattdoesdata/cdc-map
|
R
| false | false | 4,884 |
r
|
f <-file.choose()
f
d <- read.table(f, header = TRUE, sep = "\t", stringsAsFactors = FALSE, fill = TRUE)
d
library(readr)
d <- read_tsv(f, col_names = TRUE) # for tab-separated value files
d
library(readxl)
f <- "data/CPDS-1960-2014-reduced.xlsx"
d <- read_excel(f, sheet = 1, col_names = TRUE)
head(d)
library(curl)
f <- curl("https://raw.githubusercontent.com/difiore/ADA-datasets/master/CPDS-1960-2014-reduced.csv")
d <- read.csv(f, header = TRUE, sep = ",", stringsAsFactors = FALSE)
head(d)
library(tidyverse)
library(curl)
library(skimr)
library(summarytools)
library(dataMaid)
library(psych)
library(pastecs)
library(Hmisc)
library(car)
library(GGally)
library(corrplot)
library(magrittr)
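# Quick illustrative use of two of the packages loaded above (not part of the original class
# notes): skimr and psych both give one-line descriptive overviews of a data frame.
skim(d) # per-column summary: type, missingness, mean/sd, inline histogram
psych::describe(d) # item-level descriptives (n, mean, sd, skew, kurtosis); non-numeric columns are flagged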
|
/Class_02_11.R
|
no_license
|
brittaw/ADA
|
R
| false | false | 698 |
r
|
##----------------------------------------------------------------------------------------------------------##
## Cedric Scherer (cedricphilippscherer@gmail.com) ##
## Creating NLMs as .txt files ##
## 2017-02-28 ##
##----------------------------------------------------------------------------------------------------------##
library(NLMR)
library(landscapetools)
source("./R/parameters.R")
## three landscape setups:
## * homogeneous
## * mosaictess with low fragmentation
## * mosaictess landscapes with high fragmentation
##
## mean breeding capacity = 4.5 females
## -> 10 classes between 0 and 10 for heterogeneous setups
## parameters
n_col <- 25
n_row <- 50
## homogeneous setup (only one needed)
hom <- rep(4.5, n_col * n_row)
write(hom, file = paste0("./model/nlms/hom.txt"), sep = " ")
## set seed for reproducible neutral landscape model generation
set.seed(2018)
for (i in 1:n) { ## n = number of replications -> assigned in "./R/parameters.R"
## low number of patches: germs = 15
low <- nlm_mosaictess(ncol = n_col, nrow = n_row, resolution = 1, germs = 15)
low <- util_classify(low, weighting = rep(0.1, 10)) ## 10 classes
write(low@data@values, file = paste0("./model/nlms/frag_low_", i, ".txt"), sep = " ")
## high number of patches: germs = 150
high <- nlm_mosaictess(ncol = n_col, nrow = n_row, resolution = 1, germs = 150)
high <- util_classify(high, weighting = rep(0.1, 10)) ## 10 classes
write(high@data@values, file = paste0("./model/nlms/frag_high_", i, ".txt"), sep = " ")
}
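## Sanity check (not in the original script): read one generated landscape back in and look at
## the class distribution. Reshaping to n_row x n_col is an assumption about how the downstream
## model consumes the flat value vector; write() itself does not fix an orientation.
vals_check <- scan("./model/nlms/frag_low_1.txt")
m_check <- matrix(vals_check, nrow = n_row, ncol = n_col)
table(m_check) # counts per breeding-capacity class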
|
/swifcol/R/nlms.R
|
no_license
|
marcosci/Sciaini_et_al_2018
|
R
| false | false | 1,766 |
r
|
library(seewave)
### Name: dynspec
### Title: Dynamic sliding spectrum
### Aliases: dynspec
### Keywords: dplot ts
### ** Examples
## Not run:
##D data(sheep)
##D require(rpanel)
##D dynspec(sheep,f=8000,wl=1024,ovlp=50,osc=TRUE)
## End(Not run)
|
/data/genthat_extracted_code/seewave/examples/dynspec.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 254 |
r
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ac_ranking.R
\name{centroid.buckets}
\alias{centroid.buckets}
\title{Buckets Centroid}
\usage{
centroid.buckets(buckets, simulations = 1000)
}
\arguments{
\item{buckets}{a list where each element contains the index of the assets in
the respective bucket. The assets within each bucket have no order.
The bucket elements are in ascending order such that
R_bucket_1 < ... < R_bucket_n}
\item{simulations}{number of simulations}
}
\value{
the centroid vector
}
\description{
Compute the centroid for buckets of assets
}
\details{
A common use of buckets is to divide the assets into quartiles or deciles,
but is generalized here for an arbitrary number of buckets and arbitrary
number of assets in each bucket.
}
\author{
Ross Bennett
}
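\examples{
# Illustrative only: the asset indices below are hypothetical and not taken from the package
# documentation. Each list element holds the indices of the assets in one bucket, and the
# buckets are ordered so that R_bucket_1 < R_bucket_2 < R_bucket_3.
\dontrun{
buckets <- list(c(2, 5), c(1, 4, 6), c(3, 7))
c_vec <- centroid.buckets(buckets, simulations = 1000)
}
}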
|
/man/centroid.buckets.Rd
|
no_license
|
arturochian/PortfolioAnalytics-1
|
R
| false | false | 822 |
rd
|
#
# Functions for analysing A. Thaliana Tiling Arrays
# last modified: 05-06-2013
# first written: 03-06-2013
# (c) 2013 GBIC Yalan Bi, Danny Arends, R.C. Jansen
#
#************************************************************* plot exp part *************************************************************#
#plot 4 env separately in 4 panel
setwd("D:/Arabidopsis Arrays")
#load environment file
menvironment <- read.table("Data/ann_env.txt", sep="\t")[,2]
#load genotype file
geno <- read.table("refined map/genotypes.txt",sep="\t", row.names=1, header=TRUE)
ce_threshold = 5.86; threshold_qtl = 8.0; threshold_int = 11.6; cutoffnProbe = 2 # cutoffratio = 0.6 (cutoffratio >= 0.6; cutoffnProbe >= 2 probes)
#*************************************************************** load part ***************************************************************#
#load main/int QTL genes
load(file=paste0("Data/geneticsAS/genelist_QTL", threshold_qtl, "_Int",threshold_int, "_np", cutoffnProbe, ".Rdata"))
#direction selection
probesDir <- function(exp_data = rawexp){
if(unique(exp_data[,"strand"]) == "sense"){
direction_id <- which(exp_data[, "direction"] == "reverse")
}
if(unique(exp_data[,"strand"]) == "complement"){
direction_id <- which(exp_data[, "direction"] == "forward")
}
return(direction_id)
}
plotExpEnvSep <- function(chr, filename, rawexp, newexp, probes_dir, exonID, uniqueExon, ind_tu, nprobes, ASexon, genoProbes, markerToDraw, int, ce_threshold){
#par(mfrow = c(4, 1), pty = "m", oma = c(5, 3, 5, 0.5))
for(env in 1:4){
ind_env <- which(as.numeric(menvironment) == env)
if(env == 1) par(fig = c(0, 1, 1-0.25*env, 1.25-0.25*env), oma = c(5, 3, 5, 0.5), mar = c(0, 4, 0, 2))
else par(fig = c(0, 1, 1-0.25*env, 1.25-0.25*env), oma = c(5, 3, 5, 0.5), mar = c(0, 4, 0, 2), new = TRUE)
plot(c(0.5, nprobes + 0.5), c(min(newexp) - 0.2, max(newexp) + 0.2), xaxt = 'n', xlab = "", ylab = levels(menvironment)[env], cex.axis = 1, cex.lab = 1.5, col.lab = env, las = 1, mgp = c(2.25, 0.5, 0), tck = -0.017, t = "n")
if(env == 1){
title(main = filename, cex.main = 2.5, xlab = "Probes", mgp = c(3, 0.5, 0), cex.lab = 1.5, outer = TRUE)
title(ylab = "Expression Intensity", mgp = c(1, 0.5, 0), cex.lab = 1.5, outer = TRUE)
}
for(p in 1:nprobes){
#background for introns
if(!p %in% ind_tu) rect((p - 0.5), -3, (p + 0.5), max(newexp[ ,ind_env])* 2, col = grey(0.85), border = "transparent")
if(p %in% genoProbes){
if(length(grep(filename, genesIAS[[paste0("chr", chr)]][[env]], value=T)) > 0) points(rep(p, length(ind_env)) + runif(length(ind_env), min = -0.05, max = 0.05), newexp[p,ind_env], t = 'p', col = geno[ ,markerToDraw[1]]+4, pch = 20, cex = 0.75)
else points(rep(p, length(ind_env)) + runif(length(ind_env), min = -0.05, max = 0.05), newexp[p,ind_env], t = 'p', col = env, pch = 20, cex = 0.75)
points(p, mean(unlist(newexp[p,ind_env[geno[ind_env,markerToDraw[1]] == 1]])), pch="*", cex=2, col=5)
points(p, mean(unlist(newexp[p,ind_env[geno[ind_env,markerToDraw[1]] == 2]])), pch="*", cex=2, col=6)
if(int[p,markerToDraw[1]] >= threshold_int && env %in% grep(paste0(filename, "_", rawexp[p, "tu"]), genesIAS[[paste0("chr", chr)]], value=F)){
text(x = p, y = min(newexp)+0.15, labels = round(int[p,markerToDraw[1]], digits = 1), col = env, cex = 0.8)
}
} else if(p %in% probes_dir) points(rep(p, length(ind_env)) + runif(length(ind_env), min = -0.05, max = 0.05), newexp[p,ind_env], t = 'p', col = env, pch = 20, cex = 0.75)
}
for(exon in uniqueExon){
#ind <- judge which probe in exonID is of current exon name (T/F)
ind <- rawexp[exonID, "tu"] == exon
#cat(as.character(exon), "has probes", exonID[ind], "\n")
#if(length(exonID[ind]) >= 3) text(x = median(which(rawexp[ ,"tu"] == exon)), y = max(newexp)-0.1, labels = exon, col = "magenta4", cex = 1)
#use mean/median to test cassette
lines(c(min(which(rawexp[ ,"tu"] == exon))-0.5, max(which(rawexp[ ,"tu"] == exon))+0.5), c(mean(unlist(newexp[exonID[ind],ind_env])), mean(unlist(newexp[exonID[ind],ind_env]))), col = env, lwd = 2)
cExoninEnv <- unlist(lapply(strsplit(rownames(cematrix)[which(cematrix[ ,env] >= ce_threshold)][grepl(filename, rownames(cematrix)[which(cematrix[ ,env] >= ce_threshold)])], "_"), "[[", 2))
if(exon %in% cExoninEnv){
text(x = median(which(rawexp[ ,"tu"] == exon)), y = max(newexp)-0.25, labels=paste0("ce=", round(cematrix[paste0(filename, "_", exon),env], digits = 1)), col = env, cex = 0.8)
}
if(exon %in% ASexon){
text(x = median(which(rawexp[ ,"tu"] == exon)), y = max(newexp)-0.4, labels=paste0("m=", markerToDraw[1]), col = "magenta4", cex = 0.8)
}
}
box()
}
axis(1, at = probes_dir, labels = row.names(rawexp)[probes_dir], mgp=c(2.25, 0.5, 0), cex.axis = 1, las = 2, tck = 0.02)
}
plotcExonExp <- function(chr, filename, ce_threshold){
rawexp <- read.table(paste0("Data/chr", chr, "_norm_hf_cor/", filename, ".txt"), row.names=1, header=T)
newexp <- rawexp[ ,17:164]
int <- read.table(paste0("Data/fullModeMapping/chr", chr, "_norm_hf_cor/", filename, "_FM_Int.txt"), row.names=1, header=T)
#cat(" int loading succeed!\n")
probes_dir <- probesDir(rawexp)
#cat(filename, "\nprobeDir:", probes_dir, "\n")
exonID <- probes_dir[grepl("tu", rawexp[probes_dir,"tu"])]
#cat("exons of right direction:", exonID, "\n")
#uniqueExon <- all tu names of exon probes
uniqueExon <- unique(rawexp[exonID,"tu"])
#cat("tu names:", as.character(uniqueExon), "\n")
ind_tu <- grep("tu", rawexp[ ,"tu"])
nprobes <- nrow(rawexp)
genoProbes <- NULL
for(env in 1:4){
if(length(grep(filename, genesIAS[[paste0("chr", chr)]][[env]], value=T)) > 0 && length(genoProbes) == 0){
ASexon <- unique(unlist(lapply(strsplit(grep(filename, genesIAS[[paste0("chr", chr)]][[env]], value=T), "_"), "[[", 2)))
genoProbes <- exonID[rawexp[exonID,"tu"] %in% ASexon] #use %in% because ASexon can hold more than one exon name
consSigIntMarkers <- as.numeric(unlist(lapply(strsplit(grep(filename, genesIAS[[paste0("chr", chr)]][[env]], value=T), "_"), "[[", 3)))
markerToDraw <- consSigIntMarkers[which(int[genoProbes,consSigIntMarkers] == max(int[genoProbes,consSigIntMarkers]), arr.ind=T)[,2]]
#cat("env", env, ": we are markers, at which there're >= 2 probes with sig Int,", markerToDraw, "\n")
}
}
png(filename = paste0("Data/geneticsAS/plotGeneticsAS/", filename, "_QTL", threshold_qtl, "_Int", threshold_int, "_np", cutoffnProbe, "_4s.png"), width = 960, height = 1728, bg = "white")
plotExpEnvSep(chr, filename, rawexp, newexp, probes_dir, exonID, uniqueExon, ind_tu, nprobes, ASexon, genoProbes, markerToDraw, int, ce_threshold)
dev.off()
}
for(chr in 1:5){
cematrix <- read.table(paste0("Data/cassetteExon/cassetteExon_chr", chr, "_allind.txt"), row.names=1, header=T)
#for all genes which have cassette exons in at least one env
if(length(unlist(genesIAS[[paste0("chr", chr)]])) > 0){
plotGenenames <- sort(unique(unlist(lapply(strsplit(unlist(genesIAS[[paste0("chr", chr)]]), "_"), "[[", 1))))
#filename(AT1G01010)
for(filename in plotGenenames){
plotcExonExp(chr, filename, ce_threshold = 5.86)
}
}
}
|
/functions/old/geneticsAS_s1_plot.r
|
no_license
|
YalanBi/AA
|
R
| false | false | 7,341 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UCTSUpload.R
\name{UCTSAppend}
\alias{UCTSAppend}
\title{Append an xts to an existing UCTS timeseries in Datastream}
\usage{
UCTSAppend(
tsData,
TSCode = "",
MGMTGroup = "ABC",
freq = c("D", "W", "M", "Q", "Y"),
seriesName,
Units = "",
Decimals = 2,
ActPer = c("N", "Y"),
freqConversion = c("ACT", "SUM", "AVG", "END"),
Alignment = c("1ST", "MID", "END"),
Carry = c("YES", "NO", "PAD"),
PrimeCurr = "",
overwrite = TRUE,
strUsername = ifelse(Sys.getenv("DatastreamUsername") != "",
Sys.getenv("DatastreamUsername"), options()$Datastream.Username),
strPassword = ifelse(Sys.getenv("DatastreamPassword") != "",
Sys.getenv("DatastreamPassword"), options()$Datastream.Password),
strServerName = "https://product.datastream.com",
strServerPage = "/UCTS/UCTSMaint.asp"
)
}
\arguments{
\item{tsData}{- an xts (or timeseries object that can be converted to
one) to be uploaded.}
\item{TSCode}{The mnemonic of the target UCTS}
\item{MGMTGroup}{Must have a management group. Only the first
characters will be used.}
\item{freq}{The frequency of the data to be uploaded}
\item{seriesName}{the name of the series}
\item{Units}{Units of the data - can be no more than 12 characters -
excess will be trimmed to that length}
\item{Decimals}{Number of Decimals in the data - a number between 0 and
9 - if outside that range then trimmed}
\item{ActPer}{Whether the values are percentages ("N") or actual
numbers ("Y")}
\item{freqConversion}{How to do any FX conversions}
\item{Alignment}{Alignment of the data within periods}
\item{Carry}{whether to carry data over missing dates}
\item{PrimeCurr}{the currency of the timeseries}
\item{overwrite}{if TRUE then existing data in the UCTS will be overwritten}
\item{strUsername}{your Datastream username}
\item{strPassword}{your Datastream Password}
\item{strServerName}{URL of the Datastream server}
\item{strServerPage}{page on the datastream server}
}
\value{
TRUE if the upload has been a success, otherwise an error message
}
\description{
Uploads and appends an xts into a UCTS in the Datastream Database
}
\details{
This function checks if there is a pre-existing timeseries already in Datastream.
If there is then it will append the xts onto the existing series. If there are any
overlapping dates then depending on the setting of overwrite then the new data
will overwrite the existing data in the UCTS
}
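% Illustrative sketch: the mnemonic, series name and credentials below are
% placeholder values, and the call is wrapped in \dontrun{} because it needs
% a live Datastream connection.
\examples{
\dontrun{
library(xts)
myData <- xts(c(101.2, 102.5),
              order.by = as.Date(c("2020-01-31", "2020-02-29")))
UCTSAppend(myData, TSCode = "TSEXAMPLE", MGMTGroup = "ABC", freq = "M",
           seriesName = "Example series", Units = "Index", Decimals = 2,
           strUsername = "user", strPassword = "pass")
}
}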
|
/man/UCTSAppend.Rd
|
no_license
|
CharlesCara/DatastreamDSWS2R
|
R
| false | true | 2,478 |
rd
|
reconcile_predictions <- function(use_case = 'team_games',
predictions) {
if (use_case == 'team_games') {
}
}
|
/R/reconcile_predictions.R
|
no_license
|
jimtheflash/nba.modelR
|
R
| false | false | 152 |
r
|
\name{gtex.4k}
\alias{gtex.4k}
\docType{data}
\title{
Example data for the spqn package.
}
\description{
A random sample of 4,000 expressed genes (protein-coding or lincRNAs)
from GTEx v6p. The tissue is Adipose Subcutaneous.
}
\usage{data("gtex.4k")}
\format{An object of class \code{SummarizedExperiment}.
}
\details{
Data is 350 samples from GTEx v6p. The tissue is Adipose Subcutaneous.
We first selected protein-coding or lincRNAs based on the supplied annotation
files. Next we kept genes with a median log2(RPKM) expression greater
than zero. This resulted in a data matrix with 12,267 genes of which
11,911 are protein-coding. We stored the mean expression value per gene
in \code{rowData(gtex.4k)$ave_logrpkm}.
We next mean centered and variance scaled the expression values so all
genes have zero mean and variance 1. We then removed 4 principal
components from this data matrix using the
\code{removePrincipalComponents} function from the \code{WGCNA} package.
Finally, we randomly selected 4,000 genes.
Additional information on the genes are present in the
\code{rowData}. The type of gene (lincRNA or protein-coding) is present
in the \code{gene_type} column. The average expression of each gene on
the log2(RPKM)-scale, prior to removing principal components, are
present in the \code{ave_logrpkm} column.
}
\source{
Original data from \url{gtexportal.org}. A script for downloading and
processing the data is included in \code{scripts/gtex.Rmd}.
}
\keyword{datasets}
\examples{
data(gtex.4k)
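# hypothetical exploration of the per-gene annotation described in Details
head(SummarizedExperiment::rowData(gtex.4k)$ave_logrpkm)
table(SummarizedExperiment::rowData(gtex.4k)$gene_type)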
}
|
/man/gtex.4k.Rd
|
no_license
|
hansenlab/spqnData
|
R
| false | false | 1,514 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pphi.R
\name{pphi}
\alias{pphi}
\title{calculate the left-tail probability of phi-divergence under general correlation matrix.}
\usage{
pphi(q, M, k0, k1, s = 2, t = 30, onesided = FALSE)
}
\arguments{
\item{q}{- quantile, must be a scalar.}
\item{M}{- correlation matrix of input statistics (of the input p-values).}
\item{k0}{- search range starts from the k0th smallest p-value.}
\item{k1}{- search range ends at the k1th smallest p-value.}
\item{s}{- the phi-divergence test parameter.}
\item{t}{- numerical truncation parameter.}
\item{onesided}{- TRUE if the input p-values are one-sided.}
}
\description{
calculate the left-tail probability of phi-divergence under general correlation matrix.
}
\examples{
M = toeplitz(1/(1:10)*(-1)^(0:9)) #alternating polynomial decaying correlation matrix
pphi(q=2, M=M, k0=1, k1=5, s=2)
pphi(q=2, M=diag(10), k0=1, k1=5, s=2)
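# hypothetical example: the same call with one-sided input p-values
pphi(q=2, M=diag(10), k0=1, k1=5, s=2, onesided=TRUE)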
}
\references{
1. Hong Zhang, Jiashun Jin and Zheyang Wu. "Distributions and Statistical Power of Optimal Signal-Detection Methods In Finite Cases", submitted.
}
|
/man/pphi.Rd
|
no_license
|
cran/SetTest
|
R
| false | true | 1,150 |
rd
|
library(dplyr)
# Returns data frame with latest nytimes per state covid data.
# Output columns include:
# - date (as a date object)
# - state (as a string)
# - cases
# - newCasesPerDay (might be NA)
loadAndFormatNytimesCovidPerState <- function() {
  # the nytimes file is a plain comma-separated csv with period decimals, so read.csv is the appropriate reader
  covidByState <- read.csv('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv',
                           stringsAsFactors = FALSE)
covidByState$date <- as.Date(covidByState$date)
covidByState2 <- covidByState %>%
group_by(state) %>%
arrange(date, .by_group = TRUE) %>%
mutate(prevDate = lag(date), prevCases = lag(cases))
covidByState2$newCasesPerDay <- (covidByState2$cases - covidByState2$prevCases) / as.numeric(covidByState2$date - covidByState2$prevDate)
covidByState2
}
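# Example usage (hypothetical; commented out because it downloads data):
# covid <- loadAndFormatNytimesCovidPerState()
# latest <- covid %>% group_by(state) %>% filter(date == max(date))
# head(latest[, c("date", "state", "cases", "newCasesPerDay")])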
|
/covid_log_log_diff/functions.R
|
permissive
|
jhofman/covid_log_log_diff
|
R
| false | false | 814 |
r
|
\name{NISTpascalTOinchOfWaterConvtnl}
\alias{NISTpascalTOinchOfWaterConvtnl}
\title{Convert pascal to inch of water, conventional 12}
\usage{NISTpascalTOinchOfWaterConvtnl(pascal)}
\description{\code{NISTpascalTOinchOfWaterConvtnl} converts from pascal (Pa) to inch of water, conventional (inH2O) 12 }
\arguments{
\item{pascal}{pascal (Pa) }
}
\value{inch of water, conventional (inH2O) 12 }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTpascalTOinchOfWaterConvtnl(10)
}
\keyword{programming}
|
/man/NISTpascalTOinchOfWaterConvtnl.Rd
|
no_license
|
cran/NISTunits
|
R
| false | false | 871 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot.rpca}
\alias{plot.rpca}
\title{Screeplot}
\usage{
\method{plot}{rpca}(x, ...)
}
\arguments{
\item{x}{Object returned by the \code{\link[rsvd]{rpca}} function.}
\item{...}{Additional arguments passed to the individual plot functions (see below).}
}
\description{
Creates a screeplot, variables and individual factor maps to
summarize the results of the \code{\link[rsvd]{rpca}} function.
}
\examples{
#
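# hypothetical example: randomized PCA on simulated data, then the summary plots
library(rsvd)
set.seed(123)
A <- matrix(rnorm(100 * 10), nrow = 100, ncol = 10)
res <- rpca(A, k = 5)
plot(res)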
}
\seealso{
\code{\link{ggscreeplot}}, \code{\link{ggcorplot}} , \code{\link{ggindplot}}
}
|
/man/plot.rpca.Rd
|
no_license
|
cran/rsvd
|
R
| false | true | 605 |
rd
|
# Download the file
if (!file.exists("getdata-projectfiles-UCI HAR Dataset.zip"))
{
download.file(url="https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
destfile = "getdata-projectfiles-UCI HAR Dataset.zip")
# Unzip the file
unzip("getdata-projectfiles-UCI HAR Dataset.zip")
}
# 1. Merges the training and the test sets to create one data set.
trainData <- read.table("./UCI HAR Dataset/train/X_train.txt")
trainLabel <- read.table("./UCI HAR Dataset/train/y_train.txt")
trainSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
testData <- read.table("./UCI HAR Dataset/test/X_test.txt")
testLabel <- read.table("./UCI HAR Dataset/test/y_test.txt")
testSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
joinedData <- rbind(trainData, testData)
joinedLabel <- rbind(trainLabel, testLabel)
joinedSubject <- rbind(trainSubject, testSubject)
mergedData <- cbind(joinedSubject, joinedLabel, joinedData)
dim(mergedData) # 10299*563
head(mergedData)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("./UCI HAR Dataset/features.txt")
dim(features) # 561*2
meansd = grep("mean\\(\\)|std\\(\\)", features$V2)
length(meansd) # 66
extractedData <- joinedData[,meansd]
dim(extractedData) # 10299*66
head(extractedData)
# 3. Uses descriptive activity names to name the activities in the data set
activity <- read.table("./UCI HAR Dataset/activity_labels.txt")
activity[, 2] <- tolower(gsub("_", " ", activity[, 2]))
activityLabel <- activity[joinedLabel[, 1], 2]
joinedLabel[, 1] <- activityLabel
names(joinedLabel) <- "Activity"
head(joinedLabel)
# 4. Appropriately labels the data set with descriptive activity names.
names(joinedSubject) <- "Subject"
colnames(extractedData) <- features$V2[meansd]
firstData <- cbind(joinedSubject, joinedLabel, extractedData)
dim(firstData) # 10299*68
head(firstData)
write.table(firstData, "first_dataset.txt") # write out the 1st dataset
# 5. Creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
library("plyr")
secondData <- ddply(firstData, c("Subject","Activity"), colwise(mean))
head(secondData)
write.table(secondData, "tidy_dataset.txt") # write out the 2nd dataset
|
/run_analysis.R
|
no_license
|
ltlow/Getting-And-Cleaning-Data
|
R
| false | false | 2,348 |
r
|
testlist <- list(a = -1L, b = -15007745L, x = c(-1L, NA, -1L, -15066369L, -618987776L, 517726432L, -522133280L, -522125535L, -32L, -522133280L, -522133280L, -522133280L, -522133467L, -522133280L, -1310662432L, -522133280L, -7968L, -522133280L, -522133280L, -522133280L, NA, 0L, 0L, 0L, 110L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610128207-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 357 |
r
|
library(lubridate)
library(dplyr)
url<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp<-tempfile()
download.file(url, temp, method="curl")
data<- read.table(unz(temp, "household_power_consumption.txt"), header = T, sep = ";")
unlink(temp)
rm(temp, url)
data<- mutate(data, dates=dmy(data$Date))
data<- filter(data, dates>=as.Date('2007-02-01') & dates<=as.Date('2007-02-02'))
data$Global_active_power <- sub("?", "", data$Global_active_power, fixed = TRUE) # "?" marks missing values; fixed = TRUE stops "?" being treated as a regex
data$Global_active_power<- as.numeric(data$Global_active_power)
## Histogram of global active power
hist(data$Global_active_power, col="red", main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
##Save it to PNG file with a width of 480 pixels and a height of 480 pixels
dev.copy(png, file="plot1.png", width=480, height=480, units="px")
dev.off()
|
/Plot1.R
|
no_license
|
Yadanaparthiharsha/Individual-household-electric-power-consumption-Data-Set-
|
R
| false | false | 868 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info_utilities.R
\name{res_cendist}
\alias{res_cendist}
\title{Get average distance between H3 cell centers}
\usage{
res_cendist(res = NULL, units = c("m", "km"), fast = TRUE)
}
\arguments{
\item{res}{Integer; Desired H3 resolution. See
\url{https://h3geo.org/docs/core-library/restable/} for allowable values and related dimensions.}
\item{units}{Length unit to report in, either meters or kilometers.}
\item{fast}{Logical; whether to retrieve values from a locally stored table or
recalculate from source.}
}
\value{
Numeric; H3 cell center separation distance.
}
\description{
This function returns the average distance between the center of H3 cells
at a given resolution.
}
\note{
This isn't in the core library but may be useful.
}
\examples{
# Return average H3 cell separation distance at each resolution in kilometers
res_cendist(res = seq(0, 15), units = 'km')
}
|
/man/res_cendist.Rd
|
permissive
|
dcooley/h3jsr
|
R
| false | true | 954 |
rd
|
### R code from vignette source 'rrcov.Rnw'
###################################################
### code chunk number 1: rrcov.Rnw:471-473
###################################################
## set the prompt to "R> " and the continuation to "+ "
options(prompt="R> ", continue="+ ")
###################################################
### code chunk number 2: intro
###################################################
##
## Load the 'rrcov' package and the first two data sets to be
## used throughout the examples
##
library("rrcov")
data("delivery")
delivery.x <- delivery[,1:2] # take only the X part
data("hbk")
hbk.x <- hbk[,1:3] # take only the X part
###################################################
### code chunk number 3: intro-mcd
###################################################
##
## Compute MCD estimates for the delivery data set
## - show() and summary() examples
##
mcd <- CovMcd(delivery.x)
mcd
summary(mcd)
###################################################
### code chunk number 4: intro-plot
###################################################
##
## Example plot of the robust against classical
## distances for the delivery data set
##
plot(mcd, which="dd")
###################################################
### code chunk number 5: intro-S
###################################################
##
## Compute the S-estimates for the delivery data set
## and provide the standard and extended output
##
est <- CovSest(delivery.x, method="bisquare")
est
summary(est)
###################################################
### code chunk number 6: intro-CovRobust
###################################################
##
## Automatically select the appropriate estimator according
## to the problem size - in this example the Stahel-Donoho estimates
## will be selected.
##
est <- CovRobust(delivery.x)
est
###################################################
### code chunk number 7: rrcov.Rnw:1019-1020
###################################################
set.seed(1234)
###################################################
### code chunk number 8: CovControl
###################################################
##
## Controlling the estimation options with a control object
##
control <- CovControlSest(method="biweight")
PcaCov(hbk.x, cov.control=control)
###################################################
### code chunk number 9: CovControl-loop
###################################################
##
## Controlling the estimation options: example
## of looping through several estimators
##
cc <- list(CovControlMcd(), CovControlMest(), CovControlOgk(), CovControlSest(), CovControlSest(method="rocke"))
clist <- sapply(cc, restimate, x=delivery.x)
sapply(clist, data.class)
sapply(clist, getMeth)
###################################################
### code chunk number 10: CovRobust
###################################################
##
## Automatically select the appropriate estimator according
## to the problem size.
##
getMeth(CovRobust(matrix(rnorm(40), ncol=2))) # 20x2 - SDE
getMeth(CovRobust(matrix(rnorm(16000), ncol=8))) # 2000x8 - bisquare S
getMeth(CovRobust(matrix(rnorm(20000), ncol=10))) # 2000x10 - Rocke S
getMeth(CovRobust(matrix(rnorm(200000), ncol=2))) # 100000x2 - OGK
###################################################
### code chunk number 11: CovControl-S
###################################################
##
## Rocke-type S-estimates
##
getMeth(CovRobust(matrix(rnorm(40), ncol=2), control="rocke"))
###################################################
### code chunk number 12: CovControl-CovRobust
###################################################
##
## Specify some estimation parameters through a control object.
## The last command line illustrates the accessor method
## for getting the correlation matrix of the estimate
## as well as a nice formatting method for covariance
## matrices.
##
data("toxicity")
ctrl <- CovControlOgk(smrob = "s_mad", svrob = "qc")
est <- CovRobust(toxicity, ctrl)
round(getCenter(est),2)
as.dist(round(getCorr(est), 2))
###################################################
### code chunk number 13: ex-cov-plot-lattice
###################################################
##
## Distance plot and Chi-square Q-Q plot of the robust and classical distances
## Lattice plots (currently not available in rrcov)
##
library("lattice")
library("grid")
library("rrcov")
data("delivery")
X <- delivery[,1:2]
myPanel.dd <- function(x, y, subscripts, cutoff, ...) {
panel.xyplot(x, y, ...)
panel.abline(h=cutoff,lty="dashed")
n <- length(y)
id.n <- length(which(y>cutoff))
if(id.n > 0){
ind <- sort(y, index.return=TRUE)$ix
ind <- ind[(n-id.n+1):n]
xrange<-range(y)
adj <- (xrange[2]-xrange[1])/20
ltext(x[ind] + adj, y[ind] + adj, ind, cex=1.0)
}
}
myPanel.qchi <- function(x, y, subscripts, cutoff, ...) {
y <- sort(y, index.return=TRUE)
iy <- y$ix
y <- y$x
panel.xyplot(x, y, ...)
panel.abline(0,1,lty="dashed")
n <- length(y)
id.n <- length(which(y>cutoff))
if(id.n > 0){
ind <- (n-id.n+1):n
xrange<-range(y)
adj <- (xrange[2]-xrange[1])/50
ltext(x[ind] + adj, y[ind] + adj, iy[ind], cex=1.0)
}
}
n<-nrow(X)
p <- ncol(X)
cutoff <- sqrt(qchisq(0.975, p))
mm <- covMcd(X)
dd1 <- sqrt(mm$mah) # robust distances
vv<-cov.wt(X)
dd2 <- sqrt(mahalanobis(X,vv$center,vv$cov)) # classical distances
dd<-c(dd1,dd2) # both robust and classical distances
gr <- as.factor(c(rep(1,n), rep(2,n)))
levels(gr)[1] <- "Robust"
levels(gr)[2] <- "Classical"
qq <- sqrt(qchisq(((1:n)-1/3)/(n+1/3), p))
ind.dd <- c(1:n, 1:n)
ind.qchi <- c(qq, qq)
dplot <- xyplot(dd~ind.dd|gr,
cutoff=cutoff,
panel = myPanel.dd,
xlab="Index",
ylab="Mahalanobis distance",
main="Distance Plot",
col = "darkred",
scales=list(cex=1.0))
qplot <- xyplot(dd~ind.qchi|gr,
cutoff=cutoff,
panel = myPanel.qchi,
xlab="Quantiles of the chi-squared distribution",
ylab="Mahalanobis distance",
main="Chi-Square QQ-Plot",
col = "darkred",
scales=list(cex=1.0))
plot(dplot, split = c(1, 1, 2, 1), more = TRUE)
plot(qplot, split = c(2, 1, 2, 1), more = FALSE)
###################################################
### code chunk number 14: ex-cov-plot-ellipse
###################################################
##
## a) scatter plot of the data with robust and classical confidence ellipses.
## b) screeplot presenting the robust and classical eigenvalues
##
data("milk")
usr<-par(mfrow=c(1,2))
plot(CovMcd(delivery[,1:2]), which="tolEllipsePlot", classic=TRUE)
plot(CovMcd(milk), which="screeplot", classic=TRUE)
par(usr)
###################################################
### code chunk number 15: pca-ex-hbk
###################################################
##
## Principal Component Analysis example:
## Plot of the first two principal components of the
## Hawkins, Bradu and Kass data set: classical and robust
##
par(mfrow=c(1,2))
pca <- PcaClassic(hbk.x, k=2)
cpca <- list(center=c(0,0), cov=diag(pca@eigenvalues), n.obs=pca@n.obs)
rrcov:::.myellipse(pca@scores, xcov=cpca, main="Classical", xlab="PC1", ylab="PC2", ylim=c(-30,30), id.n=0)
abline(v=0)
abline(h=0)
text(-29,6,labels="1-10", cex=0.8)
text(-37,-13,labels="14", cex=0.8)
text(-31,-5,labels="11-13", cex=0.8)
hub <- PcaHubert(hbk.x, k=2, mcd=TRUE)
chub <- list(center=c(0,0), cov=diag(hub@eigenvalues), n.obs=hub@n.obs)
rrcov:::.myellipse(hub@scores, xcov=chub, main="Robust (MCD)", xlab="PC1", ylab="PC2", xlim=c(-10,45), ylim=c(-25,15), id.n=0)
abline(v=0)
abline(h=0)
text(30,-9,labels="1-10", cex=0.8)
text(36,-11,labels="11", cex=0.8)
text(42,-4,labels="12", cex=0.8)
text(41,-11,labels="13", cex=0.8)
text(44,-15,labels="14", cex=0.8)
###################################################
### code chunk number 16: pca-classic
###################################################
##
## Classical PCA
##
pca <- PcaClassic(~., data=hbk.x)
pca
summary(pca)
plot(pca)
getLoadings(pca)
###################################################
### code chunk number 17: pca-robust
###################################################
##
## Robust PCA
##
rpca <- PcaGrid(~., data=hbk.x)
rpca
summary(rpca)
###################################################
### code chunk number 18: ex-pca-plot-screeplot
###################################################
##
## Screeplot for classical and robust PCA of the milk data set.
##
usr <- par(mfrow=c(1,2))
screeplot(PcaClassic(milk), type="lines",
main="Screeplot: classical PCA", sub="milk data")
screeplot(PcaHubert(milk), type="lines", main="Screeplot: robust PCA",
sub="milk data")
par(usr)
###################################################
### code chunk number 19: ex-pca-plot-biplot
###################################################
##
## Robust biplot for the UN86 data
##
data("un86")
set.seed(9)
usr<-par(mfrow=c(1,2))
biplot(PcaCov(un86, corr=TRUE, cov.control=NULL),
main="Classical biplot", col=c("gray55", "red"))
biplot(PcaCov(un86, corr=TRUE), main="Robust biplot",
col=c("gray55", "red"))
par(usr)
###################################################
### code chunk number 20: ex-pca-plot-diagplot
###################################################
##
## An example of the classical and robust diagnostic
## plot for the hbk data set
##
usr<-par(mfrow=c(1,2))
plot(PcaClassic(hbk.x, k=2), sub="data set: hbk, k=2")
plot(PcaHubert(hbk.x, k=2), sub="data set: hbk, k=2")
par(usr)
###################################################
### code chunk number 21: ex-pca-plot-distplot
###################################################
##
## If k=p the orthogonal distances are not meaningful and
## simple distance plot of the score distances will be shown
##
usr<-par(mfrow=c(1,2))
plot(PcaClassic(hbk.x, k=3), sub="data set: hbk.x, k=3")
plot(PcaHubert(hbk.x, k=3), sub="data set: hbk.x, k=3")
par(usr)
###################################################
### code chunk number 22: lda-prelim
###################################################
data("diabetes")
###################################################
### code chunk number 23: lda-cloud
###################################################
library("lattice") # load the graphical library
## set different plot symbols - important for black-and-white print
sup.sym <- trellis.par.get("superpose.symbol")
sup.sym$pch <- c(1,2,3,4,5)
trellis.par.set("superpose.symbol", sup.sym)
cloud.plt <- cloud(insulin ~ glucose + sspg, groups = group, data = diabetes, auto.key=TRUE)
###################################################
### code chunk number 24: lda-cloud-fig
###################################################
print(cloud.plt)
###################################################
### code chunk number 25: lda-classic
###################################################
lda <- LdaClassic(group~glucose+insulin+sspg, data=diabetes)
lda
###################################################
### code chunk number 26: lda-classic-predict
###################################################
predict(lda)
###################################################
### code chunk number 27: lda-robust
###################################################
rlda <- Linda(group~glucose+insulin+sspg, data=diabetes)
rlda
rlda.predict <- predict(rlda)
cat("\nApparent error rate: ", round(rrcov:::.AER(rlda.predict@ct),4))
###################################################
### code chunk number 28: qda-robust
###################################################
rqda <- QdaCov(group~glucose+insulin+sspg, data=diabetes)
rqda
rqda.predict <- predict(rqda)
cat("\nApparent error rate: ", round(rrcov:::.AER(rqda.predict@ct),4))
|
/resources/app/R-Portable-Win/library/rrcov/doc/rrcov.R
|
permissive
|
tim7en/SedSatV2_electron
|
R
| false | false | 12,216 |
r
|
context("People data")
sample_person <- "peter-dinklage"
multi_person <- c("peter-dinklage", "maisie-williams")
test_that("trakt.people.summary returns data.frame", {
expect_is(trakt.people.summary(target = sample_person, extended = "min"), "data.frame")
expect_is(trakt.people.summary(target = sample_person, extended = "full"), "data.frame")
expect_is(trakt.people.summary(target = multi_person, extended = "min"), "data.frame")
})
test_that("trakt.people.movies returns data.frame", {
people.movies.min <- trakt.people.movies(target = sample_person, extended = "min")
expect_is(people.movies.min, "list")
expect_is(people.movies.min$cast, "data.frame")
people.movies.full <- trakt.people.movies(target = sample_person, extended = "full")
expect_is(people.movies.full, "list")
expect_is(people.movies.full$cast, "data.frame")
})
test_that("trakt.people.shows returns data.frame", {
people.shows.min <- trakt.people.shows(target = sample_person, extended = "min")
expect_is(people.shows.min, "list")
expect_is(people.shows.min$cast, "data.frame")
people.shows.full <- trakt.people.shows(target = sample_person, extended = "full")
expect_is(people.shows.full, "list")
expect_is(people.shows.full$cast, "data.frame")
})
|
/tests/testthat/test_people-data.R
|
no_license
|
cran/tRakt
|
R
| false | false | 1,277 |
r
|
context("People data")
sample_person <- "peter-dinklage"
multi_person <- c("peter-dinklage", "maisie-williams")
test_that("trakt.people.summary returns data.frame", {
expect_is(trakt.people.summary(target = sample_person, extended = "min"), "data.frame")
expect_is(trakt.people.summary(target = sample_person, extended = "full"), "data.frame")
expect_is(trakt.people.summary(target = multi_person, extended = "min"), "data.frame")
})
test_that("trakt.people.movies returns data.frame", {
people.movies.min <- trakt.people.movies(target = sample_person, extended = "min")
expect_is(people.movies.min, "list")
expect_is(people.movies.min$cast, "data.frame")
people.movies.full <- trakt.people.movies(target = sample_person, extended = "full")
expect_is(people.movies.full, "list")
expect_is(people.movies.full$cast, "data.frame")
})
test_that("trakt.people.shows returns data.frame", {
people.shows.min <- trakt.people.shows(target = sample_person, extended = "min")
expect_is(people.shows.min, "list")
expect_is(people.shows.min$cast, "data.frame")
people.shows.full <- trakt.people.shows(target = sample_person, extended = "full")
expect_is(people.shows.full, "list")
expect_is(people.shows.full$cast, "data.frame")
})
|
#Error calculation for Model 1 across all plot sizes
#Load Model 1 predicted dbh distributions
load(file="PredictedDbhDistbn_Mod1.RData")
#Load observed dbh distributions
load(file="ObservedDbhDistbn.RData")
#Load observed Dominant height and age data
load(file="Dominant Height and Age Dataset_Model 1.Rdata")
#========== PLOT SIZE 0.05-ACRE =========================================================================
#Combine predicted & observed distribution data
#Make dbhClass & MeasmtObs factor variables
standTableAllPlots_05$dbhClass<-as.factor(standTableAllPlots_05$dbhClass)
standTableAllPlots_05$MeasmtObs<-as.factor(standTableAllPlots_05$MeasmtObs)
#Merge the datasets
Plot05<-merge(Obs05,standTableAllPlots_05,all=T)
Plot05$plotSize<-NULL
#Convert NA TPAs to zero
Plot05$tpaObs<-ifelse(is.na(Plot05$tpaObs),0,Plot05$tpaObs)
Plot05$tpaPredMom<-ifelse(is.na(Plot05$tpaPredMom),0,Plot05$tpaPredMom)
Plot05$tpaPredPct<-ifelse(is.na(Plot05$tpaPredPct),0,Plot05$tpaPredPct)
Plot05$tpaPredHyb<-ifelse(is.na(Plot05$tpaPredHyb),0,Plot05$tpaPredHyb)
Plot05Final<-subset(Plot05,!(tpaObs==0&tpaPredMom==0&tpaPredPct==0&tpaPredHyb==0))
#========== PLOT SIZE 0.10-ACRE =========================================================================
#Combine predicted & observed distribution data
#Make dbhClass & MeasmtObs factor variables
standTableAllPlots_10$dbhClass<-as.factor(standTableAllPlots_10$dbhClass)
standTableAllPlots_10$MeasmtObs<-as.factor(standTableAllPlots_10$MeasmtObs)
#Merge the datasets
Plot10<-merge(Obs10,standTableAllPlots_10,all=T)
Plot10$plotSize<-NULL
#Convert NA TPAs to zero
Plot10$tpaObs<-ifelse(is.na(Plot10$tpaObs),0,Plot10$tpaObs)
Plot10$tpaPredMom<-ifelse(is.na(Plot10$tpaPredMom),0,Plot10$tpaPredMom)
Plot10$tpaPredPct<-ifelse(is.na(Plot10$tpaPredPct),0,Plot10$tpaPredPct)
Plot10$tpaPredHyb<-ifelse(is.na(Plot10$tpaPredHyb),0,Plot10$tpaPredHyb)
Plot10Final<-subset(Plot10,!(tpaObs==0&tpaPredMom==0&tpaPredPct==0&tpaPredHyb==0))
#========== PLOT SIZE 0.15-ACRE =========================================================================
#Combine predicted & observed distribution data
#Make dbhClass & MeasmtObs factor variables
standTableAllPlots_15$dbhClass<-as.factor(standTableAllPlots_15$dbhClass)
standTableAllPlots_15$MeasmtObs<-as.factor(standTableAllPlots_15$MeasmtObs)
#Merge the datasets
Plot15<-merge(Obs15,standTableAllPlots_15,all=T)
Plot15$plotSize<-NULL
#Convert NA TPAs to zero
Plot15$tpaObs<-ifelse(is.na(Plot15$tpaObs),0,Plot15$tpaObs)
Plot15$tpaPredMom<-ifelse(is.na(Plot15$tpaPredMom),0,Plot15$tpaPredMom)
Plot15$tpaPredPct<-ifelse(is.na(Plot15$tpaPredPct),0,Plot15$tpaPredPct)
Plot15$tpaPredHyb<-ifelse(is.na(Plot15$tpaPredHyb),0,Plot15$tpaPredHyb)
Plot15Final<-subset(Plot15,!(tpaObs==0&tpaPredMom==0&tpaPredPct==0&tpaPredHyb==0))
#========== PLOT SIZE 0.20-ACRE =========================================================================
#Combine predicted & observed distribution data
#Make dbhClass & MeasmtObs factor variables
standTableAllPlots_20$dbhClass<-as.factor(standTableAllPlots_20$dbhClass)
standTableAllPlots_20$MeasmtObs<-as.factor(standTableAllPlots_20$MeasmtObs)
#Merge the datasets
Plot20<-merge(Obs20,standTableAllPlots_20,all=T)
Plot20$plotSize<-NULL
#Convert NA TPAs to zero
Plot20$tpaObs<-ifelse(is.na(Plot20$tpaObs),0,Plot20$tpaObs)
Plot20$tpaPredMom<-ifelse(is.na(Plot20$tpaPredMom),0,Plot20$tpaPredMom)
Plot20$tpaPredPct<-ifelse(is.na(Plot20$tpaPredPct),0,Plot20$tpaPredPct)
Plot20$tpaPredHyb<-ifelse(is.na(Plot20$tpaPredHyb),0,Plot20$tpaPredHyb)
Plot20Final<-subset(Plot20,!(tpaObs==0&tpaPredMom==0&tpaPredPct==0&tpaPredHyb==0))
#========== PLOT SIZE 0.25-ACRE =========================================================================
#Combine predicted & observed distribution data
#Make dbhClass & MeasmtObs factor variables
standTableAllPlots_25$dbhClass<-as.factor(standTableAllPlots_25$dbhClass)
standTableAllPlots_25$MeasmtObs<-as.factor(standTableAllPlots_25$MeasmtObs)
#Merge the datasets
Plot25<-merge(Obs25,standTableAllPlots_25,all=T)
Plot25$plotSize<-NULL
#Convert NA TPAs to zero
Plot25$tpaObs<-ifelse(is.na(Plot25$tpaObs),0,Plot25$tpaObs)
Plot25$tpaPredMom<-ifelse(is.na(Plot25$tpaPredMom),0,Plot25$tpaPredMom)
Plot25$tpaPredPct<-ifelse(is.na(Plot25$tpaPredPct),0,Plot25$tpaPredPct)
Plot25$tpaPredHyb<-ifelse(is.na(Plot25$tpaPredHyb),0,Plot25$tpaPredHyb)
Plot25Final<-subset(Plot25,!(tpaObs==0&tpaPredMom==0&tpaPredPct==0&tpaPredHyb==0))
#==============================================
#Merging Error tables with Dominant height and Age dataset
FinalErr_05<-merge(Plot05Final, HtDat_05)
FinalErr_10<-merge(Plot10Final, HtDat_10)
FinalErr_15<-merge(Plot15Final, HtDat_15)
FinalErr_20<-merge(Plot20Final, HtDat_20)
FinalErr_25<-merge(Plot25Final, HtDat_25)
#Saving final tables into RData object
save(FinalErr_05, FinalErr_10, FinalErr_15, FinalErr_20, FinalErr_25, file="Error Tables_Model 1.RData")
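#==============================================
# Note: the per-plot-size blocks above repeat the same merge/clean pattern.
# The sketch below factors that pattern into a helper purely for illustration;
# the helper name cleanPlotTpas and its reliance on the existing column names
# tpaObs/tpaPredMom/tpaPredPct/tpaPredHyb are assumptions, not part of the
# original analysis.
cleanPlotTpas <- function(obs, pred) {
  # Make dbhClass & MeasmtObs factor variables
  pred$dbhClass <- as.factor(pred$dbhClass)
  pred$MeasmtObs <- as.factor(pred$MeasmtObs)
  # Merge observed and predicted distributions
  out <- merge(obs, pred, all = TRUE)
  out$plotSize <- NULL
  # Convert NA TPAs to zero
  tpaCols <- c("tpaObs", "tpaPredMom", "tpaPredPct", "tpaPredHyb")
  out[tpaCols] <- lapply(out[tpaCols], function(x) ifelse(is.na(x), 0, x))
  # Drop rows where every TPA column is zero
  subset(out, !(tpaObs == 0 & tpaPredMom == 0 & tpaPredPct == 0 & tpaPredHyb == 0))
}
# e.g. Plot05Final <- cleanPlotTpas(Obs05, standTableAllPlots_05)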
|
/Prediction Error Calculation Mod 1.R
|
no_license
|
Fedzzy/Diameter-Class-Growth-and-Yield---Thesis
|
R
| false | false | 5,027 |
r
|
# This function converts temperatures in Celsius to Fahrenheit.
# Although I mainly use temperatures in Celsius in my data analysis,
# I have had instances where the hydrolab (what I am using to collect
# abiotic data in the field) accidentally gets set to Fahrenheit instead
# of Celsius. I have then had to convert those values from Fahrenheit to
# Celsius for data consistency, so a function like this could come in
# handy going forward.
install.packages("tidyverse")
library(tidyverse)
library(dplyr)
project_data <- read_csv("/cloud/project/convert_temp/CopyR_project_3_data.csv")
View(project_data)
no_na <- project_data %>%
drop_na(Temp)
c_to_f <- function(temp) {
f <- ((temp*9/5) + 32)
return(f)}
c_to_f(no_na$Temp)
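# Since the note above mentions converting Fahrenheit readings back to
# Celsius for consistency, here is a sketch of the inverse helper. The
# function name f_to_c is an assumption (not part of the original script).
f_to_c <- function(temp) {
  celsius <- (temp - 32) * 5/9
  return(celsius)
}
# f_to_c(c_to_f(no_na$Temp)) should recover the original Celsius values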
|
/convert_temp/Celsius_to_Fahrenheit.R
|
no_license
|
kearstinfindley/Project3
|
R
| false | false | 738 |
r
|
\alias{gtkTreeStoreSet}
\name{gtkTreeStoreSet}
\title{gtkTreeStoreSet}
\description{Sets the value of one or more cells in the row referenced by \code{iter}.
The variable argument list should contain integer column numbers,
each column number followed by the value to be set.
The list is terminated by a -1. For example, to set column 0 with type
\code{G_TYPE_STRING} to "Foo", you would write
\code{gtk_tree_store_set (store, iter, 0, "Foo", -1)}.}
\usage{gtkTreeStoreSet(object, iter, ...)}
\arguments{
\item{\code{object}}{[\code{\link{GtkTreeStore}}] the tree store to modify}
\item{\code{iter}}{[\code{\link{GtkTreeIter}}] A valid \code{\link{GtkTreeIter}} for the row being modified}
\item{\code{...}}{pairs of integer column numbers and the values to set them to, terminated by -1}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkTreeStoreSet.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false | false | 687 |
rd
|
plot.l1pca <- function(x, ...) {
if(!inherits(x,"l1pca"))
stop("Not an l1pca object")
if(ncol(x$scores) == 1)
stop("Need scores in at least two dimensions")
plot(x$scores[,1:2])
}
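# Minimal usage sketch: any object that inherits from "l1pca" and carries a
# numeric 'scores' matrix with at least two columns can be plotted. The mock
# object below uses hypothetical data and is purely illustrative (it is not
# the package's own constructor).
# fit <- structure(list(scores = matrix(rnorm(20), ncol = 2)), class = "l1pca")
# plot(fit)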
|
/R/plot.l1pca.R
|
no_license
|
cran/pcaL1
|
R
| false | false | 198 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internal_fluo_est.R
\name{distfromcenter}
\alias{distfromcenter}
\title{distfromcenter}
\usage{
distfromcenter(data, center)
}
\arguments{
\item{data}{Data matrix. A 2-dimensional location.}
\item{center}{Data matrix. A second 2-dimensional location.}
}
\value{
The Euclidean distance between the two locations
}
\description{
It calculates the Euclidean distance between two 2-dimensional locations.
}
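\details{
For locations \eqn{(x_1, y_1)} and \eqn{(x_2, y_2)} the returned value is
\deqn{d = \sqrt{(x_1 - x_2)^2 + (y_1 - y_2)^2}.}
}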
\keyword{internal}
|
/man/distfromcenter.Rd
|
no_license
|
dianalow/CONFESS
|
R
| false | true | 501 |
rd
|
# Script that checks which images are actually embedded in any file,
# given one or several folders of rmd files and a folder of images.
#
# Images that are not found to be embedded in any file, are removed from
# the image folder (either deleted or moved to a given folder)
remove_images <-
function(img, files, delete=FALSE, img_archive=NULL) {
require(stringr)
if (!delete & is.null(img_archive)){
stop("Path to image archive is missing!\n")
}
# get a list of all the images
images <- list.files(path = img)
images_paths <- list.files(path = img,
full.names = TRUE)
# read all the files as strings
files_found <- unlist(lapply(files,
list.files,
full.names = TRUE, recursive = TRUE,
pattern= "\\.Rmd"))
docs <- lapply(files_found, readLines)
docs <- lapply(docs, paste, collapse = " ")
    # find images in files (match file names literally, not as regular expressions)
    matches <- unique(unlist(lapply(docs, str_which, pattern = fixed(images))))
    images_not_used <- images_paths[-matches]
if (!delete) {
# move the unused images
copied <- file.copy(from = images_not_used,
to = img_archive)
if (all(copied)){
lapply(images_not_used, unlink)
}
} else {
lapply(images_not_used, unlink)
}
}
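# Example call (directory names are hypothetical placeholders for this sketch):
# remove_images(img = "static/img",
#               files = c("content/posts", "content/pages"),
#               delete = FALSE,
#               img_archive = "static/img_archive")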
|
/code/remove_images.R
|
no_license
|
oskarmwagner/datahandling
|
R
| false | false | 1,428 |
r
|
library(readr)
library(dplyr)
# Remove duplicate Admission IDs from TrackVia in the Utilizations table
# Remove blank subscriber_id
# Read in data
util <- read.csv("utilization.csv")
# Get duplicates
util$dup <- duplicated(util$admission_id) | duplicated(util$admission_id, fromLast = TRUE)
# Subset duplicates
util_dupes <- subset(util, util$dup)
# Select rows where inp6mo is NA so they can be removed
util_dupes_remove <- subset(util_dupes, !complete.cases(util_dupes[,11]))
# Remove util_dupes_remove from util
util_csv <- anti_join(util, util_dupes_remove)
# Remove null subscriber ID
sub_id <- subset(util, util$subscriber_id == " ")
util_csv <- anti_join(util_csv, sub_id)
# Remove util$dup column
util_csv$dup <- NULL
# Write CSV
write.csv(util_csv, "utilization.csv", row.names = F, na = "")
|
/utilization.R
|
no_license
|
CamdenCoalitionOfHealthcareProviders/database
|
R
| false | false | 831 |
r
|
#' An expert rule based system using Reduction Based on Significance
#'
#' @name CREA.RBS
#' @description CREA-RBS is a rule reduction method for allocating a significance value to each rule in the system so that experts
#' may select the rules that should be considered as preferable and understand the exact degree of correlation between the different
#' rule attributes.
#'
#' @param formula a formula of the form y ~ x1 + x2 + ...
#' @param data the data frame that contains the variables specified in \code{formula}.
#'
#'@details Significance is calculated from the antecedent frequency and rule frequency parameters for each rule; if the first
#' one is above the minimal level and rule frequency is in a critical interval, its significance ratio is computed by the algorithm.
#' These critical boundaries are calculated by an incremental method and the rule space is divided according to them.
#' The significance function is defined for these intervals.
#'
#' @return A MLA object of subclass CREA-RBS
#'
#'@references Almiñana, M., Escudero, L. F., Pérez-Martín, A., Rabasa, A., & Santamaría, L. (2014). A classification rule reduction algorithm based on significance domains. Top, 22(1), 397-418.
#'
#' @examples
#' ## Load a Dataset
#' data(EGATUR)
#' ## Generate a CREA-RBS model; remember that only discretized variables are supported
#' CREA.RBS(GastoTotalD~pais+aloja+motivo,data=EGATUR)
#'
#'
#'
#' @importFrom magrittr "%>%"
#' @importFrom utils globalVariables
#' @import formula.tools
#' @import dplyr magrittr
#'
utils::globalVariables(c("totalRegla","total.casos"))
#' @export
CREA.RBS <- function(formula, data) {
# mf <- match.call(expand.dots = FALSE)
# m <- match(c("formula", "data"), names(mf), 0L)
# mf <- mf[c(1L, m)]
# mf$drop.unused.levels <- TRUE
# mf[[1L]] <- quote(stats::model.frame)
# mf <- eval(mf, parent.frame())
# Is discrete??
# apply(data,2,FUN=class)
## Process the formula
responsevariable <- as.character(formula.tools::lhs(formula))
dependentvariable <- as.character(formula.tools::rhs.vars(formula, data = data))
## Detect the Response Variable and Save
Y <- data[, responsevariable]
Ec <- 1 / length(unique(data[, responsevariable]))
Es <- 1 / prod(sapply(lapply(data[, dependentvariable], levels), length))
## Length of Dataset
length_data <- nrow(data)
data <- data[, c(dependentvariable, responsevariable)]
Rules <- data %>%
dplyr::group_by_at(dplyr::vars(c(dependentvariable, responsevariable))) %>%
dplyr::summarise(
"totalRegla" = n()
# ,
# Confianza=ElementosIguales/total.casos
)
Rules2 <- data %>%
dplyr::group_by_at(dplyr::vars(c(dependentvariable))) %>%
dplyr::summarise(
"total.casos" = n(),
Support = n() / length_data
)
RulesOut <- merge(x = Rules, y = Rules2, by = dependentvariable) %>%
dplyr::mutate(Confidence = totalRegla / total.casos)
Eci <- mean(RulesOut$Confidence[RulesOut$Confidence <= Ec])
Ecs <- mean(RulesOut$Confidence[RulesOut$Confidence > Ec])
Esi <- mean(RulesOut$Support[RulesOut$Support <= Es])
Ess <- mean(RulesOut$Support[RulesOut$Support > Es])
RulesOut$Region <- 0
RulesOut$Region[which(RulesOut$Confidence <= Eci & RulesOut$Support >= Ess)] <- 1
RulesOut$Region[which(RulesOut$Confidence >= Ecs & RulesOut$Support >= Ess)] <- 2
RulesOut$Region[which(RulesOut$Support <= Esi)] <- 3
rules_reg1 <- which(RulesOut$Region == 1)
rules_reg2 <- which(RulesOut$Region == 2)
rules_reg3 <- which(RulesOut$Region == 3)
rules_reg0 <- which(RulesOut$Region == 0)
  ### Number of rules in each region
rules_reg1_size <- length(rules_reg1)
rules_reg2_size <- length(rules_reg2)
rules_reg3_size <- length(rules_reg3)
rules_reg0_size <- length(rules_reg0)
  ### Readjust the region boundaries
  ### Use try/catch and cancel if the value is Inf; or better, do not readjust if the length in the region is 0
if(!is.infinite(max(RulesOut$Support[rules_reg3]))) Esi <- max(RulesOut$Support[rules_reg3])
if(!is.infinite(min(RulesOut$Support[c(rules_reg1, rules_reg2)]))) Ess <- min(RulesOut$Support[c(rules_reg1, rules_reg2)])
if(!is.infinite(max(RulesOut$Confidence[rules_reg1]))) Eci <- max(RulesOut$Confidence[rules_reg1])
if(!is.infinite(min(RulesOut$Confidence[rules_reg2]))) Ecs <- min(RulesOut$Confidence[rules_reg2])
## Rule Significance
RulesOut$Importance <- 0
RulesOut$Importance[rules_reg1] <- -1+RulesOut$Support[rules_reg1]* RulesOut$Confidence[rules_reg1]
## RulesOut$Importance[rules_reg1] <- - 1 - RulesOut$Support[rules_reg1] * RulesOut$Confidence[rules_reg1]
RulesOut$Importance[rules_reg2] <- RulesOut$Support[rules_reg2]* RulesOut$Confidence[rules_reg2]
## RulesOut$Importance[rules_reg2] <- 1 + RulesOut$Support[rules_reg2] * RulesOut$Confidence[rules_reg2]
RulesOut$Importance[rules_reg3] <- -2 + RulesOut$Support[rules_reg3]* RulesOut$Confidence[rules_reg3]
## RulesOut$Importance[rules_reg3] <- RulesOut$Support[rules_reg3] * RulesOut$Confidence[rules_reg3]
ACI_Num <- (rules_reg1_size +
rules_reg2_size +
rules_reg3_size) / nrow(RulesOut)
############# Region 1 + Region 3 + Region 2
ACI_Denom <- (1 - Ess) * Eci + Esi*1 + (1 - Ecs) * (1 - Ess)
aci <- ACI_Num / ACI_Denom
  # How to plot it
# plot(RulesOut$Confidence,RulesOut$Support,xlab = "Confidence",ylab = "Support")
# abline(v=Ecs,lty=2)
# abline(v=Eci,lty=2)
# abline(h=Esi,lty=2)
# abline(h=Ess,lty=2)
  ## Still pending: sort the rules and remove those from region 0, or drop them in the print method
RulesOutput <- RulesOut[-rules_reg0,]
RulesOutput <- RulesOutput[order(RulesOutput$Importance, decreasing = TRUE),]
rownames(RulesOutput) <- 1:nrow(RulesOutput)
out <- list(Subclass="CREARBS",
Model = RulesOutput,
ACI = aci,
              Axis = list(
Ess = Ess,
Esi = Esi,
Eci = Eci,
Ecs = Ecs
)
)
class(out) <- "MLA"
return(out)
}
#' @export
print.CREA_RBS <- function(x, first=100, digits = getOption("digits"), ...) {
  if (!inherits(x, "CREA_RBS")) stop("Not a legitimate \"CREA_RBS\" object")
if(nrow(x[[1]])>100){
printable_rules <- x[[1]][1:first,]
} else {
printable_rules <- x[[1]]
}
print(printable_rules, digits = digits)
}
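# A minimal usage sketch based on the example in the roxygen header above; the
# accessed elements ($Model, $ACI) follow the list returned by CREA.RBS.
# data(EGATUR)
# rules <- CREA.RBS(GastoTotalD ~ pais + aloja + motivo, data = EGATUR)
# head(rules$Model)   # rules ordered by decreasing significance (Importance)
# rules$ACI           # the ACI value computed above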
|
/R/CREARBS.R
|
no_license
|
datascienceumh/MachineLearning
|
R
| false | false | 6,242 |
r
|
# usage:
# R --slave --vanilla --file=ImpreciseDEASMAACCREfficienciesCLI_XMCDAv2.R --args "[inDirectory]" "[outDirectory]"
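# Example invocation (directory names are placeholders); the optional third
# argument is read below as a random seed:
# R --slave --vanilla --file=ImpreciseDEASMAACCREfficienciesCLI_XMCDAv2.R --args "./in" "./out" 12345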
rm(list=ls())
# tell R to use the rJava package and the RXMCDA3 package
library(hitandrun)
library(rJava)
library(XMCDA3)
# cf. http://stackoverflow.com/questions/1815606/rscript-determine-path-of-the-executing-script
script.dir <- function() {
cmdArgs <- commandArgs(trailingOnly = FALSE)
needle <- "--file="
match <- grep(needle, cmdArgs)
if (length(match) > 0) {
# Rscript
return(dirname(normalizePath(sub(needle, "", cmdArgs[match]))))
} else {
# 'source'd via R console
return(dirname(normalizePath(sys.frames()[[1]]$ofile)))
}
}
if(length(commandArgs(trailingOnly=TRUE)) > 2
&& !is.na(as.numeric(commandArgs(trailingOnly=TRUE)[3]))){
set.seed(as.numeric(commandArgs(trailingOnly=TRUE)[3]))
}
# load the R files in the script's directory
script.wd <- setwd(script.dir())
source("utils.R")
source("inputsHandler.R")
source("outputsHandler.R")
# restore the working directory so that relative paths passed as
# arguments work as expected
if (!is.null(script.wd)) setwd(script.wd)
# get the in and out directories from the arguments
inDirectory <- commandArgs(trailingOnly=TRUE)[1]
outDirectory <- commandArgs(trailingOnly=TRUE)[2]
# filenames
unitsFile <- "units.xml"
inputsOutputsFile <- "inputsOutputs.xml"
performanceTableFile <- "performanceTable.xml"
maxPerformanceTableFile <- "maxPerformanceTable.xml"
weightsLinearConstraintsFile <- "weightsLinearConstraints.xml"
methodParametersFile <- "methodParameters.xml"
efficiencyDistributionFile <- "efficiencyDistribution.xml"
maxEfficiencyFile <- "maxEfficiency.xml"
minEfficiencyFile <- "minEfficiency.xml"
avgEfficiencyFile <- "avgEfficiency.xml"
messagesFile <- "messages.xml"
# the Java xmcda object for the output messages
xmcdaMessages<-.jnew("org/xmcda/XMCDA")
xmcdaDatav2 <- .jnew("org/xmcda/v2/XMCDA")
xmcdaData <- .jnew("org/xmcda/XMCDA")
loadXMCDAv2(xmcdaDatav2, inDirectory, unitsFile, mandatory = TRUE, xmcdaMessages,"alternatives")
loadXMCDAv2(xmcdaDatav2, inDirectory, inputsOutputsFile, mandatory = TRUE, xmcdaMessages,"criteria")
loadXMCDAv2(xmcdaDatav2, inDirectory, performanceTableFile, mandatory = TRUE, xmcdaMessages,"performanceTable")
loadXMCDAv2(xmcdaDatav2, inDirectory, maxPerformanceTableFile, mandatory = FALSE, xmcdaMessages,"performanceTable")
loadXMCDAv2(xmcdaDatav2, inDirectory, weightsLinearConstraintsFile, mandatory = FALSE, xmcdaMessages,"criteriaLinearConstraints")
loadXMCDAv2(xmcdaDatav2, inDirectory, methodParametersFile, mandatory = FALSE, xmcdaMessages,"methodParameters")
# if we have problem with the inputs, it is time to stop
if (xmcdaMessages$programExecutionResultsList$size() > 0){
if (xmcdaMessages$programExecutionResultsList$get(as.integer(0))$isError()){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop(paste("An error has occured while loading the input files. For further details, see ", messagesFile, sep=""))
}
}
# convert that to XMCDA v3
converter<-.jnew("org/xmcda/converters/v2_v3/XMCDAConverter")
xmcdaData <- handleException(
function() return(
converter$convertTo_v3(xmcdaDatav2)
),
xmcdaMessages,
humanMessage = "Could not convert inputs to XMCDA v3, reason: "
)
xmcdaData <- convertConstraints(xmcdaDatav2, xmcdaData)
if (xmcdaMessages$programExecutionResultsList$size() > 0){
if (xmcdaMessages$programExecutionResultsList$get(as.integer(0))$isError()){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop(paste("An error has occured while converting the inputs to XMCDA v3. For further details, see ", messagesFile, sep=""))
}
}
# let's check the inputs and convert them into our own structures
inputs<-checkAndExtractInputs(xmcdaData, programExecutionResult)
if (xmcdaMessages$programExecutionResultsList$size()>0){
if (xmcdaMessages$programExecutionResultsList$get(as.integer(0))$isError()){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop(paste("An error has occured while checking and extracting the inputs. For further details, see ", messagesFile, sep=""))
}
}
# here we know that everything was loaded as expected
# now let's call the calculation method
source("ImpreciseEfficiencySMAA.R")
results <- handleException(
function() return(
calculateEfficiencyIntervals(inputs, inputs$samplesNo, inputs$intervalsNo)
),
xmcdaMessages,
humanMessage = "The calculation could not be performed, reason: "
)
if (is.null(results)){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop("Calculation failed.")
}
# fine, now let's put the results into XMCDA structures
xResults = convert(results, xmcdaData$alternatives, xmcdaMessages)
if (is.null(xResults)){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop("Could not convert ImpreciseDEASMAACCREfficiencies results into XMCDA")
}
# and last, convert them to XMCDAv2 and write them onto the disk
for (i in 1:length(xResults)){
outputFilename = paste(outDirectory, paste(names(xResults)[i],".xml",sep=""), sep="/")
# convert current xResults to v2
results_v2 <- .jnew("org/xmcda/v2/XMCDA")
results_v2 <- handleException(
function() return(
converter$convertTo_v2(xResults[[i]])
),
xmcdaMessages,
humanMessage = paste("Could not convert ", names(xResults)[i], " into XMCDA_v2, reason: ", sep ="")
)
if (is.null(results_v2)){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop(paste("Could not convert ", names(xResults)[i], " into XMCDA_v2", sep =""))
}
# now write the converted result to the file
parser2<-.jnew("org/xmcda/parsers/xml/xmcda_v2/XMCDAParser")
tmp <- handleException(
function() return(
parser2$writeXMCDA(results_v2, outputFilename, .jarray(xmcda_v2_tag(names(xResults)[i])))
),
xmcdaMessages,
humanMessage = paste("Error while writing ", outputFilename, " reason: ", sep="")
)
if (xmcdaMessages$programExecutionResultsList$size()>0){
if (xmcdaMessages$programExecutionResultsList$get(as.integer(0))$isError()){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop(paste("Error while writing ", outputFilename, sep=""))
}
}
}
# then the messages file
# TODO: do this differently, instead of adding an empty info entry just to produce a <status>ok</status>
tmp <- handleException(
function() return(
putProgramExecutionResult(xmcdaMessages, infos="OK")
),
xmcdaMessages
)
if (is.null(tmp)){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop("Could not add methodExecutionResult to tree.")
}
tmp <- handleException(
function() return(
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
),
xmcdaMessages
)
if (xmcdaMessages$programExecutionResultsList$size()>0){
if (xmcdaMessages$programExecutionResultsList$get(as.integer(0))$isError()){
writeXMCDAv2(xmcdaMessages, paste(outDirectory, messagesFile, sep="/"))
stop("Error while writing messages file.")
}
}
|
/ImpreciseDEACCR/ImpreciseDEASMAACCREfficiencies/src/ImpreciseDEASMAACCREfficienciesCLI_XMCDAv2.R
|
no_license
|
alabijak/diviz_DEA
|
R
| false | false | 7,175 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/distributions.R
\name{summary_gamma}
\alias{summary_gamma}
\title{Summary of a Gamma distribution}
\usage{
summary_gamma(a, b, output = "list", ...)
}
\arguments{
\item{a,b}{Shape and rate parameters.}
\item{output}{\code{"list"} to return a list, \code{"pandoc"} to print a table}
\item{...}{arguments passed to \code{\link[=pander]{pander.data.frame}}}
}
\description{
Mode, mean, variance, and quartiles for a Gamma distribution
with shape parameter \code{a} and rate parameter \code{b}.
}
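\details{
For the shape--rate parameterization used here, the mean is \eqn{a/b},
the variance is \eqn{a/b^2}, and the mode is \eqn{(a-1)/b} when \eqn{a \ge 1}.
}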
\examples{
summary_gamma(a=2, b=4, output="pandoc", style="rmarkdown")
}
|
/man/summary_gamma.Rd
|
no_license
|
stla/brr
|
R
| false | false | 655 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hlpr.R
\name{matrixSwitch}
\alias{matrixSwitch}
\title{Switch that takes expression and results as vectors (helper.R)}
\usage{
matrixSwitch(expression, ...)
}
\arguments{
\item{expression}{Expression vector}
\item{...}{Cases and vectors of results. Results must be same class}
}
\value{
Result[i] of case that matches expression[i]
}
\description{
Switch that takes expression and results as vectors (helper.R)
}
\examples{
matrixSwitch(c("a","b","c"), a = 1:3, b = 11:13, c = 101:103, NA)
}
|
/man/matrixSwitch.Rd
|
no_license
|
solavrov/hlpr
|
R
| false | true | 572 |
rd
|
#' @importFrom magrittr %>%
Training <- R6::R6Class(
"Training",
public = list(
#'@field points dataframe
points = NULL,
#' @description Constructor class
initialize = function(width, height, amount){
x = sample(width,amount)
y = sample(height,amount)
points <- data.frame(x,y)
# This is a diagonal line that will define the (1,-1) spaces
# Label is the defined "correct" answer.
self$points <- points %>%
dplyr::mutate(label = dplyr::if_else(x > y, 1, -1)) %>%
dplyr::mutate(label = as.factor(label))
}
),
active = list(
#' @description Draw a chart with dots that are different colors
show_answers = function(){
ggplot2::ggplot(self$points, ggplot2::aes(x=x, y=y, color=label)) +
ggplot2::geom_point()
},
show_guesses = function(){
ggplot2::ggplot(self$points, ggplot2::aes(x=x, y=y, color=g)) +
ggplot2::geom_point()
}
),
private = list()
)
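# Minimal usage sketch (the values are arbitrary): create a training set on a
# 200x200 grid and plot the labelled points. Note that show_guesses assumes a
# column 'g' of guessed labels has been added to $points elsewhere (e.g. by a
# perceptron), so it only works once that column exists.
# t <- Training$new(width = 200, height = 200, amount = 100)
# t$show_answers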
|
/training.R
|
no_license
|
dlotto/Learning-Neural-Networks
|
R
| false | false | 1,011 |
r
|
# Fingerprinting worker script - running single chip enrichment option
# Delay allows time to monitor process starting up
print("preparing for script launch, 5s synch delay")
Sys.sleep(5)
# Read the genelist in
genelist <- sampleset[[8]];
total <- length(genelist)
# load contents of directory once
# this used to be within the loop
path<-"/home/galtschu2/Documents/Projects/Fingerprinting/data/Fingerprints/random_sq/"
dir<-dir(path = path)
# removed the line that recorded 'vector', since it wasted memory and was never used
# vector<-vector("list", 0)
for (i in 1:length(genelist)){
print(paste(i, "of", total, sep = " "))
filename<-paste("random_sq", genelist[i], sep = "_")
if (filename %in% dir){
print ("File already analyzed")
}
else if (!(filename %in% dir(path = path))){
temp<-geo2fingerprint(
GSM = genelist[i],
GEOthreshold = FALSE,
GEOpath = "/home/galtschu2/Documents/Databases/GEOfiles/",
geneset = "KEGG and Wikipathways and static",
enrichmentMethod = "SCE",
transformation = "squared.rank",
statistic = "mean",
normalizedScore = FALSE,
progressBar = FALSE
)
temp1<-list("name" = temp)
names(temp1)<-genelist[i]
save(temp1, file = paste(path, filename, sep =""))
# vector<-append(vector, temp1)
}
}
# save(vector, file="data/parallel_vector1.R")
|
/Fingerprint running scripts/random_SCE_sq_parallel/random_SCE_parallel_h.R
|
no_license
|
hidelab/database-fingerprint-scripts
|
R
| false | false | 1,415 |
r
|
### LOAD LIBRARIES -----------------------------------------------------------------------------------------
library("gplots")
library("RColorBrewer")
library("stringr")
### SET PATHS ----------------------------------------------------------------------------------------------
dir.wrk <- file.path("/Data/Raunak/projects/MESO_peritoneal/task/03_enrichment_analysis")
dir.data <- file.path(dir.wrk, "data")
dir.plot <- file.path(dir.wrk, "plot")
dir.enrichment <- file.path(dir.wrk, "enrichment")
### DEFINE FILE --------------------------------------------------------------------------------------------
file.name <- "enrichment_c2.cp.kegg.v5.0.symbols.txt"
### DEFINE ENVIRONMENT -------------------------------------------------------------------------------------
batch <- "MESO_OUTLIERS_p01_NEW"
dir.batch <- file.path(dir.enrichment, batch)
dir.patients <- list.dirs(path=dir.batch, full.names=T, recursive=F)
ids.patients <- list.dirs(path=dir.batch, full.names=F, recursive=F)
### PROCESS EACH PATIENT DATA ------------------------------------------------------------------------------
list.dat <- list()
for(i in 1:length(ids.patients)){
file.dat <- file.path(dir.patients[i], file.name)
if(!file.exists(file.dat)) next
dat <- read.delim(file.dat, header=T, stringsAsFactors=F)
dat$Category <- unlist(lapply(str_split(dat$Category, "_"), function(x) paste(x[2:length(x)], collapse=" ")))
dat$ngenes <- unlist(lapply(str_split(dat$overlap.genes, ","), function(x) length(x)))
dat <- subset(dat, dat$ngenes >=2)
if(nrow(dat) != 0){
list.dat[[i]] <- data.frame(SampleID=ids.patients[i],
Category=dat$Category,
pvalue=dat$pvalue,
fdr=dat$fdr,
EnrichmentScore=-log10(dat$fdr),
genes=dat$overlap.genes)
} else{
list.dat[[i]] <- data.frame(NULL)
}
}
df <- do.call(rbind.data.frame, list.dat)
df$SampleID <- as.character(df$SampleID)
df$Category <- as.character(df$Category)
df$genes <- as.character(df$genes)
### CONSTRUCT PATHWAY MATRIX ----------------------------------------------------------------------------------
pathways <- unique(df$Category)
patients <- unique(df$SampleID)
mat <- matrix(0, nrow=length(pathways), ncol=length(ids.patients), dimnames=list(pathways, ids.patients))
for(i in 1:nrow(df)){
x <- df$Category[i]
y <- df$SampleID[i]
mat[x,y] <- df$EnrichmentScore[i]
}
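# mat holds -log10(FDR) enrichment scores with pathways as rows and patients as columns;
# zeros mark pathway/patient pairs without an enrichment call, and columns span every
# patient directory (ids.patients), including patients with no enriched pathways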
write.table(mat, file.path(dir.data, "MESO_OUTLIERS_p01_pathway_enrichment_kegg.tsv"), sep="\t", row.names=T, col.names=NA, quote=F)
### RELOAD DATA ----
file.mat <- file.path(dir.data, "MESO_OUTLIERS_p01_pathway_enrichment_kegg.tsv")
mat <- read.delim(file.mat, header=T, stringsAsFactors=FALSE, row.names=1)
colnames(mat) <- str_replace_all(colnames(mat), "[.]", "-")
### PLOT PATHWAYS HEATMAP ----------------------------------------------------------------------------------
jColFun <- colorRampPalette(brewer.pal(n = 9, "Greys"))
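# grey colour ramp for the heatmap; with heatmap.2's linear colour mapping, darker cells
# correspond to larger -log10(FDR) enrichment scores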
file.plot <- file.path(dir.plot, "MESO_OUTLIERS_p01_pathway_heatmap_KEGG.pdf")
pdf(file.plot, height=6, width=6)
heatmap.2(as.matrix(mat),
col = jColFun(256),
Colv=TRUE, Rowv=TRUE,
dendrogram ="both", trace="none", scale="none",
cexCol=0.5, cexRow=0.5, margins = c(5,20),
hclustfun = function(x) hclust(x, method = "ward.D2"),
distfun = function(x) dist(x, method = "euclidean"),
colsep=c(1:150), rowsep=c(1:150),
sepcolor="white", sepwidth=c(0.0005,0.0005),
key="TRUE", keysize=0.8, density.info="none")
dev.off()
###
#which(as.numeric(apply(mat, 1, function(x) length(which(x == 0)))) >= 14)
#y <- mat[which(as.numeric(apply(mat, 1, function(x) length(which(x == 0)))) >= 14),]
#write.table(y, file.path(dir.data, "y.tsv"), sep="\t", row.names=T, col.names=NA, quote=F)
|
/task/03_enrichment_analysis/scripts/02_02_compile_enrichment_data_kegg.R
|
permissive
|
raunakms/meso_pem
|
R
| false | false | 3,777 |
r
|
#' @title Chi square tests for symptom mapping
#'
#' @description
#' Lesion to symptom mapping performed on a prepared matrix.
#' Chi square tests are performed at each voxel. By default
#' the Yates correction is applied; use \code{YatesCorrect=FALSE}
#' if you need to disable it. The behavior must
#' be a binary vector. Exact p-values can be obtained with
#' permutation based estimation.
#'
#' @param lesmat binary matrix (0/1) of voxels (columns)
#' and subjects (rows).
#' @param behavior vector of behavioral scores (must be binary).
#' @param YatesCorrect (default=T) logical whether to use Yates correction.
#' @param runPermutations logical (default=FALSE) whether to
#' use permutation based p-value estimation.
#' @param nperm (default=2000) The number of permutations to run.
#' @param showInfo display info messages when running the function.
#' @param ... other arguments received from \code{\link{lesymap}}.
#'
#' @return
#' List of objects returned:
#' \itemize{
#' \item\code{statistic} - vector of statistical values
#' \item\code{pvalue} - vector of pvalues
#' }
#'
#' @examples{
#' set.seed(123)
#' lesmat = matrix(rbinom(200,1,0.5), ncol=2)
#' set.seed(1234)
#' behavior = rbinom(100,1,0.5)
#' result = lsm_chisq(lesmat, behavior)
#' }
#'
#' @author Dorian Pustina
#'
#' @export
lsm_chisq <- function(lesmat, behavior, YatesCorrect=TRUE,
runPermutations = F, nperm=2000,
showInfo=TRUE, ...) {
behavOn = sum(behavior == 1)
behavOff = length(behavior) - behavOn
lesVox = colSums(lesmat)
lesOnBehavOn = colSums(apply(lesmat, 2, function(x) x*behavior))
lesOnBehavOff = lesVox - lesOnBehavOn
lesOffBehavOn = behavOn - lesOnBehavOn
lesOffBehavOff = behavOff - lesOnBehavOff
chimatrix = rbind( lesOnBehavOff, lesOnBehavOn, lesOffBehavOff, lesOffBehavOn)
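  # each column of chimatrix carries the four cells of one voxel's 2x2 contingency table;
  # matrix(x, ncol=2) below reshapes it so rows are behavior 0/1 and columns are lesion present/absent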
rm(behavOn,behavOff,lesVox,lesOnBehavOn,lesOnBehavOff,lesOffBehavOn,lesOffBehavOff)
  # estimate runtime
if (showInfo & runPermutations) {
tic = Sys.time()
temp = chisq.test(matrix(chimatrix[,1],ncol=2),
simulate.p.value=runPermutations,
correct=YatesCorrect, B=nperm)
onerun = as.double(difftime(Sys.time(),tic, units = 'sec'))
toc = tic + (ncol(chimatrix)* onerun)
expect = paste(round(as.double(difftime(toc,tic)),1), units(difftime(toc,tic)))
printInfo(paste('\n Chi square permutations, expected run =', expect), type='middle')
}
output = apply(chimatrix, 2, function(x) {
temp=chisq.test(matrix(x,ncol=2),
simulate.p.value=runPermutations,
                    correct=YatesCorrect,
                    B=nperm)
return(list(
stat=temp$statistic,
pval=temp$p.value))
}
)
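  # apply() returns one (stat, pval) pair per voxel, so unlist() interleaves the two values;
  # the stride-2 indexing below recovers the statistic and p-value vectors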
temp = unlist(output)
statistic = unname( temp[seq(1,length(temp),by=2)] )
pvalue = unname( temp[seq(2,length(temp),by=2)] )
# zscore = qchisq(pvalue, df=1)
# zscore = qnorm(pvalue, lower.tail=TRUE)
# zscore[is.infinite(zscore)] = 0 # fixing infinite values for p=1
return(list(
statistic=statistic,
pvalue=pvalue
#zscore=zscore
))
}
|
/R/lsm_chisq.R
|
permissive
|
mbowren/LESYMAP
|
R
| false | false | 3,294 |
r
|