| content (large_string, lengths 0 to 6.46M) | path (large_string, lengths 3 to 331) | license_type (large_string, 2 values) | repo_name (large_string, lengths 5 to 125) | language (large_string, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4 to 6.46M) | extension (large_string, 75 values) | text (string, lengths 0 to 6.46M) |
---|---|---|---|---|---|---|---|---|---|
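A minimal sketch of working with rows that follow this schema, assuming the split is stored as a Parquet file readable with the arrow package (the file name data.parquet and the use of arrow/dplyr are assumptions, not part of the dataset):

library(arrow)  # read_parquet()
library(dplyr)  # filter(), select()

# Read one Parquet shard of the dataset into a data frame.
rows <- read_parquet("data.parquet")

# Keep hand-written R sources (not vendored, not generated) under 10 kB.
small_r_files <- rows %>%
  filter(language == "R", !is_vendor, !is_generated, length_bytes < 10000) %>%
  select(repo_name, path, content)

# Print the first file.
cat(small_r_files$content[[1]])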
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution.R
\name{pNbinomLnorm}
\alias{pNbinomLnorm}
\title{Negative binomial and Log-Gaussian convolution}
\usage{
pNbinomLnorm(q, mu, size, mug, sigmag)
}
\arguments{
\item{q}{quantiles}
\item{mu}{mean parameter of the negative binomial component}
\item{size}{size (dispersion) parameter of the negative binomial component}
\item{mug}{mean of the log-Gaussian component on the log scale (meanlog)}
\item{sigmag}{standard deviation of the log-Gaussian component on the log scale (sdlog)}
}
\description{
Negative binomial and Log-Gaussian convolution
}
\keyword{convolution}
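% Illustrative example only; the argument values below are arbitrary placeholders,
% assuming mu/size parameterise the negative binomial part and mug/sigmag the
% log-Gaussian part.
\examples{
pNbinomLnorm(q = 0:10, mu = 3, size = 1.5, mug = 0, sigmag = 0.5)
}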
|
/man/pNbinomLnorm.Rd
|
permissive
|
ick003/convReg
|
R
| false | true | 464 |
rd
|
|
# Ying Zhu, so_zy2003@126.com
# 2021-07
# clear environment objects
rm(list=ls())
# load related packages
library("ggplot2")
library("vegan")
# Set ggplot2 drawing parameter
main_theme = theme(panel.background=element_blank(),
panel.grid=element_blank(),
axis.line.x=element_line(size=.5, colour="black"),
axis.line.y=element_line(size=.5, colour="black"),
axis.ticks=element_line(color="black"),
axis.text=element_text(color="black", size=7),
legend.position="right",
legend.background=element_blank(),
legend.key=element_blank(),
legend.text= element_text(size=7),
text=element_text(family="sans", size=7))
# Design of experiment
design = read.table("metadata.txt", header=T, row.names= 1, sep="\t")
design$phase <- design$Day
design$phase[design$Day<12] <- "p1"
design$phase[design$Day%in%c(12,15,18,21,24)] <- "p2"
design$phase[design$Day>24] <- "p3"
design$Day2 <- factor(design$Day2,
levels = c("d1", "d3","d6", "d9", "d12", "d15",
"d18", "d21", "d24","d27", "d30", "d37", "d44"))
# PCoA bray_curtis
bray_curtis = read.table("picrustbray_curtis.txt", sep="\t", header=T, check.names=F)
rownames(bray_curtis)=bray_curtis$bray_curtis
bray_curtis=bray_curtis[,-1]
bray_curtis = bray_curtis[rownames(design), rownames(design)]
# subset and reorder distance matrix
#Classical multidimensional scaling (MDS) of a data matrix. Also known as principal coordinates analysis
pcoa = cmdscale(bray_curtis, k=4, eig=T) # k is dimension, 3 is recommended; eig is eigenvalues
points = as.data.frame(pcoa$points) # get coordinates and convert to a data frame
colnames(points) = c("x", "y", "z","a")
eig = pcoa$eig
points = cbind(points, design[match(rownames(points), rownames(design)), ])
# plot PCo 1 and 2
p = ggplot(points, aes(x=x, y=y, color=Day,shape=Stage))
p = p + geom_point(alpha=.7, size=2) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
title="bray_curtis PCoA") + main_theme
q= p + scale_color_gradientn(colours=rainbow(7))
q
# plot PCo 1 and 3
p = ggplot(points, aes(x=x, y=z, color=Day,shape=Stage))
p = p + geom_point(alpha=.7, size=2) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 3 (", format(100 * eig[3] / sum(eig), digits=4), "%)", sep=""),
title="bray_curtis PCoA") + main_theme
p+scale_color_gradientn(colours=rainbow(5))
# age effect
# plot PCo 1 and 2
p = ggplot(points, aes(x=x, y=y, color=Day))
p = p + geom_point(alpha=.7, size=2) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
title="bray_curtis PCoA") + main_theme
q= p + scale_color_gradientn(colours=rainbow(7))
q
beta_pcoa_day_bray_curtis1v2_picrust<- q
# environment effects
p = ggplot(points, aes(x=x, y=y, color=phase))
p = p + geom_point(alpha=.7, size=2) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
title="bray_curtis PCoA") + main_theme
p
beta_pcoa_phase_bray_curtis1v2_picrust <- p
# ggsave("beta_pcoa_phase_bray_curtis1v2_picrust_20210607.pdf",
# beta_pcoa_phase_bray_curtis1v2_picrust, width = 4, height = 2.5)
# sex effect
p = ggplot(points, aes(x=x, y=y, color=Sex))
p = p + geom_point(alpha=.7, size=2) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
title="Bray_Curtis PCoA") + main_theme+stat_ellipse(level=0.95)
p
# family group
p = ggplot(points, aes(x=x, y=y, color=Group))
p = p + geom_point(alpha=.7, size=2) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
title="Bray_Curtis PCoA") + main_theme+stat_ellipse(level=0.95)
p
# individual ID
p = ggplot(points, aes(x=x, y=y, color=Individual))
p = p + geom_point(alpha=.7, size=2) +
labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
title="Bray_Curtis PCoA") + main_theme+stat_ellipse(level=0.95)
p
# Permutational Multivariate Analysis of Variance Using Distance Matrices (PERMANOVA)
adonis_day2 <-adonis(bray_curtis~design$phase+design$Day2,
permutations = 999)
adonis_day2$aov.tab
|
/FigureS7A_and_7B_ibis_day_pcoa_picrust_final.R
|
no_license
|
sozy2003/ZhuYing
|
R
| false | false | 5,048 |
r
|
|
#!/usr/bin/env Rscript
# id2phone.R
#
#
# Created by Eleanor Chodroff on 3/24/15.
#
# This script converts time marks and phone IDs
# module purge
# module load R
# Specify paths to phones.txt and merged_alignments.txt (should be contained in the alignment directory)
#phones <- read.table("C:\Users\QuyThao\Documents\Prosody analysis\Tests_ERJ_TIMIT\ERJ\Alignment\tri2_ali_s8000\phones.txt", quote="\"")
phones <- read.table("C:\Users\QuyThao\Documents\Prosody analysis\Tests_ERJ_TIMIT\ERJ\Alignment\tri1_align_words\phones.txt", quote="\"")
# segments <- read.table("/Users/Eleanor/mycorpus/recipefiles/segments.txt", quote="\"")
#ctm <- read.table("C:\Users\QuyThao\Documents\Prosody analysis\Tests_ERJ_TIMIT\ERJ\Alignment\tri2_ali_s8000\merged_alignments.txt", quote="\"")
ctm <- read.table("C:\Users\QuyThao\Documents\Prosody analysis\Tests_ERJ_TIMIT\ERJ\Alignment\tri1_align_words\merged_alignments.txt", quote="\"")
names(ctm) <- c("file_utt","utt","start","dur","id")
ctm$file <- gsub("_[0-9]*$","",ctm$file_utt)
names(phones) <- c("phone","id")
# names(segments) <- c("file_utt","file","start_utt","end_utt")
ctm2 <- merge(ctm, phones, by="id")
# ctm3 <- merge(ctm2, segments, by=c("file_utt","file"))
# ctm3$start_real <- ctm3$start + ctm3$start_utt
# ctm3$end_real <- ctm3$start_utt + ctm3$dur
# write.table(ctm3, "Users/Eleanor/mycorpus/recipefiles/final_ali.txt", row.names=F, quote=F, sep="\t")
write.table(ctm2, "C:\Users\QuyThao\Documents\Prosody analysis\Tests_ERJ_TIMIT\ERJ\Alignment\tri2_ali_s8000\final_ali.txt", row.names=F, quote=F, sep="\t")
|
/RTools/id2phone.R
|
no_license
|
Kithao/AM_Tools
|
R
| false | false | 1,577 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CorrelationTest.R
\name{findCorLags}
\alias{findCorLags}
\title{Find Most Correlated Lags}
\usage{
findCorLags(x, y, mlag = 60)
}
\arguments{
\item{x}{main series}
\item{y}{secondary series which will be applied to get lags}
\item{mlag}{maximum lags to test}
}
\value{
A \code{list} of all lags and their correlations
}
\description{
Search the number of lags with highest correlations between x and y
}
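% Illustrative example only; the two series below are arbitrary placeholders.
\examples{
set.seed(1)
findCorLags(x = rnorm(200), y = rnorm(200), mlag = 20)
}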
|
/man/findCorLags.Rd
|
no_license
|
ivanliu1989/AutoPairTrading
|
R
| false | true | 485 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biomod2_classes_4.R
\name{predict.bm}
\alias{predict.bm}
\alias{predict.biomod2_model}
\alias{predict,biomod2_model-method}
\title{Functions to get predictions from \code{\link{biomod2_model}} objects}
\usage{
\S4method{predict}{biomod2_model}(object, newdata, ...)
}
\arguments{
\item{object}{a \code{\link{biomod2_model}} object}
\item{newdata}{a \code{data.frame} or
\code{\link[terra:SpatRaster]{SpatRaster}} object
containing data for new predictions}
\item{\ldots}{(\emph{optional})}
}
\description{
This function allows the user to predict single models from
\code{\link{biomod2_model}} on (new) explanatory variables.
}
\seealso{
\code{\link{biomod2_model}}
Other Toolbox functions:
\code{\link{getters.bm}},
\code{\link{getters.out}},
\code{\link{load_stored_object}()},
\code{\link{predict.em}},
\code{\link{predict2.bm}},
\code{\link{predict2.em}}
}
\author{
Damien Georges
}
\concept{Toolbox functions}
|
/man/predict.bm.Rd
|
no_license
|
biomodhub/biomod2
|
R
| false | true | 999 |
rd
|
|
#' Access files in the current app
#'
#' NOTE: If you manually change your package name in the DESCRIPTION,
#' don't forget to change it here too, and in the config file.
#' For a safer name change mechanism, use the `golem::set_golem_name()` function.
#'
#' @param ... character vectors, specifying subdirectory and file(s)
#' within your package. The default, none, returns the root of the app.
#'
#' @noRd
app_sys <- function(...){
system.file(..., package = "rs")
}
#' Read App Config
#'
#' @param value Value to retrieve from the config file.
#' @param config GOLEM_CONFIG_ACTIVE value. If unset, R_CONFIG_ACTIVE.
#' If unset, "default".
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @noRd
get_golem_config <- function(
value,
config = Sys.getenv(
"GOLEM_CONFIG_ACTIVE",
Sys.getenv(
"R_CONFIG_ACTIVE",
"default"
)
),
use_parent = TRUE
){
config::get(
value = value,
config = config,
# Modify this if your config file is somewhere else:
file = app_sys("golem-config.yml"),
use_parent = use_parent
)
}
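# Illustrative usage, kept as comments since this is package code
# (the "golem_name" entry and the app/www directory are assumptions):
# get_golem_config("golem_name")
# app_sys("app", "www")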
|
/R/app_config.R
|
permissive
|
mtaylor-semo/rs
|
R
| false | false | 1,117 |
r
|
|
/man/chisq.bintest.Rd
|
no_license
|
danaesc/RVAideMemoire
|
R
| false | false | 2,684 |
rd
| ||
# input is the dataset but uses parameters to subset
# organize the data frame so that there are separate vectors of hourly data in the subset
# use a function that covers the whole hour so that we can look at "the 9 o'clock hour"
# do a loop that runs each of the vectors through 3 t.tests (drawing 2 outputs from the first)
# the 95% confidence, 75% confidence, and 50% confidence, and the estimate
# use 2 years worth of data?
# use geom_ribbon like in ab.test to plot it
staffing.day.plot <- function(df, weekday.vec, start.date, end.date, nickname.of.time.range){
library(lubridate)
library(dplyr)
library(ggplot2)
df$Date.Sold <- as_datetime(df$Date.Sold) #, ymd_hms)
df <- df %>%
mutate("Date.Sold.Round" = floor_date(df$Date.Sold, unit = "hour")) %>%
mutate("Hour" = hour(Date.Sold.Round)) %>%
mutate("Weekday" = wday((Date.Sold.Round))) %>%
filter(Year %in% c(2016, 2017)) %>%
filter(Date.Sold >= start.date, Date.Sold <= end.date) %>%
filter(Weekday %in% weekday.vec)
df.agg <- aggregate(Total.Sales ~ Date.Sold.Round + Hour + Weekday, df, sum)
conf.df <- data.frame()
for(i in 9:23){
print(i)
i.hour.vec <- df.agg %>%
filter(Hour == i) %>%
select(Total.Sales)
i.estimate.95 <- t.test(i.hour.vec, conf.level = 0.95)$estimate
i.conf.int.95 <- t.test(i.hour.vec, conf.level = 0.95)$conf.int
i.conf.int.75 <- t.test(i.hour.vec, conf.level = 0.75)$conf.int
i.conf.int.50 <- t.test(i.hour.vec, conf.level = 0.50)$conf.int
three.conf.int <- c(i, i.estimate.95, i.conf.int.95, i.conf.int.75, i.conf.int.50)
conf.df <- rbind(conf.df, three.conf.int)
}
colnames(conf.df) <- c("hour", "estimate", "low95", "high95", "low75", "high75", "low50", "high50")
day.plot <- ggplot(data = conf.df, aes(x=hour)) +
geom_ribbon(aes(ymin=low95, ymax=high95), fill="#0571b0", alpha = .75)+
geom_ribbon(aes(ymin=low75, ymax=high75), fill="#92c5de", alpha = .75)+
geom_ribbon(aes(ymin=low50, ymax=high50), fill="#d1e5f0", alpha = .75)+
geom_ribbon(aes(ymin=estimate-5, ymax=estimate+5), fill="#ef8a62")
print(day.plot)
}
# end of function ####
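# Illustrative call, assuming a data frame `sales` with the columns used above
# (Date.Sold, Total.Sales, Year):
# staffing.day.plot(sales, weekday.vec = 2:6,
#                   start.date = "2016-01-01", end.date = "2017-12-31",
#                   nickname.of.time.range = "weekdays")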
|
/staffing.apr18/staffing.day.plot.R
|
no_license
|
hamnsannah/wt_gh
|
R
| false | false | 2,186 |
r
|
|
#' @title mcmcsurv
#'
#' @description \code{mcmcsurv} estimates a Bayesian Exponential or Weibull survival model via Markov Chain Monte Carlo (MCMC). Slice sampling is employed to draw the posterior sample of the model's survival stage parameters.
#' @param Y response variable.
#' @param Y0 the elapsed time since inception until the beginning of time period (t-1).
#' @param C censoring indicator.
#' @param X covariates for betas.
#' @param N number of MCMC iterations.
#' @param burn burn-in to be discarded.
#' @param thin thinning to prevent from autocorrelation.
#' @param w size of the slice in the slice sampling for (betas, gammas, rho).
#' @param m limit on steps in the slice sampling.
#' @param form type of parametric model (Exponential or Weibull).
#'
#' @return chain of the variables of interest.
#'
#' @examples
#' set.seed(95)
#' bgl <- Buhaugetal_2009_JCR
#' bgl <- subset(bgl, coupx == 0)
#' bgl <- na.omit(bgl)
#' Y <- bgl$Y
#' X <- as.matrix(cbind(1, bgl[,1:7]))
#' C <- bgl$C
#' Y0 <- bgl$Y0
#' model2 <- mcmcsurv(Y = Y, Y0 = Y0, C = C, X = X,
#' N = 50,
#' burn = 20,
#' thin = 15,
#' w = c(0.5, 0.5, 0.5),
#' m = 5,
#' form = "Weibull")
#'
#' summary(model2, parameter = "betas")
#'
#' @export
mcmcsurv <- function(Y, Y0,C, X, N, burn, thin, w = c(1, 1, 1), m = 10, form) {
p1 = dim(X)[2]
# initial values
betas = rep(0, p1)
rho = 1
W = rep(0, length(Y))
delta = rep(0, length(Y))
Sigma.b = 10 *p1 * diag(p1)
betas.samp = matrix(NA, nrow = (N - burn) / thin, ncol = p1)
rho.samp = rep(NA, (N - burn) / thin)
for (iter in 1:N) {
if (iter %% 5000 == 0) print(iter)
if (iter > burn) {
Sigma.b = riwish(1 + p1, betas %*% t(betas) + p1* diag(p1))
}
betas = betas.slice.sampling(Sigma.b, Y, Y0,X, W, betas, delta, C, rho, w[1], m, form = form)
eXB = exp(X %*% betas+ W)
if (form %in% "Weibull") {
rho = rho.slice.sampling(Y,Y0, eXB, delta, C, rho, w[3], m)
}
if (iter > burn & (iter - burn) %% thin == 0) {
betas.samp[(iter - burn) / thin, ] = betas
rho.samp[(iter - burn) / thin] = rho
}
}
colnames(betas.samp) <- paste0("X", colnames(X))#---------------------------
out <- list(betas = betas.samp, rho = rho.samp, Y=Y, Y0=Y0, X=X, N=N, C=C, iterations = N, burn_in = burn,
thinning = thin, betan = nrow(betas), distribution = form)
class(out) <- "mcmcsurv"
return(out)
}
#' @title summary.mcmcsurv
#' @description Returns a summary of a mcmcsurv object via \code{\link[coda]{summary.mcmc}}.
#' @param object an object of class \code{mcmcsurv}, the output of \code{\link{mcmcsurv}}.
#' @param parameter the parameter of the mcmcsurv output to summarise. Indicate either "betas" or "rho".
#' @param ... additional parameter
#' @return list. Empirical mean, standard deviation and quantiles for each variable.
#' @rdname mcmcsurv
#' @export
summary.mcmcsurv <- function(object, parameter = c("betas", "rho"), ...){
if (parameter == "betas"){
sum <- summary(mcmc(object$betas), ...)
return(sum)
}
if (parameter == "rho"){
sum <- summary(mcmc(object$rho), ...)
return(sum)
}
}
|
/R/mcmcSurv.R
|
permissive
|
gonzalezrostani/BayesMFSurv
|
R
| false | false | 3,360 |
r
|
|
head(iris)
summary(iris)
# Some parameters related to titles, labels, limits...
plot(x = iris$Sepal.Length, y = iris$Sepal.Width,
main = "Sepal Width vs. Sepal Length", # Title
sub = "Iris", # Sub title
xlim = range(iris$Sepal.Length), # limits of x-axis
ylim = c(1, 10), # Limits of y-axis
xlab = "Sepal Length", # Label of x-axis
ylab = "Sepal Width", # Label of y-axis
log = "xy" # Axis to be set on log scale
)
# x and y are arguments - these two are sufficient for a plot
# In this plot, I specified some most basic graphical parameters.
# Some parameters related to text and plot size.
plot(iris$Sepal.Length,
type = "p", # Type of plot, default is points.
pch = "µ", # plotting symbols, 1~25 plus anything you want.
cex = 2, # Plotting text and symbol size
cex.axis = 2, # Axis annotation size
main = "Sepal Length",
cex.main = 2, # Title text size
cex.lab = 1, # Axis label size
lwd = 1, # Line width
lty = 4
)
table(iris$Species)
# My second pie chart......
pie(x = table(iris$Species),
# col = 1:3,
clockwise = T,
main = "Species of Iris",
)
# Some parameters related to colors
par(bg = "lightyellow") # par() sets graphical parameters before plots.
hist(iris$Sepal.Length,
freq = F, # count or proportion
# breaks = 15,
breaks = seq(from = 4,to = 8,by = 0.5),
xlim = range(iris$Sepal.Length),
main = "Histogram of Sepal Length",
sub = "Iris",
xlab = "Sepal Length",
col.main = "blue",
col.axis = 2,
col.lab = "#009933",
col.sub = 4, # multiple ways to specify color.
col = "darkgreen",
border = "blue", # Color of border of the bars
density = 2, # density of shading lines
angle = 15 # angle of shading lines, in degrees.
)
# border, density and angle are parameters specific to hist().
# Mostly showed parameters related to colors.
# Sorry I am really really really bad with colors.
table(iris$Species)
barplot(table(iris$Species))
par(mar = c(8, 4, 4, 2) + 0.1)
# Set the margins around the plotting area
plot(table(iris$Species), type = "h", las = 2)
# las controls the orientation of axis annotations.
boxplot(iris$Sepal.Length, iris$Sepal.Width, iris$Petal.Length, iris$Petal.Width)
fx1 <- function(x){x^2-10}
fx2 <- function(x){x^3}
curve(fx1,
xlim = c(-10, 10), ylim = c(-10, 10),
col = 2, lty = 1)
curve(fx2, add = TRUE, # add is a parameter in curve()
col = 3, lty = 2) # TRUE -> plot on the existing plot
x <- seq(from = -10, to = 2, by = 0.25)
y1 <- exp(x[1:24])
y2 <- exp(x[25:49])
points(x=x[1:24], y=y1, pch = ">") # Add these points to the existing plot
lines(x=x[25:49], y=y2)
# Add the smooth line containing these points to the existing plot
# lines(x=x, y=y)
abline(h = 5, lty = 3) # h -> horizontal line at y = 5
abline(v = -8, lty = "dotdash") # v => vertical line at x = -8
abline(a = 0, b = 1/2) # y = a + bx
# legend
legend("topright", # Can also be "top", "bottomright", ...
c(expression(paste(x^2-10)), expression(paste(x^3))),
col = c(2,3), # Usually corresponds to the plot
lty = c(1,2),
text.col = c(2,3))
xx <- seq(from = -10, to = 10, by = 0.1)
yy <- dnorm(xx, mean = 0, sd = 2)
# dnorm() gives the normal distribution density
plot(x = xx, y = yy, type = "l", main = "PDF of Normal(0, 2)",
axes = F, # Suppress axes
xlab = "x", ylab = "Density"
)
# axis() allows us to customize axes.
axis(1, at = seq(from = -10, to = 10, by = 4))
axis(2, at = seq(from = 0, to = 1, by = 0.02))
grid() # add grid lines.
# Explore the parameters allowed in grid()
par(mfrow = c(2,2), mar = c(3, 2, 1, 1) + 0.1) # 2 x 2 = 4 plots on the same page, mar allows us to change margin
plot(lm(Sepal.Length~Petal.Length, data = iris))
# lm() for linear regression - EPIB 621 material
# Plot your linear regression object will give 4 diagnostic plots.
matrix(c(1,1,2,3), 2, 2, byrow = TRUE)
nf <- layout(matrix(c(1,1,2,3), 2, 2, byrow = TRUE))
par(mar = c(3, 2, 1, 1) + 0.1)
# layout.show(nf) # Shows the partition of the plotting area
plot(x=x, y=c(y1, y2), type = "l")
plot(x=xx, y=yy, type = "l")
boxplot(iris$Sepal.Length, iris$Petal.Length)
pdf(file = "Normal_Density.pdf")
plot(x = xx, y = yy, type = "l", main = "PDF of Normal(0, 2)",
axes = F, xlab = "x", ylab = "Density")
axis(1, at = seq(from = -10, to = 10, by = 2))
axis(2, at = seq(from = 0, to = 1, by = 0.02))
grid()
dev.off()
# mu <- 0:4
# sigma <- 1:5
bg.color <- c("grey90", "grey80", "grey70", "grey60", "grey50") # Colors of your choice
x <- seq(from = -15, to = 20, by = 0.1)
for (i in 1:5) {
y <- dnorm(x, mean = i-1, sd = i)
pdf(file = paste("Normal_Density_", i, ".pdf", sep = ""))
par(bg = bg.color[i])
plot(x = x, y = y, type = "l", lty = i, xlim = c(-15, 20),
main = paste("PDF of Normal(", i-1, ",", i, ")", sep = ""))
abline(v = i-1, col = "maroon4")
grid()
dev.off()
}
|
/Lecture6.r
|
no_license
|
thomasmcgill/EPIB613_2019
|
R
| false | false | 5,249 |
r
|
|
context("Variable types")
with_mock_HTTP({
ds <- loadDataset("test ds")
test_that("Variable type method", {
expect_identical(type(ds[["birthyr"]]), "numeric")
expect_identical(type(ds$gender), "categorical")
})
test_that("Changing numeric type by <- makes requests", {
expect_POST(type(ds$birthyr) <- "categorical",
'https://app.crunch.io/api/datasets/1/variables/birthyr/cast/',
'{"cast_as":"categorical"}')
expect_POST(type(ds$birthyr) <- "text",
'https://app.crunch.io/api/datasets/1/variables/birthyr/cast/',
'{"cast_as":"text"}')
})
test_that("Setting the same type is a no-op", {
expect_no_request(type(ds$birthyr) <- "numeric")
})
test_that("Attempting to set an unsupported type fails", {
for (i in c("multiple_response", "categorical_array", "datetime", "foo")) {
expect_error(type(ds$birthyr) <- i,
"is not a Crunch variable type that can be assigned",
info=i)
}
})
test_that("Changing multiple_response type by <- fails", {
for (i in c("categorical", "text", "numeric", "categorical_array", "datetime", "foo")) {
expect_error(type(ds$mymrset) <- i,
"Cannot change the type of a MultipleResponseVariable by type<-",
info=i)
}
})
})
with_test_authentication({
test_that("Type changing alters data on the server", {
ds <- newDataset(df[,1,drop=FALSE])
testvar <- ds$v1
expect_true(is.Numeric(testvar))
type(testvar) <- "text"
expect_true(is.Text(testvar))
ds <- refresh(ds)
expect_true(is.Text(ds$v1))
type(ds$v1) <- "numeric"
expect_true(is.Numeric(ds$v1))
})
})
|
/tests/testthat/test-variable-type.R
|
no_license
|
malecki/rcrunch
|
R
| false | false | 1,819 |
r
|
context("Variable types")
with_mock_HTTP({
ds <- loadDataset("test ds")
test_that("Variable type method", {
expect_identical(type(ds[["birthyr"]]), "numeric")
expect_identical(type(ds$gender), "categorical")
})
test_that("Changing numeric type by <- makes requests", {
expect_POST(type(ds$birthyr) <- "categorical",
'https://app.crunch.io/api/datasets/1/variables/birthyr/cast/',
'{"cast_as":"categorical"}')
expect_POST(type(ds$birthyr) <- "text",
'https://app.crunch.io/api/datasets/1/variables/birthyr/cast/',
'{"cast_as":"text"}')
})
test_that("Setting the same type is a no-op", {
expect_no_request(type(ds$birthyr) <- "numeric")
})
test_that("Attempting to set an unsupported type fails", {
for (i in c("multiple_response", "categorical_array", "datetime", "foo")) {
expect_error(type(ds$birthyr) <- i,
"is not a Crunch variable type that can be assigned",
info=i)
}
})
test_that("Changing multiple_response type by <- fails", {
for (i in c("categorical", "text", "numeric", "categorical_array", "datetime", "foo")) {
expect_error(type(ds$mymrset) <- i,
"Cannot change the type of a MultipleResponseVariable by type<-",
info=i)
}
})
})
with_test_authentication({
test_that("Type changing alters data on the server", {
ds <- newDataset(df[,1,drop=FALSE])
testvar <- ds$v1
expect_true(is.Numeric(testvar))
type(testvar) <- "text"
expect_true(is.Text(testvar))
ds <- refresh(ds)
expect_true(is.Text(ds$v1))
type(ds$v1) <- "numeric"
expect_true(is.Numeric(ds$v1))
})
})
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/normalize.R
\name{prop_table}
\alias{prop_table}
\title{Convert OTU table to a proportion table}
\usage{
prop_table(x, Margin = 1)
}
\description{
This function takes an OTU counts table as input returns a proportion table.
The default input is samples as rows (Margin=1), but samples can be set to
columns by setting Margin to 2.
}
\examples{
## example otu table with samples as rows
otu_tab1 <- matrix(
c(2, 4, 3, 1, 5, 7),
nrow=2, ncol=3)
rownames(otu_tab1) <- c("sample1","sample2")
colnames(otu_tab1) <- c("otu1","otu2","otu3")
prop_table(otu_tab1)
}
|
/man/prop_table.Rd
|
no_license
|
cjschulz/micromixR
|
R
| false | false | 645 |
rd
|
|
# Exercise 2: using built-in string functions
# Create a variable `lyric` that contains the text "I like to eat apples and
# bananas"
lyric <- "I like to eat apples and bananas"
# Use the `substr()` function to extract the 1st through 13th letters from the
# `lyric`, and store the result in a variable called `intro`
# Use `?substr` to see more about this function
intro <- substr(lyric, 1, 13)
# Use the `substr()` function to extract the 15th through the last letter of the
# `lyric`, and store the result in a variable called `fruits`
# Hint: use `nchar()` to determine how many total letters there are!
fruits <- substr(lyric, 15, nchar(lyric))
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "ee".
# Store the result in a variable called `fruits_e`
# Hint: see http://www.endmemo.com/program/R/sub.php for a simple example (or
# use `?gsub`)
fruits_e <- gsub("a", "ee", fruits)
# Use the `gsub()` function to substitute all the "a"s in `fruits` with "o".
# Store the result in a variable called `fruits_o`
fruits_o <- gsub("a", "o", fruits)
# Create a new variable `lyric_e` that is the `intro` combined with the new
# `fruits_e` ending. Print out this variable
lyric_e <- paste(intro, fruits_e)
print(lyric_e)
# Without making a new variable, print out the `intro` combined with the new
# `fruits_o` ending
print(paste(intro, fruits_o))
|
/exercise-2/exercise.R
|
permissive
|
wangw05/ch6-functions
|
R
| false | false | 1,380 |
r
|
|
#' Create a TAB-delimited text file from AGS or FGS
#'
#' Each line in this file represents one gene/protein from an AGS/FGS and is accompanied by the respective AGS/FGS ID. This format can be used, e.g., as input at the EviNet web site \url{https://www.evinet.org/}
#' @param gs.list a list created with \code{\link{samples2ags}}, \code{\link{mutations2ags}}, \code{\link{as_genes_fgs}}, or \code{\link{import.gs}}.
#' @param File output file name.
#' @seealso \code{\link{samples2ags}}, \code{\link{mutations2ags}}, \code{\link{as_genes_fgs}}, \code{\link{import.gs}}
#' @references \url{http://www.biomedcentral.com/1471-2105/13/226}
#' @references \url{https://www.evinet.org/}
#' @examples
#' data(net.kegg)
#' netpath <- net.kegg
#' net <- import.net(netpath);
#' fgs.genes <- as_genes_fgs(net);
#' save_gs_list(fgs.genes, File = "single_gene_ags.groups.tsv");
#' @importFrom utils write.table
#' @export
save_gs_list <- function(gs.list, File = "gs.list.groups") {
t1 <- NULL;
for (gs in names(gs.list)) {
t1 <- rbind(t1, cbind(gs.list[[gs]], gs));
}
write.table(t1, file=File, append = FALSE, quote = FALSE, sep = "\t", eol = "\n", na = "NA", dec = ".", row.names = FALSE, col.names = FALSE)
}
|
/NEArender/R/save_gs_list.r
|
no_license
|
ingted/R-Examples
|
R
| false | false | 1,212 |
r
|
|
args <- commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
stop("At least one argument must be supplied (input file).n", call.=FALSE)
} else if (length(args)==1) {
# default output file
args[2] <- "vcf_converted_to_hapmap.txt"
}
vcf <- read.table(args[1], header=TRUE, stringsAsFactors = FALSE)
rs <- NA
alleles <- paste(vcf$REF, vcf$ALT, sep = "/")
chrom <- vcf$CHROM
pos <- vcf$POS
strand <- "+"
assembly <- NA
center <- NA
protLSID <- NA
assayLSID <- NA
panelLSID <- NA
Qcode <- NA
genotype_df <- vcf[c(-1:-10)]
hapmap <- cbind(rs, alleles, chrom, pos, strand, assembly, center, protLSID, assayLSID, panelLSID, Qcode, genotype_df)
colnames(hapmap)[[1]]<-"rs#"
colnames(hapmap)[[6]]<-"assembly#"
write.table(hapmap, file=args[2], row.names=FALSE, quote=FALSE, sep="\t")
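# Illustrative command-line invocation (file names are placeholders):
#   Rscript vcf2happmap.R input.vcf hapmap_out.txt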
|
/R/vcf2happmap.R
|
no_license
|
guokai8/biotiny
|
R
| false | false | 786 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getMAFFiles.R
\name{getWTExomeMutationMAF}
\alias{getWTExomeMutationMAF}
\title{Get the Wilms Tumor Exome-based Somatic MAF file}
\usage{
getWTExomeMutationMAF()
}
\description{
Get the Wilms Tumor Exome-based Somatic MAF file
}
|
/man/getWTExomeMutationMAF.Rd
|
no_license
|
teamcgc/TARGETCloud
|
R
| false | true | 308 |
rd
|
|
context("format e and E")
test_that("fmt_new(\"e\") can output exponent notation", {
f <- fmt_new("e")
expect_equal(f(0), "0.000000e+00")
expect_equal(f(42), "4.200000e+01")
expect_equal(f(42000000), "4.200000e+07")
expect_equal(f(420000000), "4.200000e+08")
expect_equal(f(-4), "-4.000000e+00")
expect_equal(f(-42), "-4.200000e+01")
expect_equal(f(-4200000), "-4.200000e+06")
expect_equal(f(-42000000), "-4.200000e+07")
expect_equal(fmt_new(".0e")(42), "4e+01")
expect_equal(fmt_new(".3e")(42), "4.200e+01")
})
test_that("fmt_new(\"E\") can output exponent notation", {
f <- fmt_new("E")
expect_equal(f(0), "0.000000E+00")
expect_equal(f(42), "4.200000E+01")
expect_equal(f(42000000), "4.200000E+07")
expect_equal(f(420000000), "4.200000E+08")
expect_equal(f(-4), "-4.000000E+00")
expect_equal(f(-42), "-4.200000E+01")
expect_equal(f(-4200000), "-4.200000E+06")
expect_equal(f(-42000000), "-4.200000E+07")
expect_equal(fmt_new(".0E")(42), "4E+01")
expect_equal(fmt_new(".3E")(42), "4.200E+01")
})
test_that("fmt_new(\"e\") can format negative zero as zero", {
expect_equal(fmt_new("1e")(-0), "0.000000e+00")
expect_equal(fmt_new("1e")(-1e-12), "-1.000000e-12")
})
test_that("fmt_new(\",e\") and fmt_new() does not group special characters", {
expect_equal(fmt_new(",e")(c(Inf, -Inf, NA, NaN)),
c("Inf", "-Inf", "NA", "NaN"))
expect_equal(fmt_new(",E")(c(Inf, -Inf, NA, NaN)),
c("Inf", "-Inf", "NA", "NaN"))
})
|
/tests/testthat/test-format-e.R
|
no_license
|
jrnold/fivemat
|
R
| false | false | 1,525 |
r
|
context("format e and E")
test_that("fmt_new(\"e\") can output exponent notation", {
f <- fmt_new("e")
expect_equal(f(0), "0.000000e+00")
expect_equal(f(42), "4.200000e+01")
expect_equal(f(42000000), "4.200000e+07")
expect_equal(f(420000000), "4.200000e+08")
expect_equal(f(-4), "-4.000000e+00")
expect_equal(f(-42), "-4.200000e+01")
expect_equal(f(-4200000), "-4.200000e+06")
expect_equal(f(-42000000), "-4.200000e+07")
expect_equal(fmt_new(".0e")(42), "4e+01")
expect_equal(fmt_new(".3e")(42), "4.200e+01")
})
test_that("fmt_new(\"E\") can output exponent notation", {
f <- fmt_new("E")
expect_equal(f(0), "0.000000E+00")
expect_equal(f(42), "4.200000E+01")
expect_equal(f(42000000), "4.200000E+07")
expect_equal(f(420000000), "4.200000E+08")
expect_equal(f(-4), "-4.000000E+00")
expect_equal(f(-42), "-4.200000E+01")
expect_equal(f(-4200000), "-4.200000E+06")
expect_equal(f(-42000000), "-4.200000E+07")
expect_equal(fmt_new(".0E")(42), "4E+01")
expect_equal(fmt_new(".3E")(42), "4.200E+01")
})
test_that("fmt_new(\"e\") can format negative zero as zero", {
expect_equal(fmt_new("1e")(-0), "0.000000e+00")
expect_equal(fmt_new("1e")(-1e-12), "-1.000000e-12")
})
test_that("fmt_new(\",e\") and fmt_new() does not group special characters", {
expect_equal(fmt_new(",e")(c(Inf, -Inf, NA, NaN)),
c("Inf", "-Inf", "NA", "NaN"))
expect_equal(fmt_new(",E")(c(Inf, -Inf, NA, NaN)),
c("Inf", "-Inf", "NA", "NaN"))
})
|
#' Default UI styles for the sign-in pages
#'
#' Default styling for the sign in and registration pages. Update the \code{sign_in_ui_default()}
#' arguments with your brand and colors to quickly style the sign in and registration
#' pages to match your brand.
#'
#' @param sign_in_module UI module for the sign in and registration pages.
#' @param color hex color for the background and button.
#' @param company_name your company name.
#' @param logo_top html for logo to go above the sign in panel.
#' @param logo_bottom html for the logo below the sign in panel.
#' @param icon_href the url/path to the browser tab icon.
#' @param background_image the url/path to a full width background image. If set to NULL,
#' the default, the \code{color} argument will be used for the background instead of this
#' image.
#' @param terms_and_privacy_footer links to place in the footer, directly above the copyright
#' notice.
#' @param align The horizontal alignment of the sign in box. Defaults to "center". Valid
#' values are "left", "center", or "right"
#'
#' @export
#'
#' @importFrom shiny fluidPage fluidRow column
#' @importFrom htmltools tags HTML
#' @importFrom stringr str_interp
#'
#' @return the UI for the sign in page
#'
sign_in_ui_default <- function(
sign_in_module = sign_in_module_ui("sign_in"),
color = "#5ec7dd",
company_name = "Your Brand Here",
logo_top = tags$div(
style = "width: 300px; max-width: 100%; color: #FFF;",
class = "text-center",
h1("Your", style = "margin-bottom: 0; margin-top: 30px;"),
h1("Brand", style = "margin-bottom: 0; margin-top: 10px;"),
h1("Here", style = "margin-bottom: 15px; margin-top: 10px;")
),
logo_bottom = NULL,
icon_href = "polish/images/polished_icon.png",
background_image = NULL,
terms_and_privacy_footer = NULL,
align = "center"
) {
if (is.null(background_image)) {
background_image_css <- stringr::str_interp("")
} else {
background_image_css <- stringr::str_interp("
background-image: url(${background_image});
background-repeat: no-repeat;
background-position: 0 0;
background-size: cover;
")
}
if (length(align) != 1 && !(align %in% c("left", "center", "right"))) {
stop('`align` must be either "lect", "center", or "right"', call. = FALSE)
}
if (is.null(terms_and_privacy_footer)) {
footer_margin <- -40
} else {
footer_margin <- -68
}
if (align == "center") {
left_col <- list()
main_width <- 12
right_col <- list()
} else if (align == "left") {
left_col <- list()
main_width <- 6
right_col <- column(6)
} else {
left_col <- column(6)
main_width <- 6
right_col <- list()
}
shiny::fluidPage(
style = "height: 100vh;",
tags$head(
tags$link(rel = "shortcut icon", href = icon_href),
tags$title(company_name),
tags$meta(
name = "viewport",
content = "
width=device-width,
initial-scale=1,
maximum-scale=1,
minimum-scale=1,
user-scalable=no,
viewport-fit=cover"
),
tags$style(
stringr::str_interp("
.auth_panel {
width: 100%;
max-width: 300px;
padding: 10px 25px;
background-color: #fff;
color: #080021;
margin: 0 auto;
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);
}
.auth_panel_2 {
width: 100%;
max-width: 600px;
padding: 10px 25px;
background-color: #fff;
color: #080021;
margin: 0 auto;
box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);
}
.btn-primary {
background-color: ${color} !important;
border: none;
width: 100%;
color: #FFF;
}
.footer {
color: #FFF;
text-align: center;
z-index: 1;
margin-top: ${footer_margin}px;
}
body {
background-color: ${color} !important;
${background_image_css}
}
")
)
),
shiny::fluidRow(
style = "padding-bottom: 50px; min-height: 100%;",
left_col,
shiny::column(
width = main_width,
align = "center",
logo_top,
tags$div(
sign_in_module,
logo_bottom
)
),
right_col
),
shiny::fluidRow(
shiny::column(
12,
class = "footer",
terms_and_privacy_footer,
tags$p(
htmltools::HTML("©"),
paste0(
substr(Sys.Date(), 1, 4),
" - ",
company_name
)
)
)
))
}
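# Usage sketch (illustrative): wiring this default sign-in UI into a polished app.
# It assumes polished::secure_ui() accepts a `sign_in_page_ui` argument and that the
# sign in module is registered under the id "sign_in"; adjust to the installed
# polished version before use.
#
# ui <- shiny::fluidPage(shiny::h1("My app"))
# secure_ui(
#   ui,
#   sign_in_page_ui = sign_in_ui_default(
#     color = "#006CB5",
#     company_name = "Acme, Inc.",
#     background_image = "images/milky_way.jpeg"
#   )
# )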
|
/R/sign_in_ui_default.R
|
no_license
|
Yaswanth-Tippireddy/polished
|
R
| false | false | 4,728 |
r
|
#' @title Create model multiplexer for model selection to tune over multiple possible models.
#'
#' @description
#' Combines multiple base learners by dispatching
#' on the hyperparameter \dQuote{selected.learner} to a specific model class.
#' This allows to tune not only the model class (SVM, random forest, etc) but also
#' their hyperparameters in one go. Combine this with \code{\link{tuneParams}} and
#' \code{\link{makeTuneControlIrace}} for a very powerful approach, see example below.
#'
#' The parameter set is the union of the parameter sets of all (unique) base learners.
#' In order to avoid name clashes all parameter names are prefixed
#' with the base learner id, i.e. \dQuote{[learner.id].[parameter.name]}.
#'
#' @param base.learners [\code{list} of \code{\link{Learner}}]\cr
#' List of Learners with unique IDs.
#' @return [\code{ModelMultiplexer}]. A \code{\link{Learner}} specialized as \code{ModelMultiplexer}.
#' @aliases ModelMultiplexer
#' @family multiplexer
#' @family tune
#' @export
#' @note Note that logging output during tuning is somewhat shortened to make it more readable.
#' I.e., the artificial prefix before parameter names is suppressed.
#' @examples
#' bls = list(
#' makeLearner("classif.ksvm"),
#' makeLearner("classif.randomForest")
#' )
#' lrn = makeModelMultiplexer(bls)
#' # simple way to construct param set for tuning
#' # parameter names are prefixed automatically and the 'requires'
#' # element is set, too, to make all parameters subordinate to 'selected.learner'
#' ps = makeModelMultiplexerParamSet(lrn,
#' makeNumericParam("sigma", lower = -10, upper = 10, trafo = function(x) 2^x),
#' makeIntegerParam("ntree", lower = 1L, upper = 500L)
#' )
#' print(ps)
#' rdesc = makeResampleDesc("CV", iters = 2L)
#' # to save some time we use random search. but you probably want something like this:
#' # ctrl = makeTuneControlIrace(maxExperiments = 500L)
#' ctrl = makeTuneControlRandom(maxit = 10L)
#' res = tuneParams(lrn, iris.task, rdesc, par.set = ps, control = ctrl)
#' print(res)
#' print(head(as.data.frame(res$opt.path)))
#'
#' # more unique and reliable way to construct the param set
#' ps = makeModelMultiplexerParamSet(lrn,
#' classif.ksvm = makeParamSet(
#' makeNumericParam("sigma", lower = -10, upper = 10, trafo = function(x) 2^x)
#' ),
#' classif.randomForest = makeParamSet(
#' makeIntegerParam("ntree", lower = 1L, upper = 500L)
#' )
#' )
#'
#' # this is how you would construct the param set manually, works too
#' ps = makeParamSet(
#' makeDiscreteParam("selected.learner", values = extractSubList(bls, "id")),
#' makeNumericParam("classif.ksvm.sigma", lower = -10, upper = 10, trafo = function(x) 2^x,
#' requires = quote(selected.learner == "classif.ksvm")),
#' makeIntegerParam("classif.randomForest.ntree", lower = 1L, upper = 500L,
#'     requires = quote(selected.learner == "classif.randomForest"))
#' )
#'
#' # all three ps-objects are exactly the same internally.
makeModelMultiplexer = function(base.learners) {
lrn = makeBaseEnsemble(
id = "ModelMultiplexer",
short.name = "mm",
base.learners = base.learners,
bls.type = NULL,
ens.type = NULL,
cl = "ModelMultiplexer"
)
# add extra param to parset, after we did all checks and so on in the base function
ps = makeParamSet(makeDiscreteLearnerParam("selected.learner", values = names(lrn$base.learners)))
lrn$par.set = c(lrn$par.set, ps)
lrn$par.set.ens = ps
lrn$fix.factors = TRUE
setHyperPars(lrn, selected.learner = names(lrn$base.learners)[1L])
}
#' @export
trainLearner.ModelMultiplexer = function(.learner, .task, .subset, .weights = NULL, selected.learner, ...) {
# train selected learner model and remove prefix from its param settings
bl = .learner$base.learners[[selected.learner]]
m = train(bl, task = .task, subset = .subset, weights = .weights)
makeChainModel(next.model = m, cl = "ModelMultiplexerModel")
}
#' @export
predictLearner.ModelMultiplexer = function(.learner, .model, .newdata, ...) {
# simply predict with the model
sl = .learner$par.vals$selected.learner
bl = .learner$base.learners[[sl]]
predictLearner(bl, .model$learner.model$next.model, .newdata)
}
#' @export
makeWrappedModel.ModelMultiplexer = function(learner, learner.model, task.desc, subset, features, factor.levels, time) {
x = NextMethod()
class(x) = c("ModelMultiplexerModel", class(x))
return(x)
}
#' @export
getLearnerModel.ModelMultiplexerModel = function(model) {
model$learner.model$next.model$learner.model
}
#' @export
isFailureModel.ModelMultiplexerModel = function(model) {
isFailureModel(model$learner.model$next.model)
}
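# Usage sketch (illustrative): outside of tuning, the multiplexer behaves like any
# other mlr learner, and the model class is switched via the "selected.learner"
# hyperparameter. Assumes the packages backing the base learners are installed.
# lrn = makeModelMultiplexer(list(makeLearner("classif.ksvm"), makeLearner("classif.randomForest")))
# lrn = setHyperPars(lrn, selected.learner = "classif.randomForest")
# r = resample(lrn, iris.task, makeResampleDesc("Holdout"))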
|
/R/ModelMultiplexer.R
|
no_license
|
Libardo1/mlr
|
R
| false | false | 4,634 |
r
|
\name{wday}
\alias{wday}
\alias{wday<-}
\title{Get/set days component of a date-time.}
\usage{
wday(x, label = FALSE, abbr = TRUE)
}
\arguments{
\item{x}{a POSIXct, POSIXlt, Date, chron, yearmon,
yearqtr, zoo, zooreg, timeDate, xts, its, ti, jul,
timeSeries, or fts object.}
\item{label}{logical. Only available for wday. TRUE will
display the day of the week as a character string label
such as "Sunday." FALSE will display the day of the week
as a number.}
\item{abbr}{logical. Only available for wday. FALSE will
display the day of the week as a character string label
such as "Sunday." TRUE will display an abbreviated
version of the label, such as "Sun". abbr is disregarded
if label = FALSE.}
}
\value{
wday returns the day of the week as a decimal number
(01-07, Sunday is 1).
}
\description{
  Date-time must be a POSIXct, POSIXlt, Date, chron,
  yearmon, yearqtr, zoo, zooreg, timeDate, xts, its, ti,
  jul, timeSeries, or fts object.
}
\examples{
x <- as.Date("2009-09-02")
wday(x) #4
wday(ymd(080101))
# 3
wday(ymd(080101), label = TRUE)
# "Tuesday"
wday(ymd(080101), label = TRUE, abbr = TRUE)
# "Tues"
wday(ymd(080101) + days(-2:4), label = TRUE, abbr = TRUE)
# "Sun" "Mon" "Tues" "Wed" "Thurs" "Fri" "Sat"
}
\seealso{
\code{\link{yday}}, \code{\link{mday}}
}
\keyword{chron}
\keyword{manip}
\keyword{methods}
\keyword{utilities}
|
/man/wday.Rd
|
no_license
|
wch/lubridate
|
R
| false | false | 1,390 |
rd
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Insurance coverage
# To compute for all insurance categories, replace 'insurance' in the 'svyby' function with 'insurance_v2X'
if(year == 1996){
FYC <- FYC %>%
mutate(MCDEV96 = MCDEVER, MCREV96 = MCREVER,
OPAEV96 = OPAEVER, OPBEV96 = OPBEVER)
}
if(year < 2011){
FYC <- FYC %>%
mutate(
public = (MCDEV.yy.==1|OPAEV.yy.==1|OPBEV.yy.==1),
medicare = (MCREV.yy.==1),
private = (INSCOV.yy.==1),
mcr_priv = (medicare & private),
mcr_pub = (medicare & !private & public),
mcr_only = (medicare & !private & !public),
no_mcr = (!medicare),
ins_gt65 = 4*mcr_only + 5*mcr_priv + 6*mcr_pub + 7*no_mcr,
INSURC.yy. = ifelse(AGELAST < 65, INSCOV.yy., ins_gt65)
)
}
FYC <- FYC %>%
mutate(insurance = recode_factor(INSCOV.yy., .default = "Missing", .missing = "Missing",
"1" = "Any private, all ages",
"2" = "Public only, all ages",
"3" = "Uninsured, all ages")) %>%
mutate(insurance_v2X = recode_factor(INSURC.yy., .default = "Missing", .missing = "Missing",
"1" = "<65, Any private",
"2" = "<65, Public only",
"3" = "<65, Uninsured",
"4" = "65+, Medicare only",
"5" = "65+, Medicare and private",
"6" = "65+, Medicare and other public",
"7" = "65+, No medicare",
"8" = "65+, No medicare"))
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Keep only needed variables from FYC
FYCsub <- FYC %>% select(agegrps,insurance,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU)
# Load event files
RX <- read.xport('C:/MEPS/.RX..ssp')
DVT <- read.xport('C:/MEPS/.DV..ssp')
IPT <- read.xport('C:/MEPS/.IP..ssp')
ERT <- read.xport('C:/MEPS/.ER..ssp')
OPT <- read.xport('C:/MEPS/.OP..ssp')
OBV <- read.xport('C:/MEPS/.OB..ssp')
HHT <- read.xport('C:/MEPS/.HH..ssp')
# Define sub-levels for office-based and outpatient
# To compute estimates for these sub-events, replace 'event' with 'event_v2X'
# in the 'svyby' statement below, when applicable
OBV <- OBV %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO'))
OPT <- OPT %>%
mutate(event_v2X = recode_factor(
SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ'))
# Stack events
stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT,
keep.vars = c('SEEDOC','event_v2X'))
stacked_events <- stacked_events %>%
mutate(event = data,
PR.yy.X = PV.yy.X + TR.yy.X,
OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>%
select(DUPERSID, event, event_v2X, SEEDOC,
XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X)
EVENTS <- stacked_events %>% full_join(FYCsub, by='DUPERSID')
EVNTdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = EVENTS,
nest = TRUE)
results <- svyby(~(XP.yy.X >= 0), FUN=svytotal, by = ~agegrps + insurance, design = subset(EVNTdsgn, XP.yy.X >= 0))
print(results)
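# Sketch (illustrative), following the comments above: the same estimate with the
# alternative groupings. Note that agegrps_v2X and insurance_v2X must then also be
# kept in the FYCsub select() above before this will run.
# results_v2 <- svyby(~(XP.yy.X >= 0), FUN = svytotal,
#                     by = ~agegrps_v2X + insurance_v2X,
#                     design = subset(EVNTdsgn, XP.yy.X >= 0))
# print(results_v2)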
|
/mepstrends/hc_use/json/code/r/totEVT__agegrps__insurance__.r
|
permissive
|
RandomCriticalAnalysis/MEPS-summary-tables
|
R
| false | false | 4,504 |
r
|
# original ui.R code from shiny.io
library(shiny)
library(httr)
library(ggplot2)
dataset <- diamonds
shinyUI(pageWithSidebar(
headerPanel("Diamonds Explorer"),
sidebarPanel(
sliderInput('sampleSize', 'Sample Size', min=1, max=nrow(dataset),
value=min(1000, nrow(dataset)), step=500, round=0),
selectInput('x', 'X', names(dataset)),
selectInput('y', 'Y', names(dataset), names(dataset)[[2]]),
selectInput('color', 'Color', c('None', names(dataset))),
checkboxInput('jitter', 'Jitter'),
checkboxInput('smooth', 'Smooth'),
selectInput('facet_row', 'Facet Row', c(None='.', names(dataset))),
selectInput('facet_col', 'Facet Column', c(None='.', names(dataset)))
),
mainPanel(
plotOutput('plot')
)
))
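# A minimal matching server.R sketch (illustrative), assuming the input ids defined
# above and the same `dataset` object; it belongs in server.R, not in this file.
# shinyServer(function(input, output) {
#   output$plot <- renderPlot({
#     df <- dataset[sample(nrow(dataset), input$sampleSize), ]
#     p <- ggplot(df, aes_string(x = input$x, y = input$y)) + geom_point()
#     if (input$color != 'None') p <- p + aes_string(color = input$color)
#     facets <- paste(input$facet_row, '~', input$facet_col)
#     if (facets != '. ~ .') p <- p + facet_grid(facets)
#     if (input$jitter) p <- p + geom_jitter()
#     if (input$smooth) p <- p + geom_smooth()
#     print(p)
#   })
# })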
|
/ui_trial.R
|
no_license
|
HuichunChien/Developing-Data-Products
|
R
| false | false | 861 |
r
|
##---- check hardcoded basenames ----
library(stringr) #str_trim
# get R files
r_files <- list.files(file.path("..", "figures"), pattern = ".R", recursive = TRUE)
checklist <- data.frame(r_file = basename(r_files),
bname_match = rep(NA, length(r_files)))
for(i in 1:length(r_files)) {
fileConn <- file(file.path("..", "figures", r_files[i]))
txt <- readLines(fileConn)
close(fileConn)
# get index of line containing this_base
ind <- grep("this_base <- ", txt)
# extract file name
tmp <- sub("this_base <- *", "\\1", txt[ind])
tmp <- gsub('\"', "", tmp)
tmp <- str_trim(tmp) # remove trailing whitespace
if(tmp == sub(".R", "", basename(r_files[i]))) {
checklist$bname_match[[i]] <- TRUE
} else {
checklist$bname_match[[i]] <- FALSE
}
}
message(paste(sum(checklist$bname_match), "out of", nrow(checklist),
              "hardcoded basenames match the figure directory name."))
|
/scripts/12_check-hardcoded-basenames.R
|
no_license
|
alexandrebouchard/r-graph-catalog
|
R
| false | false | 981 |
r
|
source("statistics_functions_ccc.R")
setwd("..")
outputDir <- "Output"
# NOTEBOOK DATA
codeCells <- read.csv(paste(outputDir, "/code_cells.csv", sep=""), header=TRUE, stringsAsFactors=FALSE)
sizes <- read.csv(paste(outputDir, "/notebook_sizes.csv", sep=""), header=TRUE)
loc <- read.csv(paste(outputDir, "/loc.csv", sep=""), header=TRUE, stringsAsFactors=FALSE)
languages <- read.csv(paste(outputDir, "/languages.csv", sep=""), header=TRUE, stringsAsFactors=FALSE)
snippetOccurrencesA <- read.csv(paste(outputDir, "/filesPerSnippetA.csv", sep=""), header=TRUE, stringsAsFactors=FALSE)
snippetOccurrencesNE <- read.csv(paste(outputDir, "/filesPerSnippetNE.csv", sep=""), header=TRUE, stringsAsFactors=FALSE)
notebookOccurencesA <- read.csv(paste(outputDir, "/nb_clone_distrA.csv", sep="") , header=FALSE)
notebookOccurencesNE <- read.csv(paste(outputDir, "/nb_clone_distrNE.csv", sep=""), header=FALSE)
cloneFreq <- read.csv(paste(outputDir, "/cloneFrequency.csv", sep=""), header=TRUE, stringsAsFactors=FALSE)
connections <-read.csv(paste(outputDir, "/connections.csv", sep=""), header=TRUE, stringsAsFactors=FALSE)
nbData <- merge(languages, cloneFreq, by="file")
nbData <- merge(codeCells, nbData, by="file")
nbData$non.empty.snippets <- nbData$clones + nbData$unique - nbData$empty
# SIZES
cells <- codeCells[,"code.cells"]
bytes <- sizes[,"bytes"]
locTotal <- loc[,"total.LOC"]
locTotalReduced <- locTotal[locTotal<max(locTotal)]
locNonBlank <- loc[,"non.blank.LOC"]
locNonBlankReduced <- locNonBlank[locNonBlank<max(locNonBlank)]
# Descriptive statistics
print("Cells:")
printMeanAndPercentiles(cells)
print("Bytes:")
printMeanAndPercentiles(bytes)
print("Non-empty LOC:")
printMeanAndPercentiles(locNonBlank)
print("Total LOC:")
printMeanAndPercentiles(locTotal)
# Histograms (with log scale on y axis)
# All 4 metrics decay too fast: an ordinary histogram looks like 1 single bar
logHist(cells, specifier="code_cells")
logHist(bytes, specifier="bytes")
#logHist(locTotal, specifier="Lines of code")
logHist(locTotalReduced, specifier="loc")
#logHist(locNonBlank, specifier="Non-blank lines of code")
logHist(locNonBlankReduced, specifier="loc_non-blank")
# CLONES
# Snippet occurrences distribution
logHist(snippetOccurrencesA$count, specifier="snippetOccurencesA", objects="Snippets")
logHist(snippetOccurrencesNE$count, specifier="snippetOccurencesNE", objects="Snippets")
# Notebook clone occurrences distribution
logHist(notebookOccurencesA[,1], specifier="notebookOccurencesA")
logHist(notebookOccurencesNE[,1], specifier="notebookOccurencesNE")
# Clone sizes (LOC) distribution
cloneGroupsA <- snippetOccurrencesA[snippetOccurrencesA$count>1,]
print("LOC in clone groups, with empty snippet")
printMeanAndPercentiles(cloneGroupsA$LOC)
cloneSizesA <- do.call(c, apply(cloneGroupsA, 1, repeatSecondColumn))
print("LOC in clone instances, with empty snippet")
printMeanAndPercentiles(as.integer(cloneSizesA))
logHist(cloneGroupsA$LOC, specifier="cloneGroupSizesA", objects="Clone groups")
logHist(as.integer(cloneSizesA), specifier="cloneSizesA", objects="Clones")
cloneGroupsNE <- snippetOccurrencesNE[snippetOccurrencesNE$count>1,]
print("LOC in clone groups, without empty snippet")
printMeanAndPercentiles(cloneGroupsNE[,2])
cloneSizesNE <- do.call(c, apply(cloneGroupsNE, 1, repeatSecondColumn))
print("LOC in clone instances, without empty snippet")
printMeanAndPercentiles(as.integer(cloneSizesNE))
logHist(cloneGroupsNE[,2], specifier="cloneGroupSizesNE", objects="Clone groups")
logHist(as.integer(cloneSizesNE), specifier="cloneSizesNE", objects="Clones")
# Clone frequency
frequencies <- nbData[,"clone.frequency"]
print("Clone frequency")
printMeanAndPercentiles(frequencies)
histogram(frequencies, "clone_frequencyA") # Non-parametric test needed (Spearman!?) Liknande distribution för alla språk -> Kurskal-Wallis lämpligt för att jämföra medianer
frequencies <- nbData[,"non.empty.clone.frequency"]
print("Clone frequency, empty snippets excluded")
printMeanAndPercentiles(frequencies)
histogram(frequencies, "clone_frequencyNE")
# Correlation with size (=number of code cells)
exportAsEPS(plot(clone.frequency~code.cells, data=nbData, xlab="Number of code cells", ylab="Clone frequency"),
"cells_frequencyA")
print("Correlation with size (all clones):")
cor.test(nbData$code.cells, nbData$clone.frequency, alternative="two.sided", method="spearman")
exportAsEPS(plot(non.empty.clone.frequency~non.empty.snippets, data=nbData, xlab="Number of code cells", ylab="Clone frequency"),
"cells_frequencyNE")
print("Correlation with size (non-empty clones):")
cor.test(nbData$non.empty.snippets, nbData$non.empty.clone.frequency, alternative="two.sided", method="spearman")
# Association with language
nbDataKnownLang <- nbData[nbData$language!=" UNKNOWN",]
exportAsEPS(boxplot(clone.frequency~language, data=nbDataKnownLang), "lang_frequencyA")
checkLM(nbDataKnownLang$clone.frequency, as.factor(nbDataKnownLang$language))
print("Correlation with language (all clones):")
kruskalWallisWithPost(nbDataKnownLang$clone.frequency, as.factor(nbDataKnownLang$language))
exportAsEPS(boxplot(non.empty.clone.frequency~language, data=nbDataKnownLang), "lang_frequencyNE")
checkLM(nbDataKnownLang$non.empty.clone.frequency, as.factor(nbDataKnownLang$language))
print("Correlation with language (non-empty clones):")
kruskalWallisWithPost(nbDataKnownLang$non.empty.clone.frequency, as.factor(nbDataKnownLang$language))
# CONNECTIONS
connection_analysis(connections)
|
/Scripts/statistics_ccc_nba.R
|
no_license
|
saeedsiddik/notebooks-code-clone-
|
R
| false | false | 5,517 |
r
|
#'
#' @title Compare predicted maturity ogives among several model runs
#'
#' @description Function to compare predicted maturity ogives among several model runs.
#'
#' @param objs - list of resLst objects
#' @param nyrs - number of years per plot
#' @param pdf - name for output pdf file
#' @param showPlot - flag to print plot to current device
#' @param verbose - flag (T/F) to print diagnostic information
#'
#' @details None.
#'
#' @return ggplot object
#'
#' @import ggplot2
#'
#' @export
#'
compareResults.Pop.MaturityOgives<-function(objs,
nyrs=10,
pdf=NULL,
showPlot=FALSE,
verbose=FALSE){
options(stringsAsFactors=FALSE);
#create pdf, if necessary
if(!is.null(pdf)){
pdf(file=pdf,width=11,height=8,onefile=TRUE);
on.exit(grDevices::dev.off());
showPlot<-TRUE;
}
cases<-names(objs);
mdfr<-NULL;
for (case in cases){
mdfr1<-NULL;
obj<-objs[[case]];
#if (inherits(obj,"tcsam2013.resLst")) mdfr1<-rTCSAM2013::getMDFR.Pop.MeanMaturity(obj,verbose);
#if (inherits(obj,"rsimTCSAM.resLst")) mdfr1<-rsimTCSAM::getMDFR.Pop.MeanMaturity(obj,verbose);
if (inherits(obj,"tcsam02.resLst")) mdfr1<-rTCSAM02::getMDFR.Pop.MaturityOgives(obj,verbose);
if (!is.null(mdfr1)){
mdfr1$case<-case;
mdfr<-rbind(mdfr,mdfr1);
}
}
if (is.null(mdfr)) {
cat("\n \nNo maturity ogive data.\n \n")
return(NULL);
}
mdfr$z<-as.numeric(mdfr$z)
mdfr$case<-factor(mdfr$case,levels=cases);
mdfr$y<-as.character(mdfr$y);
datasets<-unique(mdfr$category);
plots<-list();
mdfrp<-mdfr;
dcs<-unique(mdfrp$case); #unique cases
uys<-sort(unique(mdfrp$y)); #unique years
cat("dcs: ",dcs,"\n")
cat("uys: ",uys,"\n")
#-------------------------------------------#
#plot predicted maturity ogives
#-------------------------------------------#
nys<-length(uys);
  pd<-position_dodge(0.1); #dodge position used by geom_errorbar() below
  for (iy in 1:ceiling(nys/nyrs)){
iys<-min(c(nyrs*(iy-1)+1,nys+1)):min(c(nyrs*(iy),nys));
cat("iys: ",iys,"\n");
cat("uys[iys]: ",uys[iys],"\n");
dfrp<-mdfrp[mdfrp$y %in% uys[iys],];
mdfrpp<-dfrp[dfrp$type == 'predicted',];
p <- ggplot(mdfrpp,aes_string(x='z',y='val',colour='y'));
p <- p + geom_line(data=mdfrpp);
if (any(!is.na(mdfrpp$lci))) p <- p + geom_errorbar(aes_string(ymin='lci',ymax='uci'),position=pd);
p <- p + labs(x='size (mm CW)',y="probability(mature)");
#p <- p + ggtitle(d);
p <- p + facet_grid(case~.);
if (showPlot) print(p);
cap<-paste0("\n \nFigure &&figno. Predicted maturity ogives for ",uys[min(iys)]," to ",uys[max(iys)],".\n \n")
plots[[cap]]<-p;
}
return(plots);
}
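# Usage sketch (illustrative): compare two model runs; the list names become the
# case labels used for facetting. The objects are assumed to be tcsam02.resLst
# objects read in with the rTCSAM02 model-output functions.
# plots <- compareResults.Pop.MaturityOgives(
#            objs = list(base = resLst.base, alt = resLst.alt),
#            nyrs = 10, showPlot = TRUE)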
|
/R/compareResults.Pop.MaturityOgives.R
|
permissive
|
wStockhausen/rCompTCMs
|
R
| false | false | 3,046 |
r
|
#######
#Distance to shoreline
#######
rm(list=ls())
library(sp)
library(rgdal)
library(RColorBrewer)
library(raster)
library(spatstat)
library(maptools)
library(RColorBrewer)
source("R/functions.R")
source("R/data_read_in_est_and_use.R")
####
#Need to read in est using ReadShapeSpatial
###
est.shape<-readShapeSpatial("Data/SH_est_poly_clipped.shp", proj4string=CRS("+proj=longlat +ellps=GRS80"))
est.1<-spTransform(est.shape, CRS("+proj=utm +zone=56 +south +ellps=GRS80 +units=m +no_defs"))
est.sldf <- as( est.1 , "SpatialLinesDataFrame")
####
#Rasterize the shoreline
####
#create a blank raster
rast<-blank.raster(est.sldf,10)
#rasterise
shore_rast<-rasterize(est.sldf,rast)
distance<-distance(shore_rast)
shore_distance<-mask(distance, est)
plot(shore_distance)
|
/R/distance_to_shoreline_raster.R
|
no_license
|
LucasCo/LucasCo.github.io
|
R
| false | false | 780 |
r
|
psbcGL <-
function(survObj, priorPara, initial, rw = FALSE, mcmcPara, num.reps, thin, chain = 1, save = 1000){
survObj$n <- n <- length(survObj$t)
survObj$p <- p <- dim(survObj$x)[2]
eta0 <- priorPara$eta0
kappa0 <- priorPara$kappa0
c0 <- priorPara$c0
r <- priorPara$r
delta <- priorPara$delta
s <- priorPara$s
J <- priorPara$J <- length(priorPara$s)
groupInd <- priorPara$groupInd
groupNo <- priorPara$groupNo <- unique(priorPara$groupInd)
K <- priorPara$K <- length(groupNo)
m_k <- priorPara$m_k
m_k <- rep(NA, K)
for(i in 1:K){
m_k[i] <- sum(groupInd == groupNo[i])
}
priorPara$m_k <- m_k
intv <- setting.interval(survObj$t, survObj$di, priorPara$s, priorPara$J)
priorPara$ind.r <- intv$ind.r
priorPara$ind.d <- intv$ind.d
priorPara$ind.r_d <- intv$ind.r_d
priorPara$d <- intv$d
ini <- initial
beta.ini <- ini$beta.ini
lambdaSq <- ini$lambdaSq
sigmaSq <- ini$sigmaSq
tauSq <- ini$tauSq
h <- ini$h
mcmcPara$beta.prop.me <- beta.ini
tauSq.exp <- rep(NA, p)
for(i in 1:K){
tauSq.exp[groupInd == groupNo[i]] <- tauSq[i]
}
ini$sd.be <- sqrt(sigmaSq*tauSq.exp)
ini$xbeta <- as.vector(survObj$x %*% beta.ini)
be.normSq <- c()
for(i in 1:K){
be.normSq[i] <- sum(beta.ini[which(groupInd == groupNo[i])]^2)
}
ini$be.normSq <- be.normSq
H.star <- alpha0 <- c()
for (j in 1:J){
H.star[j] <- eta0 * s[j]^kappa0
alpha0[j] <- c0 * H.star[j]
}
priorPara$hPriorSh <- diff(c(0, alpha0))
## for posterior samples
mcmcOutcome <- list()
mcmcOutcome$initial <- initial
mcmcOutcome$priorPara <- priorPara
beta.p <- beta.ini
h.p <- h
tauSq.p <- tauSq
mcmcOutcome$sigmaSq.p <- sigmaSq
mcmcOutcome$lambdaSq.p <- lambdaSq
mcmcOutcome$accept.beta <- c(rep(0, p))
outcomeSum <- list()
dir.create('mcmcOutcome', showWarnings = FALSE)
# MCMC sampling
for(M in 1:num.reps){
if(M %% 1000 == 0)
{
cat("Chain", chain, "Iteration", M, fill=TRUE);
}
# Updating regression parameters
if(rw == FALSE){
sampleRP <- UpdateRP(survObj, priorPara, mcmcPara, ini)
}
if(rw == TRUE){
sampleRP <- UpdateRPrw(survObj, priorPara, mcmcPara, ini)
}
beta.ini <- ini$beta.ini <- sampleRP$beta.ini
xbeta <- ini$xbeta <- sampleRP$xbeta
mcmcOutcome$accept.beta <- mcmcOutcome$accept.beta + sampleRP$accept
# Updating the squared norm of beta using new beta values
for(i in 1:K){
be.normSq <- ini$be.normSq[i] <- sum(beta.ini[which(groupInd == groupNo[i])]^2)
}
# Updating increments in cumulative hazards
h <- ini$h <- UpdateBH(survObj, priorPara, ini)
# Updating 1/tauSq
tauSq <- ini$tauSq <- UpdateTau.GL(survObj, priorPara, ini)
# Updating tauSq.exp with new tauSq
for(i in 1:K){
tauSq.exp[groupInd == groupNo[i]] <- tauSq[i]
}
# Updating sigmaSq
sigmaSq <- ini$sigmaSq <- UpdateSigma.GL(survObj, priorPara, ini)
# Updating lambdaSq
lambdaSq <- ini$lambdaSq <- UpdateLambda.GL(survObj, priorPara, ini)
##########################
ini$sd.be <- sqrt(sigmaSq*tauSq.exp)
###### storing posterior samples
if(M %% thin == 0){
beta.p <- rbind(beta.p, beta.ini, deparse.level = 0)
h.p <- rbind(h.p, h, deparse.level = 0)
tauSq.p <- rbind(tauSq.p, tauSq, deparse.level = 0)
mcmcOutcome$sigmaSq.p <- c(mcmcOutcome$sigmaSq.p, sigmaSq)
mcmcOutcome$lambdaSq.p <- c(mcmcOutcome$lambdaSq.p, lambdaSq)
mcmcOutcome$ini <- ini
}
###### Tuning algorithm for the mean of the proposal density ###
for(j in 1:survObj$p){
if(M%/%thin > (20%/%thin)){
if(beta.ini[j] == beta.p[(M%/%thin + 1 -(20%/%thin)),j]){
mcmcPara$beta.prop.me[j] <- beta.p[(M%/%thin + 1),j];
}
}
}
# saving the mcmc outcomes
if(M %% save == 0 | M == num.reps){
save(mcmcOutcome, file = paste("mcmcOutcome/otherAll.ch", chain, ".Rdata", sep = ""))
save(beta.p, file = paste("mcmcOutcome/betaAll.ch", chain, ".Rdata", sep = ""))
save(tauSq.p, file = paste("mcmcOutcome/tauSqAll.ch", chain, ".Rdata", sep = ""))
save(h.p, file = paste("mcmcOutcome/hAll.ch", chain, ".Rdata", sep = ""))
}
} # the end of MCMC sampling
ret <- list(beta.p = beta.p, h.p = h.p, tauSq.p = tauSq.p, mcmcOutcome = mcmcOutcome, t=survObj$t, di=survObj$di)
class(ret) <- "psbcGL"
return(ret)
} # end of "psbcGrp" function
|
/R/psbcGL.R
|
no_license
|
cran/psbcGroup
|
R
| false | false | 4,505 |
r
|
#Preamble
library(tseries)
library(forecast)
library(ggplot2)
library(seasonal)
library(tidyverse)
library(rio)
library(xts)
library(gridExtra)
library(corrplot)
library(lmtest)
library(vars)
library(BigVAR)
library(tsDyn)
library(urca)
setwd("~/Documents/GitHub/forecasting/")
#Data import
dd=import('data_for_HW1.xlsx')
glimpse(dd)
colSums(is.na(dd))
date=dplyr::select(dd, Time)
dd=dplyr::select(dd, -Time)
var.data <- ts(dd[,1:5], start = c(2010, 1,1), frequency = 12)
#var.data[,1] <- log(var.data[,1])
#Data Visualisation, Stationarity
plot(var.data)
##all ts are non-stationary in the raw data (need a facet plot of ACF/PACF, try ggplot)
for (i in 1:5){
a<-ggAcf(var.data[,i])
b<-ggPacf(var.data[,i])
print(a)
print(b)
}
#ADF testing
##all of them are non stationary
for (i in 1:5){
c<-adf.test(var.data[,i])
print(c)
}
#How should we transform the process? Box-Cox procedure
#Read here
#https://www.statisticshowto.datasciencecentral.com/box-cox-transformation/
ggplot(dd, aes(x=log(var.data[,1])))+geom_histogram()
for (i in 1:5){
d<-BoxCox.lambda(var.data[,i])
print(d)
}
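#Reading the lambdas (illustrative sketch): lambda near 0 suggests a log transform,
#near -0.5 an inverse square root, near 1 no transform; this motivates the manual
#transformations applied below. Equivalently, the transform can be applied directly:
#BoxCox(var.data[,1], lambda = BoxCox.lambda(var.data[,1]))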
#Data transformation for ARIMA only
#Actually auto.arima can handle non-stationary data successfully, since it differences the data itself
var.data[,1] <- log(var.data[,1])
var.data[,2] <- log(var.data[,2])
var.data[,3] <- (sqrt(var.data[,3]))^(-1)
var.data[,4] <- log(var.data[,4])
plot(var.data)
#Split into train and test data
train.var <- window(var.data, start=c(2010,1),
end=c(2016,12), frequency=12) #The first train sample
test.var <- window(var.data, start=c(2017,1), frequency=12) # whole test sample
hor <- 12
Nsmp <- nrow(test.var)
# train_log=log(train)
# plot(train_log)
# ggAcf(train_log)
# adf.test(diff(train_log))
#Structure breaks at least I've tried to do it
##Ivan Pavlovich, here Galya Pazhitnova went off into the weeds somewhere, no idea what she is doing here
# library(tsDyn)
# dts<-setar(diff(train),1)
# plot(dts)
require(strucchange)
for (i in 1:5){
bp.train <- breakpoints(var.data[,i] ~ 1)
summary(bp.train)
plot(bp.train)
plot(var.data[,i])
lines(bp.train)
## confidence intervals
ci.train <- confint(bp.train)
ci.train
lines(ci.train)
}
# need a dummy for late 2014 - early 2015: the exchange-rate regime changed then
# and the switch to inflation targeting took place,
# the rules can be found on the central bank's website
dummy <- var.data*0
window(dummy, start = c(2014,12)) <- 1
#Some useful plots
ggseasonplot(train.var[,1]) #Seasonal subseries plot
ggseasonplot(train.var[,1], polar = T) #Seasonal polar plot
#Cointegration VECM prep
summary(ca.jo(var.data))
p1<-predict(vec2var(ca.jo(var.data), r=1), h=12)
df <- ts(p1$fcst$Deposits_ind[,1], start = c(2020,1))
autoplot(var.data[,1], series = 'Actual Data') + autolayer(df, series = 'Forecast')
##Naïve forecasting
plot(train.var)
n1<-naive(train.var[,1], h=12)
autoplot(n1)
##Models
models <- c('arima', 'ets', 'arimax', 'seas', 'var', 'var_lasso','vecm', 'rwd')
e <- f <-fL<-fH <- lapply(1:length(models),
function(i) {matrix(NA, hor, (Nsmp - hor))})
names(e) <- names(f) <-names(fL) <-names(fH) <- models
# NB! Initialize TT before loop!!!
TT <- nrow(train.var)
nn = ncol(var.data)
criterion <- 1
for (i in 1:(Nsmp - hor)){
cat(paste0('Sample ', TT, '\n'))
# Form train samples
train.yi <- var.data[1:TT, 1]
train.xi <- var.data[1:TT, 2:5]
dtrain.i <- dummy[1:TT]
# Form test samples
test.yi <- var.data[(TT+1):(TT+hor), 1]
test.xi <- var.data[(TT+1):(TT+hor), 2:5]
dtest.i <- dummy[(TT+1):(TT+hor)]
# ARIMA
m.arima <- auto.arima(train.yi, xreg = dtrain.i)
f[['arima']][, i] <- forecast( m.arima , xreg = dtest.i, h=hor)$mean
fL[['arima']][, i] <- forecast( m.arima , xreg = dtest.i, h=hor)$lower[,2]
fH[['arima']][, i] <- forecast( m.arima , xreg = dtest.i, h=hor)$upper[,2]
# ARIMAX
xtr <- cbind(dtrain.i, train.xi); colnames(xtr) <- NULL
xte <- cbind(dtest.i, test.xi); colnames(xte) <- NULL
m.arimax <- auto.arima(train.yi, xreg = xtr)
f[['arimax']][, i] <- forecast( m.arimax ,
xreg = xte,
h=hor)$mean
fL[['arimax']][, i] <- forecast( m.arimax ,
xreg = xte,
h=hor)$lower[,2]
fH[['arimax']][, i] <- forecast( m.arimax ,
xreg = xte,
h=hor)$upper[,2]
# X13 SEATS
seasD <- seas(ts(train.yi, frequency = 12, start = c(2010, 1)))
seasComponent = seasD$series$s10
trendComponent = seasD$series$s12
f[['seas']][, i] <- snaive(seasComponent, h = hor)$mean *
rwf(trendComponent, drift = TRUE, h = hor)$mean
fL[['seas']][, i] <- snaive(seasComponent, h = hor)$mean *
rwf(trendComponent, drift = TRUE, h = hor)$lower[,2]
fH[['seas']][, i] <- snaive(seasComponent, h = hor)$mean *
rwf(trendComponent, drift = TRUE, h = hor)$upper[,2]
# ETS
m.ets <- ets(train.yi)
f[['ets']][, i] <- forecast(m.ets, h=hor)$mean
fL[['ets']][, i] <- forecast( m.ets,
h=hor)$lower[,2]
fH[['ets']][, i] <- forecast( m.ets,
h=hor)$upper[,2]
# RWD
f[['rwd']][,i] <- rwf(train.yi, drift = TRUE, h = hor)$mean
fL[['rwd']][,i] <- rwf(train.yi, drift = TRUE, h = hor)$lower[,2]
fH[['rwd']][,i] <- rwf(train.yi, drift = TRUE, h = hor)$upper[,2]
# VAR
lag.sel <- VARselect(var.data[1:TT, ])$selection[criterion]
m.var <- VAR(var.data[1:TT, ], p= lag.sel)
f[['var']][,i] <- predict(m.var, n.ahead = hor)$fcst[[1]][, 1]
fL[['var']][,i] <- predict(m.var, n.ahead = hor)$fcst[[1]][, 2]
fH[['var']][,i] <- predict(m.var, n.ahead = hor)$fcst[[1]][, 3]
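  # (columns of $fcst[[1]] returned by predict() on a varest object are:
  #  point forecast, lower bound, upper bound, CI half-width - hence columns 1-3 above)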
# LASSO VAR
nlg = lag.sel
mod.lasso <-constructModel(var.data[1:TT, ], p=lag.sel,
"Basic", gran = c(500,10),
cv="Rolling",
MN=TRUE,
C = c(1, 1, 1,0, 1),
verbose=FALSE)
cv.res = cv.BigVAR(mod.lasso)
res =BigVAR.est(mod.lasso)
tmp.Z = matrix(1, nn*nlg+1, 1);
for (ik in 1:nlg) {
tmp.Z[((ik-1)*nn+2):(ik*nn+1), ] <- var.data[TT-ik+1, ]
}
BB <- res$B[,,cv.res@index]
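  # tmp.Z stacks an intercept and the last nlg observations in companion form:
  # tmp.Z = (1, y_t', y_(t-1)', ..., y_(t-nlg+1)')'. The loop below multiplies it by the
  # estimated coefficient matrix BB, stores the forecast of the first variable, then
  # shifts the lags down and inserts the forecast on top to iterate h steps ahead.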
for (kk in 1:hor) {
f.t <- BB%*%tmp.Z
f[['var_lasso']][kk, i] <- f.t[1,1]
tmp.Z[(nn+2):((nlg)*nn+1), ] <- tmp.Z[2:((nlg-1)*nn+1), ]
tmp.Z[2:(nn+1), ] <- f.t
}
#VECM
ca.jo(var.data)
f[['vecm']][, i]<-predict(vec2var(ca.jo(var.data[1:TT,]), r=1), n.ahead=hor)$fcst[[1]][,1]
# fL[['vecm']][, i]<-predict(vec2var(ca.jo(var.data), r=1), n.ahead=hor)$lower[[1]][,1]
# fH[['vecm']][, i]<-predict(vec2var(ca.jo(var.data), r=1), n.ahead=hor)$upper[[1]][,1]
# Eval errors
for (kk in 1:length(models)) {
e[[kk]][, i] <- test.yi - f[[kk]][, i]
}
# Next sample
TT <- TT+1
}
rmse <- matrix(NA, hor, length(models))
colnames(rmse) <- models
ratio <- matrix(NA, hor, length(models)-1)
colnames(ratio) <- models[1:(length(models)-1)]
for (kk in 1:length(models)) {
tmp <- e[[kk]]*e[[kk]]
rmse[, kk] <- sqrt(rowMeans(tmp))
}
for (kk in 1:(length(models)-1)) {
ratio[, kk] <- rmse[, kk]/rmse[, ncol(rmse)]
}
rmse
ratio
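# Illustrative (not part of the original script): visualise RMSE by horizon and model
matplot(1:hor, rmse, type = "l", lty = 1, col = 1:ncol(rmse),
        xlab = "horizon", ylab = "RMSE")
legend("topleft", legend = colnames(rmse), col = 1:ncol(rmse), lty = 1, cex = 0.7)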
#Checkresiduals
checkresiduals(m.arima)
checkresiduals(m.arimax)
checkresiduals(seasD)
checkresiduals(m.ets)
#Causality - why not test it as well?
causality(m.var, cause = c('Weighted_rate', 'USDRUB', 'KeyRate', 'Inflation'))$Granger
#Hair Plots forecasting (cross validation)
for (m in models) {
xx <- ts(var.data[, 1], start = c(2010, 1), frequency = 12)
gg <- autoplot(xx, col = 'black')
ddates <- time(xx)
TT <- nrow(train.var) + 1
for (i in 1:ncol(f[[m]])) {
xx0 <- ts(f[[m]][, i], start = ddates[TT], frequency = 12)
gg <- gg + autolayer(xx0)
TT <- TT+1
}
#print(gg)
ggsave(filename = paste0('hairplot_forecast_', m, '.png'), plot = gg)
}
##Forecasting plots
for (m in models) {
xx <- ts(var.data[, 1], start = c(2010, 1), frequency = 12)
ddates <- time(xx)
for (hh in 1:1) {
TT <- nrow(train.var) + hh
xx0 <- ts(f[[m]][hh, ], start = ddates[TT], frequency = 12)
xxl <- ts(fL[[m]][hh, ], start = ddates[TT], frequency = 12)
xxh <- ts(fH[[m]][hh, ], start = ddates[TT], frequency = 12)
gg <- autoplot(xx, col = 'black')
gg <- gg + autolayer(xx0, color= 'red') +autolayer(xxl, color = 'blue', linetype= 'dashed')+
autolayer(xxh, color = 'blue', linetype= 'dashed')
#gg
ggsave(filename = paste0('h_fore_', m, '_h_', hh, '.png'),
plot = gg)
}
}
|
/HW0_working version.R
|
no_license
|
Galunay/forecasting
|
R
| false | false | 8,785 |
r
|
invisible(options(echo = TRUE))
## read in data
pangenome <- read.table("###input_file###", header=FALSE)
genome_count <- max(pangenome$V8)
genomes <- (pangenome$V9[1:genome_count])
print(genomes)
pangenome <- pangenome[ pangenome$V1 > 1, ]
attach(pangenome)
## Calculate the means
v4means <- as.vector(tapply(V4,V1,FUN=mean))
print(v4means)
v1means <- as.vector(tapply(V1,V1,FUN=mean))
print(v1means)
## Calculate the medians
v4allmedians <- as.vector(tapply(V4,V1,FUN=median))
print(v4allmedians)
v1allmedians <- as.vector(tapply(V1,V1,FUN=median))
print(v1allmedians)
## plot points from each new comparison genome in its own color
row_count <- length(V1)
source_colors <- rainbow(genome_count)
p_color <- c()
for ( ii in c(1:row_count) ) {
p_color[ii] <- source_colors[V8[ii]]
# points(temp_v1, temp_v4, pch=17, col=p_color)
}
## end of color block
## exponential model based on medians
nlmodel_exp <- nls(v4allmedians ~ th1 + th2* exp(-v1allmedians / th3), data=pangenome,
start=list(th1=33, th2=476, th3=1.5))
#summary(nlmodel_exp)
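# Illustrative alternative (an assumption, not part of the original analysis): the same
# asymptotic-decay curve can be fitted with the self-starting SSasymp() parameterisation,
# which avoids hand-picked starting values (th1 = Asym, th2 = R0 - Asym, th3 = 1/exp(lrc)):
# nlmodel_ss <- nls(v4allmedians ~ SSasymp(v1allmedians, Asym, R0, lrc))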
# Open up the output file for the log graph
postscript(file="###output_path###new_genes_exponential_medians_log_error_bar.ps", width=11, height=8.5, paper='special')
layout(matrix(c(1,2),byrow=TRUE), heights=c(7.5,1))
# Draw the axis
plot(V1,V4, xlab="number of genomes", ylab="new genes", main="###TITLE### new genes exponential log axis", col=p_color, cex=0.5, log="xy", type="n")
superpose.eb <-
function (x,y, ...) {
sum = summary(y)
q1 <- quantile(y, names=FALSE)[2]
print(q1)
med <- quantile(y,names=FALSE)[3]
q4 <- quantile(y, names=FALSE)[4]
print(q4)
print(med)
print(x)
arrows(as.integer(x), sum[[2]],as.integer(x) , sum[[5]], angle = 90, code = 3,
length = 0.08, ...)
}
m <- tapply(pangenome$V4,pangenome$V1,c)
for( x in names(m) ) {
superpose.eb(x, m[[x]])
}
# plot the medians
points(tapply(pangenome$V4,pangenome$V1,FUN=median)~tapply(pangenome$V1,pangenome$V1,FUN=median),pch=5,col='black')
# plot the means
points(tapply(V4,V1,FUN=mean)~tapply(V1,V1,FUN=mean),pch=6,col='black')
# plot the regression
x <- seq(par()$xaxp[1]-1,as.integer(1.0 + 10^par()$usr[[2]]))
lines(x, predict(nlmodel_exp, data.frame(v1allmedians=x)), lwd=2, col="black")
abline(h=nlmodel_exp$m$getPars()[1], lty=2, lwd=2,col="black")
expr_exp <- substitute(
expression(y == th1 + th2 * italic(e)^(-x / th3)),
list(
th1 = round(nlmodel_exp$m$getPars()[1], digit=4),
th1err = round(summary(nlmodel_exp)[10][[1]][3], digit=4),
th2 = round(nlmodel_exp$m$getPars()[2], digit=4),
th2err = round(summary(nlmodel_exp)[10][[1]][4], digit=4),
th3 = round(nlmodel_exp$m$getPars()[3], digit=4),
th3err = round(summary(nlmodel_exp)[10][[1]][5], digit=4)
)
)
par(mai=c(.2,0,0,0))
height<- (10^(par()$usr[4]) - 10^(par()$usr[3]))
width<- (10^(par()$usr[2]) - 10^(par()$usr[1]))
plot.new()
legend("top", c(eval(expr_exp)), lwd=c(2,2), yjust=0.5,xjust=0)
#legend(10^(par()$usr[2])+(0.01*width),10^(par()$usr[3]) + height/2, c(eval(expr_exp)), lwd=c(2,2), yjust=0.5,xjust=0)
|
/bin/new_genes_exponential_medians_log_error_bar.new.R
|
no_license
|
kamsellin/ReVac-package
|
R
| false | false | 3,237 |
r
|
setwd("~/Dropbox/PhD/Placebo/Experiments/CaffeinePredictorsOfWIthdrawalSeverity/data")
source("~/Dropbox/PhD/Placebo/Experiments/CaffeinePredictorsOfWIthdrawalSeverity/data/masterCaffRegression.R")
library(car)
library(plyr)
caffReg <- mRFrame
############### descriptives for each of the numeric columns
caffRegMeans <- colMeans(data.frame(caffReg$totCaffeine, caffReg$numQuitAttempts, caffReg$yearsCaffUse, caffReg$numCupsCoffee, caffReg$B1Total, caffReg$B2Total))
# this is for the sd of the columns. The '2' tells the apply function to operate over columns (1 would tell it to do rows; c(1,2) does both). You could apply any function to the FUN argument btw, including mean, which means you could calculate means with this method instead of the method you used above.
caffRegSDs <- apply(data.frame(caffReg$totCaffeine, caffReg$numQuitAttempts, caffReg$yearsCaffUse, caffReg$numCupsCoffee, caffReg$B1Total, caffReg$B2Total), 2, FUN = sd)
caffRegSEs <- caffRegSDs/sqrt(nrow(caffReg))
# number of each level of each factor. The apply function applies the 'count' function inside it to the columns specified in the first argument. We have restricted this to only the columns that are factors. In the first argument the sapply function is isolating only those columns of caffReg that are factors. If we just ran the count function called by 'apply' on caffReg it would give us counts for numeric vectors, dates etc. We want only the factors. The '2' specifies that we are conducting this operation on columns (1 would be rows).
# you may have to detach dplyr to run the following, as it, along with plyr, also has a count function
detach("package:dplyr", unload = T)
apply(caffReg[, sapply(caffReg, is.factor)], 2, count)
# # could do the above another way if you had a list of column names that were factors
# caffFactNames <- colnames(caffReg[, sapply(caffReg, is.factor)])
#
# apply(caffReg[, caffFactNames], 2, count)
# group means
gMeansCaffReg <- with(caffReg, tapply(caffReg$B2Total, dSetID, mean))
#aggregate(data.frame(caffReg[, sapply(caffReg, is.numeric)]) ~ dSetID, data = caffReg, mean)
########## Simple Regression #############################
reg <- lm(B2Total ~ totCaffeine, data = caffReg)
b <- summary(reg)
b$coefficients[2,4] # delivers p-value for the regression by isolating the element in the element of the summary of the regression
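# An alternative (illustrative, assumes the broom package is installed): broom::tidy(reg)
# returns the full coefficient table, including p-values, as a data frame
# broom::tidy(reg)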
## Hierarchical regression
# model 1
model1 <- lm(B2Total ~ totCaffeine, data = caffReg)
# model 2
model2 <- lm(B2Total ~ totCaffeine + numQuitAttempts, data = caffReg)
# model 3
model3 <- lm(B2Total ~ totCaffeine + numQuitAttempts + yearsCaffUse, data = caffReg)
# model 4
model4 <- lm(B2Total ~ totCaffeine + numQuitAttempts + yearsCaffUse + numCupsCoffee, data = caffReg)
# model 5
model5 <- lm(B2Total ~ totCaffeine + numQuitAttempts + yearsCaffUse + numCupsCoffee + B1Total, data = caffReg)
# look at the changing regression coefficients, p-values and t-statistics for how much each variable contributes to the overall model
summary(model1)
summary(model2)
summary(model3)
summary(model4)
summary(model5)
# to compare successive models use the anova function. The output here tells you by how much each new model predicts the variance in the DV over and above the previous model and whether or not this difference is significant.
anova(model1, model2, model3, model4, model5)
# for semipartial/part (delta-R squared) and partial correlation squared (partial eta-squared) we can use this function
require(lmSupport)
lm.sumSquares(model5)
# or we can also use the following function from the lmSupport package to give us change in r-squared and partial eta-squared
lm.deltaR2(model4, model5) # note you can only compare two models at a time.
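# Illustrative cross-check (not part of the original script): delta R-squared can also be
# computed by hand from the model summaries
summary(model5)$r.squared - summary(model4)$r.squared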
# this does the same as above except it gives a p-value for the second model (first model must be subset of second)
modelCompare(model4, model5)
### using stepwise regression via the rms package
require(rms)
## creates empty regression object without predictors
regStep <- lm(B2Total ~ 1, data = caffReg)
## uses the previous object as the basis for a stepwise regression
# note: forward stepwise regression is a procedure whereby the variables which have the largest semi-partial r-squared and hence make the largest contribution to R-squared (variables which have the largest t-values) are included in the model and the least significant excluded. Predictors are not admitted when they cease to make a contribution which is statistically significant at a level specified by the user.
regStepFor <- step(regStep, direction = "forward", scope = (~ totCaffeine + numQuitAttempts + yearsCaffUse + numCupsCoffee + B1Total), data = caffReg, trace = 0)
regStepBack <- step(regStep, direction = "backward", scope = (~ totCaffeine + numQuitAttempts + yearsCaffUse + numCupsCoffee + B1Total), data = caffReg, trace = 0)
summary(regStepFor)
summary(regStepBack)
############################################################################################################################################################################################################################################################################
################## Testing assumptions of regression.############################################################################################################################################### We will work on the lm object 'model5' ####################################################################################################################################################################################################################################################################
require(car)
require(MASS)
library(stats)
library(lmtest)
######################################################################################
################# Assumption 1: testing for independence of errors ####################
##########################################################################################
# first assumption of regression is independence of errors. This means that the error of observation for each observation is unrelated to that of adjacent observations. The Durbin Watson statistic is a test for a certain type of lack of independence, namely 1st-order autocorrelation (also called serial correlation) which means that the errors of adjacent observations are not independent (i.e. are correlated).
# test for autocorrelated errors
# The Durbin Watson statistic can run from 0 to 4 but a value near 2 would indicate little or no autocorrelation.
durbinWatsonTest(model5)
# You would write this up like so:
# ‘There was independence of observations, as assessed by a Durbin-Watson statistic of 2.222’
##########################################################################################
#################### Assumption 2: Assumption of linearity ########################################################################################################################
# one of the assumptions of multiple linear regression is that the independent variables are linearly related to the dependent variable, and that each independent variable is linearly related to the DV
# You can check for this by plotting the studentised residuals against the unstandardised predicted values. If your residuals form a horizontal band then the relationship between your DV and IVs is likely linear. In general values close to the horizontal line are well predicted. Point over the line are underpredicted and points over the line are overpredicted.
# calculate standardised residuals
unStResid <- resid(model5)
stdResid <- (unStResid - mean(unStResid)) / sd(unStResid)
# studentised residuals
studResid <- rstudent(model5)
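# Note (illustrative): base R offers these directly - rstandard(model5) returns internally
# studentised (standardised) residuals, and rstudent(model5) the externally studentised
# (deleted) residuals used above, so the manual z-scoring is mainly for exposition.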
# unstandardised predicted values
unStPredicted <- predict(model5)
plot(unStPredicted, studResid, main = "studentised residuals by unstandardised Predicted Values", ylab = "Studentised Residuals", xlab = "Unstandardised Predicted")
plot(unStPredicted, stdResid, main = "studentised residuals by unstandardised Predicted Values", ylab = "Standardised Residuals", xlab = "Unstandardised Predicted")
abline(0,0)
# you can also test this with partial regression plots. These are simply plotting the different predictors against the DV. Partial Regression plots should show a linear relationship.
avPlots(model5)
######################################################################################################### Assumption 3: Assumption of homoscedasticity #######################################################################################################################
# The assumption of homoscedasticity is that the residuals are equal for all values of the predicted dependent variable.
# you can test for this by running either the Breusch-Pagan test of heteroscedasticity
bptest(model5) # We're hoping for a non-significant value here. Significant means heteroscedasticity, non-sig means homoscedasticity, which is what we want
# or White's test for heteroscedasticity. The following gives a heteroscedasticity corrected covariance matrix.
coeftest(model5, vcov=hccm(model5))
# you can also look at the studentised residuals plot above. If there is homoscedasticity the spread of the studentised residuals will not increase or decrease as you move across the predicted values.
## Non-constant error variance. Constant error variance implies homoscedasticity, which we want. This is a test where the null hypothesis is constant error variance (i.e. equal spread of error variance around the predictors) therefore we WANT this test to be non-significant.
# non-constant error variance test.
ncvTest(model5)
# plot studentised residuals vs fitted values. An increasing trend in this plot would be saying that the absolute residuals are getting larger as the fitted values do - indicating a spread that's related to the mean, a violation of the assumptions (the plot is used to assess potential heteroskedasticity). If no slope then even distribution of error across the IVs
spreadLevelPlot(model5)
###################################################################################################### Assumption 4: Assumption of non-colinearity ###########################################################################################################################
# multicollinearity occurs when you have 2 or more IVs that correlate highly with each other. This will cause problems in demarcating which variables contribute how much of the variance in the DV
# there are two ways to test this, correlations and tolerance
# the method below yields correlations as Matrix first, and then same matrix as p-values
############## correlations.
# If any of these are over r=.7 then you have multicollinearity
library(Hmisc)
IVs <- data.frame(caffReg$totCaffeine, caffReg$numQuitAttempts, caffReg$yearsCaffUse, caffReg$numCupsCoffee, caffReg$B1Total)
corsMatrix <- rcorr(as.matrix(IVs)) # needs to be a matrix not a dframe for this to work
###### tolerance and VIF
# Tolerance and VIF are two sides of the same thing (VIF is 1/tolerance). If tolerance is <0.1 and VIF > 10 then you might have a collinearity problem
vif(model5) # VIF = variance inflation factors
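# Tolerance is simply the reciprocal of VIF (illustrative cross-check):
1 / vif(model5)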
################################################################################################################ Assumption 5: No highly influential observation #####################################################################################################################
########## Testing for outliers
# qqplot for studentised residuals. You want the data to stick as close to the diagonal as possible
qqPlot(model5)
# studentised deleted residuals. You are looking for any values greater than + or - 3
rstudent(model5)
# check leverage plots. Points that are further out along the extremes of x will push harder on the lever (i.e. the regression line) and thus will have more leverage.
leveragePlots(model5)
########## Testing for influential observations
## added variable plots
avPlots(model5)
## Cook's distance measures the effect of deleting an observation. Data points with large residuals and high leverage may distort the outcome and accuracy of a regression. It is calculated as (for each observation) D = ((predicted Y from full model) - (predicted Y from refitted model with the observation omitted))/ (number of parameters for model)*MSE(model).
# Some texts say that observations with a Cook's distance of >1 are considered to be influential. Others identify D values > 4/(n-k-1) where n is number of observations in the model and k is number of predictors. D values represent the distance one's estimates move within the confidence ellipsoid that represents a region of plausible values for the parameter (i.e. change in regression parameters when one excludes vs includes the observation)
cutoff <- 4/(nrow(caffReg) - length(model5$coefficients) - 1) # uses the 'coefficients' element inside the model5 object
# now plot cook's distance. Cook's distance values above 1 should be checked out
plot(model5, which = 4, cook.levels = cutoff)
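# Illustrative follow-up: list the observations whose Cook's distance exceeds the cutoff
which(cooks.distance(model5) > cutoff)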
## influence plot (Note: press esc to exit graph)
influencePlot(model5, id.method = "identify", main = "Influence Plot",
sub = "Circle size is proportional to Cook's distance")
#################################################################################################### Assumption 6: Normality of residuals (i.e. error of prediction) ####################################################################################
# distribution of studentised residuals in histogram. The histogram should be approximately normally distributed (which it sort of is) and the mean should be close to 0 and the SD close to 1.
sresid <- studres(model5)
hist(sresid, freq = FALSE, main = "Distribution of Studentised Residuals")
xModel5 <- seq(min(sresid), max(sresid), length = 40)
yModel5 <- dnorm(xModel5)
lines(xModel5, yModel5)
# qq plot for studentised residuals. To confirm this you should look at a P-Plot. If the residuals are normally distributed the points should cluster tightly around the diagonal line. Regression is quite robust to violations of normality so the points don’t have to be perfectly aligned for the regression to go ahead. The further away from the diagonal the less normal the distribution of studentised residuals
qqPlot(model5, main = "QQ-Plot")
####################################################################################################### Assumption 7: Linearity and additivity #######################################################################################################################
# A key assumption behind using linear regression models for purposes of inference or prediction is that there is linearity and additivity of the relationship between dependent and independent variables:
#(a) The expected value of dependent variable is a straight-line function of each independent variable, holding the others fixed.
#(b) The slope of that line does not depend on the values of the other variables.
#(c) The effects of different independent variables on the expected value of the dependent variable are additive
######## component + residual plot. Component residual plots, an extension of partial residual plots, are a good way to see if the predictors have a linear relationship to the dependent variable. A partial residual plot essentially attempts to model the residuals of one predictor against the dependent variable. A component residual plot adds a line indicating where the line of best fit lies. A significant difference between the residual line and the component line indicates that the predictor does not have a linear relationship with the dependent variable (which is a violation of the linearity assumption)
crPlots(model5)
# Ceres plots also test for linearity. Looks much the same as above.
ceresPlots(model5)
### the gvlma package performs a global validation of linear model assumptions as well as separate evaluations of skewness, kurtosis and heteroscedasticity
library(gvlma)
gvModel5 <- gvlma(model5)
summary(gvModel5)
|
/cafRegressionAssumptions.r
|
no_license
|
ferambot/caffeineRegressionMaster
|
R
| false | false | 16,213 |
r
|
library(kalmanST)
kfadvance(1,1,1,1,1,1,1,1,1,1,1)
|
/tests/test_kfadvance.R
|
no_license
|
bentaylor1/kalmanST
|
R
| false | false | 51 |
r
|
## utility functions, presumably used mostly or entirely internally
#' Compact list then lapply
#'
#' Compacts the list (i.e., removes NULL objects), then calls [`lapply()`][base::lapply()]
#' on the result with the remaining parameters.
#' @param X the list object
#' @param ... remaining arguments to `lapply()`
#' @importFrom plyr compact
lcapply <- function(X, ...) {
X <- plyr::compact(X)
lapply(X, ...)
}
#' Front-end to dplyr::coalesce to deal with NULL vectors
#'
#' Replaces any NULL argument with a vector of `NA`, and casts every vector
#' to the same type as the last vector. After that, calls [dplyr::coalesce()].
#' @param ... the vectors to coalesce on NA
#' @return a vector of the same type and length as the last argument
#' @seealso [dplyr::coalesce()]
#' @importFrom dplyr coalesce
coalesce_ <- function(...) {
idvecs <- list(...)
idvecs <- lapply(idvecs,
function(v, template) {
if (is.null(v))
as(rep(NA, times=length(template)), typeof(template))
else
as(v, typeof(template))
},
template = idvecs[[length(idvecs)]])
dplyr::coalesce(!!!idvecs)
}
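# Illustrative usage (an assumption, not package examples):
# coalesce_(NULL, c(1, NA, 3))       # NULL becomes c(NA, NA, NA); result: 1 NA 3
# coalesce_(c(NA, "a"), c("x", "y")) # result: "x" "a"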
#' Treats zero-length character vectors as empty strings
#'
#' If the argument is a zero-length character vector (character(0)), returns
#' an empty string (which is a character vector of length 1). Otherwise passes
#' through the argument.
#' @param x the object to be tested for zero-length character vector
#' @return an empty string if `x` is a character vector of length zero, and `x`
#' otherwise
charzero_as_empty <- function(x) {
if (is.character(x) && length(x) == 0)
""
else
x
}
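# Illustrative usage (an assumption, not package examples):
# charzero_as_empty(character(0)) # ""
# charzero_as_empty("abc")        # "abc"
# charzero_as_empty(1:3)          # passed through unchanged (not a character vector)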
# rlang is used in classes.R to deal with the incorrect way in which
# the S4 system handles class collisions.
# R check incorrectly fails to recognize it there and objects to rlang
# being in Imports.
# So here's another cludgy hack
dummy <- function() {
rlang::as_character(1)
}
|
/R/utils.R
|
permissive
|
ropensci/RNeXML
|
R
| false | false | 2,015 |
r
|
# this is the course shiny project for Developing Data Products
####
library(shiny)
require(rjson)
require(rCharts)
options(RCHART_WIDTH = 1000)
load("long_melt.RData")
load("movie_rating.RData")
film <- sort(unique(long_melt$film))
shinyServer(
function(input, output) {
output$chart11 <- renderChart2({
compare = input$website
fan_comp = subset(long_melt, website == "Fandango_Stars" | website == compare)
d1 <- dPlot(
x = "rating",
y = "film",
groups = "website",
data = fan_comp,
type = "bubble"
, bounds = list( x = 250, y = 50, width = 700, height = 2400)
)
d1$xAxis( type = "addMeasureAxis" )
d1$yAxis( type = "addCategoryAxis", orderRule = "film", desc = T)
d1$legend(
x = 200,
y = 10,
width = 300,
height = 200,
horizontalAlign = "left"
)
d1$set(height = 3000, width = 1000)
return(d1)
}) # end of chart 1
output$chart12 <- renderChart2({
range = input$rating_error
rating_sub = subset(movie_rating, rating_error >= range[1] & rating_error <= range[2])
d2 <- dPlot(
x = "review_count",
y = "rating_error",
groups = c("film", "website"),
data = rating_sub,
type = "bubble"
)
d2$xAxis( type = "addMeasureAxis" )
d2$yAxis( type = "addMeasureAxis")
d2$legend(
x = 200,
y = 10,
width = 300,
height = 200,
horizontalAlign = "left"
)
d2$set(height = 800, width = 800)
return(d2)
}) # end of chart 2
# Initialize reactive values
values <- reactiveValues()
values$film <- film
output$movieSelector <- renderUI({
checkboxGroupInput('film', 'Select Movies to Compare Ratings:',
film, selected = values$film)
}) # end of box
# Add observer on select-all button
observe({
if(input$selectAll == 0) return()
values$film <- film
})
# Add observer on clear-all button
observe({
if(input$clearAll == 0) return()
values$film <- c() # empty list
})
} # end of function
)
|
/Course Project/server.R
|
no_license
|
AaronRanAn/JHU_Developing_Data_Products
|
R
| false | false | 3,828 |
r
|
##########################################################################################
##
## PLOT FUNCTION VERSION 2
##
##########################################################################################
##
##
#'@export
PlotF<-function(formula, data,na.action="na.omit",pz=NULL,nBreaks=NULL,
plotYt=TRUE,axisxdate=NULL,transf=1,model="Poisson",posts,Proc="Smooth",Type="Marg",
distl="PRED",a0=0.01,b0=0.01,ci=0.95,startdate=NULL,enddate=NULL,Freq=NULL,...){
#argumentsdesignplot <- list(...)
arg <- list(...)
## ... = typeline='l', arg1
#cols="black", arg2
# "blue",
# "lightgrey"),
#xxlab="t", arg3
# yylab=expression(paste(hat(mu)[t])), arg4
#,xxlim=NULL, arg5
#yylim=NULL, arg6
#Lty=c(1,2,1), arg7
## Lwd=c(2,2,2), arg8
#Cex=0.68, arg9
#argument breaks ==FALSE or TRUE
#NA
if(na.action=="na.omit"){na.omit(data)}
##Check formula
# if(check.env){
# envs<-lapply(formula, environment)
# hasenv<-which(sapply(envs,is.null))
# if (length(hasenv)>1){
# for(i in 2:length(hasenv))
# if (!identical(envs[[hasenv[1]]],envs[[hasenv[i]]]))
# warning("Different environments on formulas")
# }
# }
# Event=NULL
Break=NULL
###################################################################################
###################################################################################
###################################################################################
nameaux=all.vars(formula)
nameaux=nameaux[-1]
namesz=paste("Z",1:2000,sep="")
xz<- nameaux %in% namesz
if(is.null(pz)){
if(length(which(xz==TRUE))!=0)stop("Bad input for Z and pz!")
}else{
if(pz!=length(which(xz==TRUE)))stop("Bad input for Z and pz!!")
}
# Event: PEM
if(model=="PEM"){
namey=all.vars(formula)[1] #Y
Y=get(namey,data) # Y
xze<- nameaux %in% "Event"
if(length(which(xze==TRUE))==0)stop("Include an Event variable!")
#all.vars(formula) =="Event"
#rr=which(all.vars(formula) =="Event")
#aa=all.vars(formula)[rr]
Event=get("Event",data)
Break=GridP(Y, Event, nT = nBreaks)
da2=model.frame(formula,data)
namey=all.vars(formula)[1] #Y
nameaux1y=all.vars(formula)
xz1y<- nameaux1y %in% namey
namexz1y=nameaux1y[xz1y==FALSE]
da2=da2[namexz1y]
Z=NULL
if(dim(da2)[2]==1){X=NULL
}else{
nameaux1=all.vars(formula)
nameaux1=nameaux1[-1]
xz1<- nameaux1 %in% "Event"
namexz1=nameaux1[xz1==FALSE]
if(length(namexz1)==0){X=NULL}else{X=da2[namexz1]}
}
Break=GridP(Y, Event, nT = nBreaks)
}else{
Event=NULL
Break=NULL
# End Event
namey=all.vars(formula)[1] #Y
Y=get(namey,data) # Y
#Y
#names(data)=c("Y","X1","Z1","Z2","Z3")
#fz <- Y~X1+Z1+Z2+Z3
if(is.null(pz)){
da2=model.frame(formula,data)
namey=all.vars(formula)[1] #Y
nameaux1y=all.vars(formula)
xz1y<- nameaux1y %in% namey
namexz1y=nameaux1y[xz1y==FALSE]
if(dim(da2[namexz1y])[2]==0){X=NULL}else{da2=da2[namexz1y];X=da2}
Z=NULL
}else{
#if(pz!=NULL){
#names(da1)=c("Y","X1","Z1","Z2","Z3")
#fz <- Y~X1+Z1+Z2+Z3
namesz=paste("Z",1:pz,sep="")
da2=model.frame(formula,data)
Z=da2[namesz]
#Z
nameaux=all.vars(formula)
nameaux=nameaux[-1]
xz<- nameaux %in% namesz
namexz=nameaux[xz==FALSE]
if(dim(da2[namexz])[2]==0){X=NULL}else{X=da2[namexz]}
#X
#}
}
}
Yt<-Y
Xt<-X
Zt<-Z
# cat("Yt=",Yt)
# print(Xt)
# print(Zt)
# print(Event)
###################################################################################
###################################################################################
###################################################################################
# DataFrame:
#dataf<-data
#dataf<-dataf[all.vars(formula)]
#Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
#attach(dataf)
#if(model=="PEM"){
##Event=get(names(dataf)[2])
#dataf<-data
#dataf<-dataf[c(all.vars(formula)[1],colnames(data)[2],all.vars(formula)[-1])]
##Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
##dataf<-dataf[all.vars(formula)]
##Yt=get(names(dataf)[1])
#Ytdd=dataf[[colnames(dataf)[1]]]
#Eventdd=dataf[[colnames(dataf)[2]]]
#Breakdd=GridP(Ytdd, Eventdd, nT = nBreaks)
#Event<-Eventdd
#Break<-Breakdd
#Xtdd=NULL
#Ztdd=NULL
#if(is.null(pz)){
#if(dim(dataf)[2]>2){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-2
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+2])
#Xtdd[,i]=dataf[[names(dataf)[i+2]]]
#}
#}
#}
# if(is.null(pz)!=TRUE){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-2-pz
#if(ppd>=1){
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+2])
#Xtdd[,i]=dataf[[names(dataf)[i+2]]]
#}
#}
#if(pz>=1){
#Ztdd=matrix(0,nnnd,pz)
#for(j in 1:pz){
##Zt[,j]=get(names(dataf)[j+ppd+2])
#Ztdd[,j]=dataf[[names(dataf)[j+ppd+2]]]
#}
#}
#}
#}
#if(model!="PEM"){
#dataf<-data
#dataf<-dataf[all.vars(formula)]
#Event<-NULL
#Break<-NULL
##Dataframe data
#if(length(all.vars(formula))> dim(data)[2])stop("Check the formula and data.")
#if(is.data.frame(data)==FALSE)stop("The argument needs to be a data frame.")
#Ytdd=dataf[[colnames(dataf)[1]]]
#Xtdd=NULL
#Ztdd=NULL
#if(is.null(pz)){
#if(dim(dataf)[2]>1){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-1
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+1])
###print(get(names(dataf)[i+1]))
#Xtdd[,i]=dataf[[names(dataf)[i+1]]]
#}
#}
#}
#if(is.null(pz)!=TRUE){
#nnnd=dim(dataf)[1]
#ppd=dim(dataf)[2]-1-pz
#if(ppd>=1){
#Xtdd=matrix(0,nnnd,ppd)
#for(i in 1:ppd){
##Xt[,i]=get(names(dataf)[i+1])
#Xtdd[,i]=dataf[[names(dataf)[i+1]]]
#}
#}
#if(pz>=1){
#Ztdd=matrix(0,nnnd,pz)
#for(j in 1:pz){
##Zt[,j]=get(names(dataf)[j+ppd+1])
#Ztdd[,j]=dataf[[names(dataf)[j+ppd+1]]]
#}
#}
#}
#}
#Yt<-Ytdd
#Xt<-Xtdd
#Zt<-Ztdd
##detach(dataf)
##print(Yt)
##print(Xt)
##print(Zt)
###################################################################################
if(is.null(Xt)==FALSE){if(is.matrix(Xt)==FALSE){Xt=as.matrix(Xt)}}
if(is.null(Zt)==FALSE){if(is.matrix(Zt)==FALSE){Zt=as.matrix(Zt)}}
if(Proc=="Smooth"){
#Smoothing:
set.seed(1000)
nn=length(Yt)
samples=1
if(is.null(dim(posts))){samples=2000}
if(model=="PEM"){nn=length(Break)-1}
fits=SmoothingF(StaPar=posts,formula=formula,data=data,pz=pz,model=model,Type=Type,
a0=a0,b0=b0,ci=ci,splot=FALSE,samples=samples)
## Graph:
ytm=matrix(0,nn,4)
if(model=="PEM"){}else{ytm[,1]=Yt}
alpha=1-ci
#apply a transformation
sums=fits[[1]]
if(is.null(dim(posts))){sums=fits}
ytm[,1]=ytm[,1]^(transf)
ytm[,2]=sums[,1]^(transf)
ytm[,3]=sums[,3]^(transf)
ytm[,4]=sums[,4]^(transf)
minyt=min(ytm)
maxyt=max(ytm)
if(is.null(axisxdate)){
#Sequence in observation order
at=1:nn
}else{
#Date
#Let the user specify the date axis 'at' values!
#at = seq(as.Date(startdate),as.Date(enddate),Freq)
at=axisxdate
}
d=at
#if(is.null(startdate)==FALSE & is.null(enddate)==FALSE){
#dat=seq(d[1], d[length(d)], by="month")}
if(is.null(arg[[6]])){
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=FALSE,ylim=c(minyt,maxyt),col="white")
seq1=at
seq2=sort(at,decreasing = TRUE)
xxx=sort(at,decreasing = TRUE)
polygon(c(at, xxx),c((ytm[,4]),rev((ytm[,3]))),ylim=c(minyt,maxyt),col=arg[[2]][3],border=arg[[2]][3])
if(plotYt==TRUE){
par(new=TRUE)
plot(at,ytm[,1],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=TRUE,ylim=c(minyt,maxyt),lty=arg[[7]][1],
lwd=arg[[8]][1],col=arg[[2]][1])
}
par(new=TRUE)
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=TRUE,ylim=c(minyt,maxyt),lty=arg[[7]][2],
lwd=arg[[8]][2],col=arg[[2]][2],cex=arg[[9]])
}else{
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=FALSE,ylim=arg[[6]],col="white",cex=arg[[9]])
seq1=at
seq2=sort(at,decreasing = TRUE)
xxx=sort(at,decreasing = TRUE)
polygon(c(at, xxx),c((ytm[,4]),rev((ytm[,3]))),ylim=arg[[6]],col=arg[[2]][3],border=arg[[2]][3])
if(plotYt==TRUE){
par(new=TRUE)
plot(at,ytm[,1],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],ylim=arg[[6]],axes=TRUE,lty=arg[[7]][1],
lwd=arg[[8]][1],col=arg[[2]][1],cex=arg[[9]])
}
par(new=TRUE)
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],ylim=arg[[6]],axes=TRUE,lty=arg[[7]][2],
lwd=arg[[8]][2],col=arg[[2]][2],cex=arg[[9]])
}
#axis.Date(1,at=dat,labels=dat, las = 1)
##axis(2, at =round(seq(min(ytm[,4]), max(ytm[,4]),((max(ytm[,4])-min(ytm[,4]))/10)),digits=2), las = 1, tck = +0.01,cex.axis=0.7)
##if(is.null(startdate) | is.null(enddate)|is.null(Freq)|is.null(axisxdate)){
#Sequence in observation order
#seq(min(ytm[,4]), max(ytm[,4]),((max(ytm[,4]))/10))
#axis(1, at =at , las = 1, tck =+0.01,cex.axis=0.5)
##axis(1, at =round(seq(min(att), max(att),((max(att))/100))), las = 1, tck =+0.01,cex.axis=0.7)
##}else{
#Date
#Let the user specify the date axis 'at' values!
#axis.Date(1, at = seq(as.Date(startdate),as.Date(enddate),"years"),cex.axis=0.7)
#axis.Date(1, at = seq(as.Date(startdate),as.Date(enddate),Freq),labels = FALSE, tcl = -0.2,cex.axis=0.7)
##at=1:nn
##seqq=round(seq(min(att), max(att),((max(att))/100)))
##axis.Date(1, at = axisxdate[seqq],labels = axisxdate[seqq], tcl = -0.4,cex.axis=0.7)
##}
if(plotYt==TRUE){
legend("topright", c("Time Series","Smoothed Mean","95 CI"),
lty=arg[[7]],lwd=arg[[8]],col=arg[[2]],cex=arg[[9]], bty="n")
}else{
legend("topright", c("Smoothed Mean","95 CI"),
lty=arg[[7]][-1],lwd=arg[[8]][-1],col=arg[[2]][-1],cex=arg[[9]], bty="n")
}
} #End Smooth
if(Proc=="Filter"){
#Filtering:
nn=length(Yt)
nsamplex=dim(posts)[1]
if(model=="PEM"){nn=length(Break)-1}
filpar1=matrix(0,nn,nsamplex)
#samples=1
#if(is.null(dim(posts))){samples=500}
for(j in 1:nsamplex){
filparaux=FilteringF(StaPar=posts[j,],formula=formula,data=data,model=model,pz=pz,
a0=a0,b0=b0,distl=distl,splot=FALSE)
filpar1[,j]=((filparaux[2,]/ filparaux[1,]))
}
filpar=apply(filpar1,1,mean)
#filparmedian=apply(filpar1,1,median)]
print(filparaux)
alpha=1-ci
filparp1=apply(filpar1,1,function(x) quantile(x,probs=c(alpha/2),na.rm=TRUE)) #perc alpha/2
filparp2=apply(filpar1,1,function(x) quantile(x,probs=c(1-(alpha/2)),na.rm=TRUE)) #perc 1-alpha/2
## Graph:
ytm=matrix(0,nn,4)
if(model=="PEM"){}else{ytm[,1]=Yt}
#apply a transformation
#sums=fits[[1]]
ytm[,1]=ytm[,1]^(transf)
ytm[,2]=filpar^(transf)
ytm[,3]=filparp1^(transf)
ytm[,4]=filparp2^(transf)
minyt=min(ytm)
maxyt=max(ytm)
if(is.null(axisxdate)){
#Sequence in observation order
at=1:nn
}else{
#Date
#Let the user specify the date axis 'at' values!
#at = seq(as.Date(startdate),as.Date(enddate),Freq)
at=axisxdate
}
d=at
#if(is.null(startdate)==FALSE & is.null(enddate)==FALSE){
#dat=seq(d[1], d[length(d)], by="month")}
if(is.null(arg[[6]])){
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=FALSE,ylim=c(minyt,maxyt),col="white",cex=arg[[9]])
seq1=at
seq2=sort(at,decreasing = TRUE)
xxx=sort(at,decreasing = TRUE)
polygon(c(at, xxx),c((ytm[,4]),rev((ytm[,3]))),ylim=c(minyt,maxyt),col=arg[[2]][3],border=arg[[2]][3])
if(plotYt==TRUE){
par(new=TRUE)
plot(at,ytm[,1],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=TRUE,ylim=c(minyt,maxyt),lty=arg[[7]][1],
lwd=arg[[8]][1],col=arg[[2]][1],cex=arg[[9]])
}
par(new=TRUE)
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=TRUE,ylim=c(minyt,maxyt),lty=arg[[7]][2],
lwd=arg[[8]][2],col=arg[[2]][2],cex=arg[[9]])
}else{
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],axes=FALSE,ylim=arg[[6]],col="white",cex=arg[[9]])
seq1=at
seq2=sort(at,decreasing = TRUE)
xxx=sort(at,decreasing = TRUE)
polygon(c(at, xxx),c((ytm[,4]),rev((ytm[,3]))),ylim=arg[[6]],col=arg[[2]][3],border=arg[[2]][3])
if(plotYt==TRUE){
par(new=TRUE)
plot(at,ytm[,1],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],ylim=arg[[6]],axes=TRUE,lty=arg[[7]][1],
lwd=arg[[8]][1],col=arg[[2]][1],cex=arg[[9]])
}
par(new=TRUE)
plot(at,ytm[,2],xlab=arg[[3]],ylab=arg[[4]],type=arg[[1]],ylim=arg[[6]],axes=TRUE,lty=arg[[7]][2],
lwd=arg[[8]][2],col=arg[[2]][2],cex=arg[[9]])
}
if(plotYt==TRUE){
legend("topright", c("Time Series","Filtered Mean Level","95 CI"),
lty=arg[[7]],lwd=arg[[8]],col=arg[[2]],cex=arg[[9]], bty="n")
}else{
legend("topright", c("Filtered Mean","95 CI"),
lty=arg[[7]][-1],lwd=arg[[8]][-1],col=arg[[2]][-1],cex=arg[[9]], bty="n")
}
} #EndFilter
} #End PlotF
##########################################################################################
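## A hedged sketch of a call (the object names Y, X1, mydata and postsamples are
## hypothetical, and the graphical settings passed through '...' are read
## positionally as arg[[1]]..arg[[9]] in the order documented at the top of PlotF):
# PlotF(Y ~ X1, data = mydata, model = "Poisson", posts = postsamples,
#       Proc = "Smooth", Type = "Marg", ci = 0.95,
#       typeline = 'l', cols = c("black", "blue", "lightgrey"),
#       xxlab = "t", yylab = expression(hat(mu)[t]),
#       xxlim = NULL, yylim = NULL,
#       Lty = c(1, 2, 1), Lwd = c(2, 2, 2), Cex = 0.68)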
|
/R/PlotF.r
|
no_license
|
cran/NGSSEML
|
R
| false | false | 12,920 |
r
|
merge.ts = function(timestamps1, values1, timestamps2, values2) {
all.ts = c(timestamps1[!is.na(values1)], timestamps2[!is.na(values2)])
    # keep the values aligned with the NA-filtered timestamps above
    all.values = c(values1[!is.na(values1)], values2[!is.na(values2)])
unique.timestamps = sort(unique(all.ts))
merged.values = sapply(unique.timestamps,function(x) mean(all.values[all.ts==x],na.rm=T))
return(list(timestamps=unique.timestamps,values=merged.values))
}
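# Example with made-up values: NA readings are dropped, and values observed at
# the same timestamp in both series are averaged.
# ts1 <- c(1, 2, 3); v1 <- c(10, NA, 30)
# ts2 <- c(2, 3, 4); v2 <- c(20, 20, 40)
# merge.ts(ts1, v1, ts2, v2)
# # $timestamps: 1 2 3 4 ; $values: 10 20 25 40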
|
/functions/merge_ts.R
|
permissive
|
instigatorofawe/clustering_manuscript
|
R
| false | false | 388 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{SPECT}
\alias{SPECT}
\title{SPECT Heart Data Set}
\format{
A data frame with 267 rows and 23 variables:
\describe{
\item{F1 - F22}{the partial diagnosis, binary}
\item{class}{class attribute, binary (0 - normal, 1 - abnormal)}
}
}
\source{
{\url{https://archive.ics.uci.edu/ml/datasets/SPECT+Heart}}
}
\usage{
SPECT
}
\description{
The dataset describes the diagnosis of cardiac Single Photon Emission Computed Tomography (SPECT) images. Each patient is classified into one of two categories: normal and abnormal. The database of 267 SPECT image sets (patients) was processed to extract features that summarize the original SPECT images. As a result, a pattern of 44 continuous features was created for each patient. The pattern was further processed to obtain a pattern of 22 binary features.
}
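% A minimal usage sketch; it assumes the data frame is lazy-loaded with the package.
\examples{
str(SPECT)
table(SPECT$class)
}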
\keyword{datasets}
|
/man/SPECT.Rd
|
no_license
|
fanne-stat/DGCPCA
|
R
| false | true | 920 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidiers_stl.R
\name{tidiers_stl}
\alias{tidiers_stl}
\alias{sw_tidy.stl}
\alias{sw_tidy_decomp.stl}
\alias{sw_tidy_decomp.stlm}
\title{Tidying methods for STL (Seasonal, Trend, Level) decomposition of time series}
\usage{
\method{sw_tidy}{stl}(x, ...)
\method{sw_tidy_decomp}{stl}(x, timekit_idx = FALSE, rename_index = "index",
...)
\method{sw_tidy_decomp}{stlm}(x, timekit_idx = FALSE,
rename_index = "index", ...)
}
\arguments{
\item{x}{An object of class "stl" or "stlm"}
\item{...}{Not used.}
\item{timekit_idx}{Used with \code{sw_tidy_decomp}.
When \code{TRUE}, uses a timekit index (irregular, typically date or datetime) if present.}
\item{rename_index}{Used with \code{sw_tidy_decomp}.
A string representing the name of the index generated.}
}
\value{
\strong{\code{sw_tidy()}} wraps \code{sw_tidy_decomp()}
\strong{\code{sw_tidy_decomp()}} returns a tibble with the following time series attributes:
\itemize{
\item \code{index}: An index is either attempted to be extracted from the model or
a sequential index is created for plotting purposes
\item \code{season}: The seasonal component
\item \code{trend}: The trend component
\item \code{remainder}: observed - (season + trend)
\item \code{seasadj}: observed - season (or trend + remainder)
}
}
\description{
Tidying methods for STL (Seasonal, Trend, Level) decomposition of time series
}
\examples{
library(forecast)
library(sweep)
fit_stl <- USAccDeaths \%>\%
stl(s.window = "periodic")
sw_tidy_decomp(fit_stl)
}
\seealso{
\code{\link[=stl]{stl()}}
}
|
/man/tidiers_stl.Rd
|
no_license
|
datactivist/sweep
|
R
| false | true | 1,611 |
rd
|
require(TargetSearchData)
smp <- new('tsSample', RIfiles=tsd_rifiles(), CDFfiles=tsd_cdffiles())
lib <- ImportLibrary(file.path(tsd_data_path(), 'library.txt'))
# check for invalid usage
expect_error(ret <- FindAllPeaks(smp))
expect_error(ret <- FindAllPeaks(smp, lib))
# check for leucine (both are equivalent)
m <- c(102, 158, 232, 260)
x <- FindAllPeaks(smp, dev=1500, RI=306800, mz=m)
y <- FindAllPeaks(smp, lib, 'GC.5')
expect_equal(x, y)
# check for valine
m <- c(100, 144, 156, 218, 246)
x <- FindAllPeaks(smp, dev=2000, RI=271500, mz=m)
y <- FindAllPeaks(smp, lib, 'GC.3')
expect_equal(x, y)
# check searching by RT
source('mock_functions.R')
peak_search <- function(data, dev, RT, mz)
{
search <- function(x, fid) {
k <- which(abs(x$Time - RT) < dev)
y <- x$Peaks[k, mz - x$massRange[1] + 1]
j <- arrayInd(which(y > 0), dim(y))
f <- if(length(j) > 0) fid else integer(0)
cbind(Int=y[y > 0], RI=x$Index[k][j[,1]], RT=x$Time[k][j[,1]], mz=mz[j[,2]], fid=f)
}
res <- mapply(search, data, seq(data), USE.NAMES=FALSE, SIMPLIFY=FALSE)
if(nrow(res <- do.call('rbind', res)) == 0)
return(NULL)
res
}
tmp <- tempdir()
ncfiles <- file.path(tmp, sprintf("file%d.nc4", 1:3))
smp <- new('tsSample', CDFfiles=ncfiles)
# repeat 20 times with random values
for(i in 1:20) {
# write files and recover data
data <- lapply(RIfiles(smp), mock_rifile, mz_range=c(100,200), time_range=c(200, 300), n_peaks=300, n_scans=50)
RT <- runif(1, 200, 300)
mz <- sample(100:200, 5)
x <- FindAllPeaks(smp, dev=10, RT=RT, mz=mz)
y <- peak_search(data, dev=10, RT=RT, mz=mz)
expect_identical(x, y)
}
|
/inst/tinytest/test_FindAllPeaks.R
|
no_license
|
acinostroza/TargetSearch
|
R
| false | false | 1,680 |
r
|
#' Bilinear interpolation layer (2-D)
#'
#' @docType class
#'
#' @section Usage:
#' \preformatted{outputs <- layer_bilinear_interpolation_2d( inputs, resampledSize )}
#'
#' @section Arguments:
#' \describe{
#' \item{inputs}{list of size 2 where the first element is the batch of images. The second
#' element is the batch of affine transform parameters predicted for each image.}
#' \item{resampledSize}{size of the resampled output images.}
#' }
#'
#' @section Details:
#' \code{$initialize} instantiates a new class.
#'
#' \code{$call} main body.
#'
#' \code{$compute_output_shape} computes the output shape.
#'
#' @author Tustison NJ
#'
#' @return resampled batch images.
#'
#' @name BilinearInterpolationLayer2D
NULL
#' @export
BilinearInterpolationLayer2D <- R6::R6Class( "BilinearInterpolationLayer2D",
inherit = KerasLayer,
public = list(
resampledSize = NULL,
initialize = function( resampledSize )
{
K <- keras::backend()
if( K$backend() != 'tensorflow' )
{
stop( "Error: tensorflow is required for this STN implementations." )
}
if( length( resampledSize ) != 2 )
{
stop( "Error: resampled size must be a vector of length 2 (for 2-D).")
}
self$resampledSize <- resampledSize
},
call = function( inputs, mask = NULL )
{
image <- inputs[[1]]
transformParameters <- inputs[[2]]
output <-
self$affineTransformImage( image, transformParameters, self$resampledSize )
return( output )
},
compute_output_shape = function( input_shape )
{
numberOfChannels <- as.integer( tail( unlist( input_shape[[1]] ), 1 ) )
return( list( NULL, as.integer( self$resampledSize[1] ),
as.integer( self$resampledSize[2] ), numberOfChannels ) )
},
affineTransformImage = function( image, affineTransformParameters, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
numberOfChannels <- K$shape( image )[4]
transformParameters <- K$reshape( affineTransformParameters,
shape = reticulate::tuple( batchSize, 2L, 3L ) )
regularGrids <- self$makeRegularGrids( batchSize, resampledSize )
sampledGrids <- K$batch_dot( transformParameters, regularGrids )
interpolatedImage <- self$interpolate( image, sampledGrids, resampledSize )
newOutputShape <- reticulate::tuple( batchSize, as.integer( resampledSize[1] ),
as.integer( resampledSize[2] ), numberOfChannels )
interpolatedImage <- K$reshape( interpolatedImage, shape = newOutputShape )
return( interpolatedImage )
},
makeRegularGrids = function( batchSize, resampledSize )
{
K <- keras::backend()
xLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[2] ) )
yLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[1] ) )
coords <- tensorflow::tf$meshgrid( xLinearSpace, yLinearSpace )
coords[[1]] <- K$flatten( coords[[1]] )
coords[[2]] <- K$flatten( coords[[2]] )
ones <- K$ones_like( coords[[1]] )
regularGrid <- K$concatenate( list( coords[[1]], coords[[2]], ones ), axis = 0L )
regularGrid <- K$flatten( regularGrid )
regularGrids <- K$tile( regularGrid, K$stack( list( batchSize ) ) )
regularGrids <- K$reshape( regularGrids,
reticulate::tuple( batchSize, 3L, as.integer( prod( resampledSize ) ) ) )
return( regularGrids )
},
interpolate = function( image, sampledGrids, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
height <- K$shape( image )[2]
width <- K$shape( image )[3]
numberOfChannels <- K$shape( image )[4]
x <- K$cast( K$flatten( sampledGrids[, 1,] ), dtype = 'float32' )
y <- K$cast( K$flatten( sampledGrids[, 2,] ), dtype = 'float32' )
x <- 0.5 * ( x + 1.0 ) * K$cast( width, dtype = 'float32' )
y <- 0.5 * ( y + 1.0 ) * K$cast( height, dtype = 'float32' )
x0 <- K$cast( x, dtype = 'int32' )
x1 <- x0 + 1L
y0 <- K$cast( y, dtype = 'int32' )
y1 <- y0 + 1L
xMax <- as.integer( unlist( K$int_shape( image ) )[3] ) - 1L
yMax <- as.integer( unlist( K$int_shape( image ) )[2] ) - 1L
x0 <- K$clip( x0, 0L, xMax )
x1 <- K$clip( x1, 0L, xMax )
y0 <- K$clip( y0, 0L, yMax )
y1 <- K$clip( y1, 0L, yMax )
batchPixels <- K$arange( 0L, batchSize ) * ( height * width )
batchPixels <- K$expand_dims( batchPixels, axis = -1L )
base <- K$repeat_elements(
batchPixels, rep = as.integer( prod( resampledSize ) ), axis = 1L )
base <- K$flatten( base )
indices00 <- base + y0 * width + x0
indices01 <- base + y1 * width + x0
indices10 <- base + y0 * width + x1
indices11 <- base + y1 * width + x1
flatImage <- K$reshape( image, shape = c( -1L, numberOfChannels ) )
flatImage <- K$cast( flatImage, dtype = 'float32' )
pixelValues00 <- K$gather( flatImage, indices00 )
pixelValues01 <- K$gather( flatImage, indices01 )
pixelValues10 <- K$gather( flatImage, indices10 )
pixelValues11 <- K$gather( flatImage, indices11 )
x0 <- K$cast( x0, dtype = 'float32' )
x1 <- K$cast( x1, dtype = 'float32' )
y0 <- K$cast( y0, dtype = 'float32' )
y1 <- K$cast( y1, dtype = 'float32' )
weight00 <- K$expand_dims( ( ( x1 - x ) * ( y1 - y ) ), axis = 1L )
weight01 <- K$expand_dims( ( ( x1 - x ) * ( y - y0 ) ), axis = 1L )
weight10 <- K$expand_dims( ( ( x - x0 ) * ( y1 - y ) ), axis = 1L )
weight11 <- K$expand_dims( ( ( x - x0 ) * ( y - y0 ) ), axis = 1L )
interpolatedValues00 <- weight00 * pixelValues00
interpolatedValues01 <- weight01 * pixelValues01
interpolatedValues10 <- weight10 * pixelValues10
    interpolatedValues11 <- weight11 * pixelValues11
interpolatedValues <- interpolatedValues00 + interpolatedValues01 +
interpolatedValues10 + interpolatedValues11
return( interpolatedValues )
}
)
)
layer_bilinear_interpolation_2d <- function( objects, resampledSize ) {
create_layer( BilinearInterpolationLayer2D, objects,
list( resampledSize = resampledSize )
)
}
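## A hedged usage sketch (the shapes, layer sizes and localization branch below
## are made up, not part of this file): the layer is called on a list of two
## tensors, the image batch and the six affine parameters predicted per image.
# inputImage <- keras::layer_input( shape = c( 64L, 64L, 1L ) )
# affineParameters <- inputImage %>%
#   keras::layer_flatten() %>%
#   keras::layer_dense( units = 6L, activation = "linear" )
# warpedImage <- layer_bilinear_interpolation_2d(
#   list( inputImage, affineParameters ), resampledSize = c( 32, 32 ) )
# stnModel <- keras::keras_model( inputs = inputImage, outputs = warpedImage )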
#' Trilinear interpolation layer (3-D)
#'
#' @docType class
#'
#' @section Usage:
#' \preformatted{outputs <- layer_trilinear_interpolation_3d( inputs, resampledSize )}
#'
#' @section Arguments:
#' \describe{
#' \item{inputs}{list of size 2 where the first element is the batch of images. The second
#' element is the batch of affine transform parameters predicted for each image.}
#' \item{resampledSize}{size of the resampled output images.}
#' }
#'
#' @section Details:
#' \code{$initialize} instantiates a new class.
#'
#' \code{$call} main body.
#'
#' \code{$compute_output_shape} computes the output shape.
#'
#' @author Tustison NJ
#'
#' @return resampled batch images.
#'
#' @name TrilinearInterpolationLayer3D
NULL
#' @export
TrilinearInterpolationLayer3D <- R6::R6Class( "TrilinearInterpolationLayer3D",
inherit = KerasLayer,
public = list(
resampledSize = NULL,
initialize = function( resampledSize )
{
K <- keras::backend()
if( K$backend() != 'tensorflow' )
{
stop( "Error: tensorflow is required for this STN implementations." )
}
if( length( resampledSize ) != 3 )
{
stop( "Error: resampled size must be a vector of length 3 (for 3-D).")
}
self$resampledSize <- resampledSize
},
call = function( inputs, mask = NULL )
{
image <- inputs[[1]]
transformParameters <- inputs[[2]]
output <-
self$affineTransformImage( image, transformParameters, self$resampledSize )
return( output )
},
compute_output_shape = function( input_shape )
{
numberOfChannels <- as.integer( tail( unlist( input_shape[[1]] ), 1 ) )
return( list( NULL, as.integer( self$resampledSize[1] ),
as.integer( self$resampledSize[2] ), as.integer( self$resampledSize[3] ),
numberOfChannels ) )
},
affineTransformImage = function( image, affineTransformParameters, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
numberOfChannels <- K$shape( image )[5]
transformParameters <- K$reshape( affineTransformParameters,
shape = reticulate::tuple( batchSize, 3L, 4L ) )
regularGrids <- self$makeRegularGrids( batchSize, resampledSize )
sampledGrids <- K$batch_dot( transformParameters, regularGrids )
interpolatedImage <- self$interpolate( image, sampledGrids, resampledSize )
newOutputShape <- reticulate::tuple( batchSize, as.integer( resampledSize[1] ),
as.integer( resampledSize[2] ), as.integer( resampledSize[3] ), numberOfChannels )
interpolatedImage <- K$reshape( interpolatedImage, shape = newOutputShape )
return( interpolatedImage )
},
makeRegularGrids = function( batchSize, resampledSize )
{
K <- keras::backend()
xLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[2] ) )
yLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[1] ) )
zLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[3] ) )
coords <- tensorflow::tf$meshgrid( xLinearSpace, yLinearSpace, zLinearSpace )
coords[[1]] <- K$flatten( coords[[1]] )
coords[[2]] <- K$flatten( coords[[2]] )
coords[[3]] <- K$flatten( coords[[3]] )
ones <- K$ones_like( coords[[1]] )
regularGrid <- K$concatenate( list( coords[[1]], coords[[2]], coords[[3]], ones ), 0L )
regularGrid <- K$flatten( regularGrid )
regularGrids <- K$tile( regularGrid, K$stack( list( batchSize ) ) )
regularGrids <- K$reshape( regularGrids,
reticulate::tuple( batchSize, 4L, as.integer( prod( resampledSize ) ) ) )
return( regularGrids )
},
interpolate = function( image, sampledGrids, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
height <- K$shape( image )[2]
width <- K$shape( image )[3]
depth <- K$shape( image )[4]
numberOfChannels <- K$shape( image )[5]
x <- K$cast( K$flatten( sampledGrids[, 1,] ), dtype = 'float32' )
y <- K$cast( K$flatten( sampledGrids[, 2,] ), dtype = 'float32' )
z <- K$cast( K$flatten( sampledGrids[, 3,] ), dtype = 'float32' )
x <- 0.5 * ( x + 1.0 ) * K$cast( width, dtype = 'float32' )
y <- 0.5 * ( y + 1.0 ) * K$cast( height, dtype = 'float32' )
z <- 0.5 * ( z + 1.0 ) * K$cast( depth, dtype = 'float32' )
x0 <- K$cast( x, dtype = 'int32' )
x1 <- x0 + 1L
y0 <- K$cast( y, dtype = 'int32' )
y1 <- y0 + 1L
z0 <- K$cast( z, dtype = 'int32' )
z1 <- z0 + 1L
xMax <- as.integer( unlist( K$int_shape( image ) )[2] ) - 1L
yMax <- as.integer( unlist( K$int_shape( image ) )[1] ) - 1L
zMax <- as.integer( unlist( K$int_shape( image ) )[3] ) - 1L
x0 <- K$clip( x0, 0L, xMax )
x1 <- K$clip( x1, 0L, xMax )
y0 <- K$clip( y0, 0L, yMax )
y1 <- K$clip( y1, 0L, yMax )
z0 <- K$clip( z0, 0L, zMax )
z1 <- K$clip( z1, 0L, zMax )
batchPixels <- K$arange( 0L, batchSize ) * ( height * width * depth )
batchPixels <- K$expand_dims( batchPixels, axis = -1L )
base <- K$repeat_elements(
batchPixels, rep = as.integer( prod( resampledSize ) ), axis = 1L )
base <- K$flatten( base )
indices000 <- base + z0 * ( width * height ) + y0 * width + x0
indices001 <- base + z1 * ( width * height ) + y0 * width + x0
indices010 <- base + z0 * ( width * height ) + y1 * width + x0
indices011 <- base + z1 * ( width * height ) + y1 * width + x0
indices100 <- base + z0 * ( width * height ) + y0 * width + x1
indices101 <- base + z1 * ( width * height ) + y0 * width + x1
indices110 <- base + z0 * ( width * height ) + y1 * width + x1
indices111 <- base + z1 * ( width * height ) + y1 * width + x1
flatImage <- K$reshape( image, shape = c( -1L, numberOfChannels ) )
flatImage <- K$cast( flatImage, dtype = 'float32' )
pixelValues000 <- K$gather( flatImage, indices000 )
pixelValues001 <- K$gather( flatImage, indices001 )
pixelValues010 <- K$gather( flatImage, indices010 )
pixelValues011 <- K$gather( flatImage, indices011 )
pixelValues100 <- K$gather( flatImage, indices100 )
pixelValues101 <- K$gather( flatImage, indices101 )
pixelValues110 <- K$gather( flatImage, indices110 )
pixelValues111 <- K$gather( flatImage, indices111 )
x0 <- K$cast( x0, dtype = 'float32' )
x1 <- K$cast( x1, dtype = 'float32' )
y0 <- K$cast( y0, dtype = 'float32' )
y1 <- K$cast( y1, dtype = 'float32' )
z0 <- K$cast( z0, dtype = 'float32' )
z1 <- K$cast( z1, dtype = 'float32' )
weight000 <- K$expand_dims( ( ( x1 - x ) * ( y1 - y ) * ( z1 - z ) ), axis = 1L )
weight001 <- K$expand_dims( ( ( x1 - x ) * ( y1 - y ) * ( z - z0 ) ), axis = 1L )
weight010 <- K$expand_dims( ( ( x1 - x ) * ( y - y0 ) * ( z1 - z ) ), axis = 1L )
weight011 <- K$expand_dims( ( ( x1 - x ) * ( y - y0 ) * ( z - z0 ) ), axis = 1L )
weight100 <- K$expand_dims( ( ( x - x0 ) * ( y1 - y ) * ( z1 - z ) ), axis = 1L )
weight101 <- K$expand_dims( ( ( x - x0 ) * ( y1 - y ) * ( z - z0 ) ), axis = 1L )
weight110 <- K$expand_dims( ( ( x - x0 ) * ( y - y0 ) * ( z1 - z ) ), axis = 1L )
weight111 <- K$expand_dims( ( ( x - x0 ) * ( y - y0 ) * ( z - z0 ) ), axis = 1L )
interpolatedValues000 <- weight000 * pixelValues000
interpolatedValues001 <- weight001 * pixelValues001
interpolatedValues010 <- weight010 * pixelValues010
interpolatedValues011 <- weight011 * pixelValues011
interpolatedValues100 <- weight100 * pixelValues100
interpolatedValues101 <- weight101 * pixelValues101
interpolatedValues110 <- weight110 * pixelValues110
interpolatedValues111 <- weight111 * pixelValues111
interpolatedValues <-
interpolatedValues000 +
interpolatedValues001 +
interpolatedValues010 +
interpolatedValues011 +
interpolatedValues100 +
interpolatedValues101 +
interpolatedValues110 +
interpolatedValues111
return( interpolatedValues )
}
)
)
layer_trilinear_interpolation_3d <- function( objects, resampledSize ) {
create_layer( TrilinearInterpolationLayer3D, objects,
list( resampledSize = resampledSize )
)
}
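## The 3-D wrapper follows the same calling convention as the 2-D sketch above,
## but expects 12 affine parameters (a 3 x 4 matrix) per image and a length-3
## resampled size; a hypothetical call might look like:
# warpedVolume <- layer_trilinear_interpolation_3d(
#   list( inputVolume, affineParameters3D ), resampledSize = c( 32, 32, 32 ) )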
|
/R/spatialTransformerNetworkUtilities.R
|
no_license
|
neuroimaginador/ANTsRNet
|
R
| false | false | 14,672 |
r
|
#' Bilinear interpolation layer (2-D)
#'
#' @docType class
#'
#' @section Usage:
#' \preformatted{outputs <- layer_bilinear_interpolation_2d( inputs, resampledSize )}
#'
#' @section Arguments:
#' \describe{
#' \item{inputs}{list of size 2 where the first element are the images. The second
#' element are the weights.}
#' \item{resampledSize}{size of the resampled output images.}
#' }
#'
#' @section Details:
#' \code{$initialize} instantiates a new class.
#'
#' \code{$call} main body.
#'
#' \code{$compute_output_shape} computes the output shape.
#'
#' @author Tustison NJ
#'
#' @return resampled batch images.
#'
#' @name BilinearInterpolationLayer2D
NULL
#' @export
BilinearInterpolationLayer2D <- R6::R6Class( "BilinearInterpolationLayer2D",
inherit = KerasLayer,
public = list(
resampledSize = NULL,
initialize = function( resampledSize )
{
K <- keras::backend()
if( K$backend() != 'tensorflow' )
{
stop( "Error: tensorflow is required for this STN implementations." )
}
if( length( resampledSize ) != 2 )
{
stop( "Error: resampled size must be a vector of length 2 (for 2-D).")
}
self$resampledSize <- resampledSize
},
call = function( inputs, mask = NULL )
{
image <- inputs[[1]]
transformParameters <- inputs[[2]]
output <-
self$affineTransformImage( image, transformParameters, self$resampledSize )
return( output )
},
compute_output_shape = function( input_shape )
{
numberOfChannels <- as.integer( tail( unlist( input_shape[[1]] ), 1 ) )
return( list( NULL, as.integer( self$resampledSize[1] ),
as.integer( self$resampledSize[2] ), numberOfChannels ) )
},
affineTransformImage = function( image, affineTransformParameters, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
numberOfChannels <- K$shape( image )[4]
transformParameters <- K$reshape( affineTransformParameters,
shape = reticulate::tuple( batchSize, 2L, 3L ) )
regularGrids <- self$makeRegularGrids( batchSize, resampledSize )
sampledGrids <- K$batch_dot( transformParameters, regularGrids )
interpolatedImage <- self$interpolate( image, sampledGrids, resampledSize )
newOutputShape <- reticulate::tuple( batchSize, as.integer( resampledSize[1] ),
as.integer( resampledSize[2] ), numberOfChannels )
interpolatedImage <- K$reshape( interpolatedImage, shape = newOutputShape )
return( interpolatedImage )
},
makeRegularGrids = function( batchSize, resampledSize )
{
K <- keras::backend()
xLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[2] ) )
yLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[1] ) )
coords <- tensorflow::tf$meshgrid( xLinearSpace, yLinearSpace )
coords[[1]] <- K$flatten( coords[[1]] )
coords[[2]] <- K$flatten( coords[[2]] )
ones <- K$ones_like( coords[[1]] )
regularGrid <- K$concatenate( list( coords[[1]], coords[[2]], ones ), axis = 0L )
regularGrid <- K$flatten( regularGrid )
regularGrids <- K$tile( regularGrid, K$stack( list( batchSize ) ) )
regularGrids <- K$reshape( regularGrids,
reticulate::tuple( batchSize, 3L, as.integer( prod( resampledSize ) ) ) )
return( regularGrids )
},
interpolate = function( image, sampledGrids, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
height <- K$shape( image )[2]
width <- K$shape( image )[3]
numberOfChannels <- K$shape( image )[4]
x <- K$cast( K$flatten( sampledGrids[, 1,] ), dtype = 'float32' )
y <- K$cast( K$flatten( sampledGrids[, 2,] ), dtype = 'float32' )
x <- 0.5 * ( x + 1.0 ) * K$cast( width, dtype = 'float32' )
y <- 0.5 * ( y + 1.0 ) * K$cast( height, dtype = 'float32' )
x0 <- K$cast( x, dtype = 'int32' )
x1 <- x0 + 1L
y0 <- K$cast( y, dtype = 'int32' )
y1 <- y0 + 1L
xMax <- as.integer( unlist( K$int_shape( image ) )[3] ) - 1L
yMax <- as.integer( unlist( K$int_shape( image ) )[2] ) - 1L
x0 <- K$clip( x0, 0L, xMax )
x1 <- K$clip( x1, 0L, xMax )
y0 <- K$clip( y0, 0L, yMax )
y1 <- K$clip( y1, 0L, yMax )
batchPixels <- K$arange( 0L, batchSize ) * ( height * width )
batchPixels <- K$expand_dims( batchPixels, axis = -1L )
base <- K$repeat_elements(
batchPixels, rep = as.integer( prod( resampledSize ) ), axis = 1L )
base <- K$flatten( base )
indices00 <- base + y0 * width + x0
indices01 <- base + y1 * width + x0
indices10 <- base + y0 * width + x1
indices11 <- base + y1 * width + x1
flatImage <- K$reshape( image, shape = c( -1L, numberOfChannels ) )
flatImage <- K$cast( flatImage, dtype = 'float32' )
pixelValues00 <- K$gather( flatImage, indices00 )
pixelValues01 <- K$gather( flatImage, indices01 )
pixelValues10 <- K$gather( flatImage, indices10 )
pixelValues11 <- K$gather( flatImage, indices11 )
x0 <- K$cast( x0, dtype = 'float32' )
x1 <- K$cast( x1, dtype = 'float32' )
y0 <- K$cast( y0, dtype = 'float32' )
y1 <- K$cast( y1, dtype = 'float32' )
weight00 <- K$expand_dims( ( ( x1 - x ) * ( y1 - y ) ), axis = 1L )
weight01 <- K$expand_dims( ( ( x1 - x ) * ( y - y0 ) ), axis = 1L )
weight10 <- K$expand_dims( ( ( x - x0 ) * ( y1 - y ) ), axis = 1L )
weight11 <- K$expand_dims( ( ( x - x0 ) * ( y - y0 ) ), axis = 1L )
interpolatedValues00 <- weight00 * pixelValues00
interpolatedValues01 <- weight01 * pixelValues01
interpolatedValues10 <- weight10 * pixelValues10
    interpolatedValues11 <- weight11 * pixelValues11
interpolatedValues <- interpolatedValues00 + interpolatedValues01 +
interpolatedValues10 + interpolatedValues11
return( interpolatedValues )
}
)
)
layer_bilinear_interpolation_2d <- function( objects, resampledSize ) {
create_layer( BilinearInterpolationLayer2D, objects,
list( resampledSize = resampledSize )
)
}
#' Trilinear interpolation layer (3-D)
#'
#' @docType class
#'
#' @section Usage:
#' \preformatted{outputs <- layer_trilinear_interpolation_3d( inputs, resampledSize )}
#'
#' @section Arguments:
#' \describe{
#' \item{inputs}{list of size 2 where the first element are the images. The second
#' element are the weights.}
#' \item{resampledSize}{size of the resampled output images.}
#' }
#'
#' @section Details:
#' \code{$initialize} instantiates a new class.
#'
#' \code{$call} main body.
#'
#' \code{$compute_output_shape} computes the output shape.
#'
#' @author Tustison NJ
#'
#' @return resampled batch images.
#'
#' @name TrilinearInterpolationLayer3D
NULL
#' @export
TrilinearInterpolationLayer3D <- R6::R6Class( "TrilinearInterpolationLayer3D",
inherit = KerasLayer,
public = list(
resampledSize = NULL,
initialize = function( resampledSize )
{
K <- keras::backend()
if( K$backend() != 'tensorflow' )
{
stop( "Error: tensorflow is required for this STN implementations." )
}
if( length( resampledSize ) != 3 )
{
stop( "Error: resampled size must be a vector of length 3 (for 3-D).")
}
self$resampledSize <- resampledSize
},
call = function( inputs, mask = NULL )
{
image <- inputs[[1]]
transformParameters <- inputs[[2]]
output <-
self$affineTransformImage( image, transformParameters, self$resampledSize )
return( output )
},
compute_output_shape = function( input_shape )
{
numberOfChannels <- as.integer( tail( unlist( input_shape[[1]] ), 1 ) )
return( list( NULL, as.integer( self$resampledSize[1] ),
as.integer( self$resampledSize[2] ), as.integer( self$resampledSize[3] ),
numberOfChannels ) )
},
affineTransformImage = function( image, affineTransformParameters, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
numberOfChannels <- K$shape( image )[5]
transformParameters <- K$reshape( affineTransformParameters,
shape = reticulate::tuple( batchSize, 3L, 4L ) )
regularGrids <- self$makeRegularGrids( batchSize, resampledSize )
sampledGrids <- K$batch_dot( transformParameters, regularGrids )
interpolatedImage <- self$interpolate( image, sampledGrids, resampledSize )
newOutputShape <- reticulate::tuple( batchSize, as.integer( resampledSize[1] ),
as.integer( resampledSize[2] ), as.integer( resampledSize[3] ), numberOfChannels )
interpolatedImage <- K$reshape( interpolatedImage, shape = newOutputShape )
return( interpolatedImage )
},
makeRegularGrids = function( batchSize, resampledSize )
{
K <- keras::backend()
xLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[2] ) )
yLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[1] ) )
zLinearSpace <- tensorflow::tf$linspace( -1.0, 1.0, as.integer( resampledSize[3] ) )
coords <- tensorflow::tf$meshgrid( xLinearSpace, yLinearSpace, zLinearSpace )
coords[[1]] <- K$flatten( coords[[1]] )
coords[[2]] <- K$flatten( coords[[2]] )
coords[[3]] <- K$flatten( coords[[3]] )
ones <- K$ones_like( coords[[1]] )
regularGrid <- K$concatenate( list( coords[[1]], coords[[2]], coords[[3]], ones ), 0L )
regularGrid <- K$flatten( regularGrid )
regularGrids <- K$tile( regularGrid, K$stack( list( batchSize ) ) )
regularGrids <- K$reshape( regularGrids,
reticulate::tuple( batchSize, 4L, as.integer( prod( resampledSize ) ) ) )
return( regularGrids )
},
interpolate = function( image, sampledGrids, resampledSize )
{
K <- keras::backend()
batchSize <- K$shape( image )[1]
height <- K$shape( image )[2]
width <- K$shape( image )[3]
depth <- K$shape( image )[4]
numberOfChannels <- K$shape( image )[5]
x <- K$cast( K$flatten( sampledGrids[, 1,] ), dtype = 'float32' )
y <- K$cast( K$flatten( sampledGrids[, 2,] ), dtype = 'float32' )
z <- K$cast( K$flatten( sampledGrids[, 3,] ), dtype = 'float32' )
x <- 0.5 * ( x + 1.0 ) * K$cast( width, dtype = 'float32' )
y <- 0.5 * ( y + 1.0 ) * K$cast( height, dtype = 'float32' )
z <- 0.5 * ( z + 1.0 ) * K$cast( depth, dtype = 'float32' )
x0 <- K$cast( x, dtype = 'int32' )
x1 <- x0 + 1L
y0 <- K$cast( y, dtype = 'int32' )
y1 <- y0 + 1L
z0 <- K$cast( z, dtype = 'int32' )
z1 <- z0 + 1L
xMax <- as.integer( unlist( K$int_shape( image ) )[2] ) - 1L
yMax <- as.integer( unlist( K$int_shape( image ) )[1] ) - 1L
zMax <- as.integer( unlist( K$int_shape( image ) )[3] ) - 1L
x0 <- K$clip( x0, 0L, xMax )
x1 <- K$clip( x1, 0L, xMax )
y0 <- K$clip( y0, 0L, yMax )
y1 <- K$clip( y1, 0L, yMax )
z0 <- K$clip( z0, 0L, zMax )
z1 <- K$clip( z1, 0L, zMax )
batchPixels <- K$arange( 0L, batchSize ) * ( height * width * depth )
batchPixels <- K$expand_dims( batchPixels, axis = -1L )
base <- K$repeat_elements(
batchPixels, rep = as.integer( prod( resampledSize ) ), axis = 1L )
base <- K$flatten( base )
indices000 <- base + z0 * ( width * height ) + y0 * width + x0
indices001 <- base + z1 * ( width * height ) + y0 * width + x0
indices010 <- base + z0 * ( width * height ) + y1 * width + x0
indices011 <- base + z1 * ( width * height ) + y1 * width + x0
indices100 <- base + z0 * ( width * height ) + y0 * width + x1
indices101 <- base + z1 * ( width * height ) + y0 * width + x1
indices110 <- base + z0 * ( width * height ) + y1 * width + x1
indices111 <- base + z1 * ( width * height ) + y1 * width + x1
flatImage <- K$reshape( image, shape = c( -1L, numberOfChannels ) )
flatImage <- K$cast( flatImage, dtype = 'float32' )
pixelValues000 <- K$gather( flatImage, indices000 )
pixelValues001 <- K$gather( flatImage, indices001 )
pixelValues010 <- K$gather( flatImage, indices010 )
pixelValues011 <- K$gather( flatImage, indices011 )
pixelValues100 <- K$gather( flatImage, indices100 )
pixelValues101 <- K$gather( flatImage, indices101 )
pixelValues110 <- K$gather( flatImage, indices110 )
pixelValues111 <- K$gather( flatImage, indices111 )
x0 <- K$cast( x0, dtype = 'float32' )
x1 <- K$cast( x1, dtype = 'float32' )
y0 <- K$cast( y0, dtype = 'float32' )
y1 <- K$cast( y1, dtype = 'float32' )
z0 <- K$cast( z0, dtype = 'float32' )
z1 <- K$cast( z1, dtype = 'float32' )
weight000 <- K$expand_dims( ( ( x1 - x ) * ( y1 - y ) * ( z1 - z ) ), axis = 1L )
weight001 <- K$expand_dims( ( ( x1 - x ) * ( y1 - y ) * ( z - z0 ) ), axis = 1L )
weight010 <- K$expand_dims( ( ( x1 - x ) * ( y - y0 ) * ( z1 - z ) ), axis = 1L )
weight011 <- K$expand_dims( ( ( x1 - x ) * ( y - y0 ) * ( z - z0 ) ), axis = 1L )
weight100 <- K$expand_dims( ( ( x - x0 ) * ( y1 - y ) * ( z1 - z ) ), axis = 1L )
weight101 <- K$expand_dims( ( ( x - x0 ) * ( y1 - y ) * ( z - z0 ) ), axis = 1L )
weight110 <- K$expand_dims( ( ( x - x0 ) * ( y - y0 ) * ( z1 - z ) ), axis = 1L )
weight111 <- K$expand_dims( ( ( x - x0 ) * ( y - y0 ) * ( z - z0 ) ), axis = 1L )
interpolatedValues000 <- weight000 * pixelValues000
interpolatedValues001 <- weight001 * pixelValues001
interpolatedValues010 <- weight010 * pixelValues010
interpolatedValues011 <- weight011 * pixelValues011
interpolatedValues100 <- weight100 * pixelValues100
interpolatedValues101 <- weight101 * pixelValues101
interpolatedValues110 <- weight110 * pixelValues110
interpolatedValues111 <- weight111 * pixelValues111
interpolatedValues <-
interpolatedValues000 +
interpolatedValues001 +
interpolatedValues010 +
interpolatedValues011 +
interpolatedValues100 +
interpolatedValues101 +
interpolatedValues110 +
interpolatedValues111
return( interpolatedValues )
}
)
)
layer_trilinear_interpolation_3d <- function( objects, resampledSize ) {
create_layer( TrilinearInterpolationLayer3D, objects,
list( resampledSize = resampledSize )
)
}
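## Illustrative usage sketch (added for clarity; not part of the original
## source). It assumes a working keras/tensorflow backend; the input shape,
## the flattened 3 x 4 affine parameter vector and the resampled size below
## are placeholder values only.
## inputImage <- keras::layer_input( shape = c( 64L, 64L, 64L, 1L ) )
## affineParameters <- keras::layer_input( shape = c( 12L ) )
## resampled <- layer_trilinear_interpolation_3d(
##   list( inputImage, affineParameters ), resampledSize = c( 32, 32, 32 ) )
## stnModel <- keras::keras_model( inputs = list( inputImage, affineParameters ),
##   outputs = resampled )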
|
#' Creates a data.frame representing Airly meta
#' @param item list returned by Airly API
#' @return data.frame representing an airly_meta
#'
create_airly_meta <- function(item) {
if (exists("name", where = item) & exists("levels", where = item)) {
for( i in 1:length(item$name)) {
item$levels[[i]]$name <- item$name[i]
}
item <- do.call(rbind, item$levels)
item <- item[, -which(names(item) %in% c("values", "level"))]
item
} else {
NULL
}
}
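## Illustrative sketch (added for clarity; not part of the original file).
## The `item` list below is a hypothetical, minimal stand-in for the structure
## returned by the Airly API, just to show how the function flattens it:
## item <- list(
##   name = c("PM25", "PM10"),
##   levels = list(
##     data.frame(minValue = 0, maxValue = 25, values = "0-25", level = 1),
##     data.frame(minValue = 0, maxValue = 50, values = "0-50", level = 1)
##   )
## )
## create_airly_meta(item)
## # one row per level, with the pollutant name attached and the
## # "values"/"level" helper columns dropped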
#' Checks whether the given object is correctly defined
#'
#' @param airly_meta object of the class airly_meta
validate_airly_meta <- function(airly_meta) {
assert(all(c("name", "minValue","maxValue") %in% names(airly_meta)), "Object must have max/minValue and name field")
}
|
/R/airly_meta.R
|
no_license
|
cran/aiRly
|
R
| false | false | 793 |
r
|
testlist <- list(mu = 1.53343154499748e-319, var = 0)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
|
/metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612989027-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 121 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f7Popover.R
\name{f7PopoverTarget}
\alias{f7PopoverTarget}
\title{Framework7 popover target}
\usage{
f7PopoverTarget(tag, targetId)
}
\arguments{
\item{tag}{Tag that will be targeted. Must be an f7Input element.}
\item{targetId}{Popover id. Must correspond to the \link{f7Popover} targetId.}
}
\description{
This must be used in combination with \link{f7Popover}.
Only works for input elements!
}
|
/man/f7PopoverTarget.Rd
|
no_license
|
cpsievert/shinyMobile
|
R
| false | true | 473 |
rd
|
source("date_sign.R")
source("study.R")
count_date <- function(sst, date = "0000-00-00"){
l <- length(time(sst))
i <- l
num <- 0
while(i>0){
if(as.Date(time(sst)[i])==date) num <- i
i<-i-1
}
if(num==0) print("count_date function says: out of bound")
else num
}
var_to_cap <- function(VaR,cap_max,cap_med,var_med=0.2){
cap <- cap_max - ((cap_max-cap_med)/(var_med))*VaR
cap}
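## Worked example (added for clarity; the numbers are arbitrary): the mapping is
## linear in VaR, so with cap_max = 1000, cap_med = 700 and var_med = 0.2:
## var_to_cap(VaR = 0, cap_max = 1000, cap_med = 700) # 1000, no risk -> full capital
## var_to_cap(VaR = 0.1, cap_max = 1000, cap_med = 700) # 850
## var_to_cap(VaR = 0.2, cap_max = 1000, cap_med = 700) # 700, VaR at var_med -> cap_med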
clean_date <- function(date_up,date_down,debug=0){
if(debug) cat("starting clean_date function\n")
date <- c(date_up,date_down)
date <- sort(date)
date <- c(date,as.Date("2020-01-01"):as.Date("2021-01-01"))
if(debug) cat("eliminate duplicates\n")
l <- length(date_up)+length(date_down)
for(i in 1:l){
if(debug) cat("in the loop ", i, "on ", l, ":date array ", date,"\n")
if(date[i+1]==date[i]){
for(j in i:(2*l)){
date[j] <- date[j+1]
}
l <- l-1
}
if(debug) cat("end loop\n")
}
date <- date[1:l]
date}
interest <- function(data_ohlc, hta_signals, perc,mode=1,yinf=0.5,ysup=1.5,title="Backtest",plot=TRUE, end_delta=130, cap=1000, cap_med_fact=0.7, n_mean, debug=0){
## STUDY THE CAPITAL EVOLUTION
## mode 1 -> cumulative sum
## mode 2 -> exponential growth
    ## mode 3 -> EWMA capitalization
if(mode==1){
li <- sum(perc)
if(plot==TRUE){
plot(cumsum(perc),type='l',ylab="variation",xlab="time",main=title)
}
return(li)
}
if(mode==2){
capt <- c(cap)
for(i in 1:length(perc)){capt[i+1] <- capt[i]*(1+perc[i])}
if(plot==TRUE){
plot(x=c(1:length(capt)),y=capt, type='l', ylim=c(yinf*cap,ysup*cap), ylab="capital", xlab="time",main=title)
}
return(capt)
}
if(mode==3){ # <- EWMA_RiskMetrics()®
sst_closure <- data_ohlc[,4]
capt <- c(cap)
if(debug) cat("getting cleaned date signals\n")
date_signals <- clean_date(date_sign(sst_closure,hta_signals,1),date_sign(sst_closure,hta_signals,3),debug=debug)
for(i in 1:length(perc)){
if(debug){
cat("\n\nstarting calc new position for\n")
print(perc[i])
print(date_signals[i])
}
day_numeric <- count_date(sst_closure, date_signals[i])
if(debug) print(day_numeric)
## EWMA part (study function)
print(time(sst_closure))
if(debug) cat("getting VaR serie\nend date:",as.Date(time(sst_closure)[day_numeric-end_delta]),"\n")
sst_study <- study(serie=as.timeSeries(sst_closure),start="1900-01-01", end= time(sst_closure)[day_numeric-end_delta], N_day = day_numeric, debug=debug)
var <- sst_study[length(sst_study[,1]),1]
if(debug){
cat("calc var =",var,"\n")
print(sst_study)
cat("calc mean_var\n")
}
mean_with_na <- sst_study[(length(sst_study[,1])-n_mean):length(sst_study[,1]),1]
cat("\njj\n")
var_mean <- mean(mean_with_na[!is.na(mean_with_na)])
if(debug) cat("mean_var=",var_mean,"\n\ncapital accounting")
cap_invest <- as.numeric(var_to_cap(VaR=var,cap_max=capt[i],cap_med=cap_med_fact*capt[i],var_med=var_mean))
cap_left <- capt[i]-cap_invest
capt[i+1] <- cap_left+cap_invest*(1+perc[i])
print(cap_invest)
print(cap_left)
print(capt[i+1])
print("---------")
}
if(plot==TRUE){
plot(x=c(1:length(capt)),y=capt, type='l', ylim=c(yinf*cap,ysup*cap), ylab="capital", xlab="time",main=title)
}
return(capt)
}}
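## Illustrative call (added for clarity; not part of the original script).
## `ohlc`, `signals` and `returns` are hypothetical objects produced elsewhere
## in the pipeline:
## capital_path <- interest(data_ohlc = ohlc, hta_signals = signals,
##                          perc = returns, mode = 2, cap = 1000,
##                          title = "Backtest - compounded capital")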
|
/interest.R
|
no_license
|
SolbiatiAlessandro/HERMes
|
R
| false | false | 3,282 |
r
|
source("date_sign.R")
source("study.R")
count_date <- function(sst, date = "0000-00-00"){
l <- length(time(sst))
i <- l
num <- 0
while(i>0){
if(as.Date(time(sst)[i])==date) num <- i
i<-i-1
}
if(num==0) print("count_date function says: out of bound")
else num
}
var_to_cap <- function(VaR,cap_max,cap_med,var_med=0.2){
cap <- cap_max - ((cap_max-cap_med)/(var_med))*VaR
cap}
clean_date <- function(date_up,date_down,debug=0){
if(debug) cat("starting clean_date function\n")
date <- c(date_up,date_down)
date <- sort(date)
date <- c(date,as.Date("2020-01-01"):as.Date("2021-01-01"))
if(debug) cat("eliminate duplicates\n")
l <- length(date_up)+length(date_down)
for(i in 1:l){
if(debug) cat("in the loop ", i, "on ", l, ":date array ", date,"\n")
if(date[i+1]==date[i]){
for(j in i:(2*l)){
date[j] <- date[j+1]
}
l <- l-1
}
if(debug) cat("end loop\n")
}
date <- date[1:l]
date}
interest <- function(data_ohlc, hta_signals, perc,mode=1,yinf=0.5,ysup=1.5,title="Backtest",plot=TRUE, end_delta=130, cap=1000, cap_med_fact=0.7, n_mean, debug=0){
## STUDY THE CAPITAL EVOLUTION
## mode 1 -> cumulative sum
## mode 2 -> exponential growth
## mode 3 -> EWMA capitaliazion
if(mode==1){
li <- sum(perc)
if(plot==TRUE){
plot(cumsum(perc),type='l',ylab="variation",xlab="time",main=title)
}
return(li)
}
if(mode==2){
capt <- c(cap)
for(i in 1:length(perc)){capt[i+1] <- capt[i]*(1+perc[i])}
if(plot==TRUE){
plot(x=c(1:length(capt)),y=capt, type='l', ylim=c(yinf*cap,ysup*cap), ylab="capital", xlab="time",main=title)
}
return(capt)
}
if(mode==3){ # <- EWMA_RiskMetrics()®
sst_closure <- data_ohlc[,4]
capt <- c(cap)
if(debug) cat("getting cleaned date signals\n")
date_signals <- clean_date(date_sign(sst_closure,hta_signals,1),date_sign(sst_closure,hta_signals,3),debug=debug)
for(i in 1:length(perc)){
if(debug){
cat("\n\nstarting calc new position for\n")
print(perc[i])
print(date_signals[i])
}
day_numeric <- count_date(sst_closure, date_signals[i])
if(debug) print(day_numeric)
## EWMA part (study function)
print(time(sst_closure))
if(debug) cat("getting VaR serie\nend date:",as.Date(time(sst_closure)[day_numeric-end_delta]),"\n")
sst_study <- study(serie=as.timeSeries(sst_closure),start="1900-01-01", end= time(sst_closure)[day_numeric-end_delta], N_day = day_numeric, debug=debug)
var <- sst_study[length(sst_study[,1]),1]
if(debug){
cat("calc var =",var,"\n")
print(sst_study)
cat("calc mean_var\n")
}
mean_with_na <- sst_study[(length(sst_study[,1])-n_mean):length(sst_study[,1]),1]
cat("\njj\n")
var_mean <- mean(mean_with_na[!is.na(mean_with_na)])
if(debug) cat("mean_var=",var_mean,"\n\ncapital accounting")
cap_invest <- as.numeric(var_to_cap(VaR=var,cap_max=capt[i],cap_med=cap_med_fact*capt[i],var_med=var_mean))
cap_left <- capt[i]-cap_invest
capt[i+1] <- cap_left+cap_invest*(1+perc[i])
print(cap_invest)
print(cap_left)
print(capt[i+1])
print("---------")
}
if(plot==TRUE){
plot(x=c(1:length(capt)),y=capt, type='l', ylim=c(yinf*cap,ysup*cap), ylab="capital", xlab="time",main=title)
}
return(capt)
}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Data.R
\docType{data}
\name{Sleep_Sessions}
\alias{Sleep_Sessions}
\title{Sleep data}
\format{
A data frame of 5 variables
\describe{
\item{BedTime}{time the user went to bed}
\item{`Sleep Start Time`}{time at which sleep was recorded as starting}
...
}
}
\usage{
Sleep_Sessions
}
\description{
Sleep data
}
\keyword{datasets}
|
/man/Sleep_Sessions.Rd
|
permissive
|
richardsprague/amazonhalor
|
R
| false | true | 393 |
rd
|
#' Get date-time in a different time zone
#'
#' with_tz returns a date-time as it would appear in a different time zone.
#' The actual moment of time measured does not change, just the time zone it is
#' measured in. with_tz defaults to Coordinated Universal Time (UTC)
#' when an unrecognized time zone is supplied. See [Sys.timezone()]
#' for more information on how R recognizes time zones.
#'
#' @export with_tz
#' @param time a POSIXct, POSIXlt, Date, chron date-time object or a data.frame
#' object. When a data.frame all POSIXt elements of a data.frame are processed
#' with `with_tz()` and new data.frame is returned.
#' @param tzone a character string containing the time zone to convert to. R
#' must recognize the name contained in the string as a time zone on your
#' system.
#' @return a POSIXct object in the updated time zone
#' @keywords chron manip
#' @seealso [force_tz()]
#' @examples
#' x <- as.POSIXct("2009-08-07 00:00:01", tz = "America/New_York")
#' with_tz(x, "GMT")
with_tz <- function (time, tzone = ""){
if(is.data.frame(time)){
for(nm in names(time)){
if(is.POSIXt(time[[nm]])){
time[[nm]] <- with_tz(time[[nm]], tzone = tzone)
}
}
time
} else {
check_tz(tzone)
if (is.POSIXlt(time)) new <- as.POSIXct(time)
else new <- time
attr(new, "tzone") <- tzone
reclass_date(new, time)
}
}
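## Example of the data.frame branch (added for illustration; not one of the
## documented examples): every POSIXt column is converted, other columns are
## left untouched.
## df <- data.frame(id = 1:2,
##                  when = as.POSIXct(c("2009-08-07 00:00:01", "2009-08-07 12:00:00"),
##                                    tz = "America/New_York"))
## with_tz(df, "UTC")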
#' Replace time zone to create new date-time
#'
#' force_tz returns the date-time that has the same clock time as x in the new
#' time zone.
#' Although the new date-time has the same clock time (e.g. the
#' same values in the year, month, days, etc. elements) it is a
#' different moment of time than the input date-time. force_tz defaults to
#' Coordinated Universal Time (UTC) when an unrecognized time zone is
#' supplied. See [Sys.timezone()] for more information on how R
#' recognizes time zones.
#'
#' @export force_tz
#'
#' @param time a POSIXct, POSIXlt, Date, chron date-time object, or a data.frame
#' object. When a data.frame all POSIXt elements of a data.frame are processed
#' with `force_tz()` and new data.frame is returned.
#' @param tzone a character string containing the time zone to convert to. R
#' must recognize the name contained in the string as a time zone on your
#' system.
#' @param roll logical. If TRUE, and `time` falls into the DST gap (a skipped
#'   civil time), assume the next valid civil time; otherwise return NA. See examples.
#' @return a POSIXct object in the updated time zone
#' @keywords chron manip
#' @seealso [with_tz()]
#' @examples
#' x <- ymd_hms("2009-08-07 00:00:01", tz = "America/New_York")
#' force_tz(x, "UTC")
#' force_tz(x, "Europe/Amsterdam")
#' ## DST skip:
#' y <- ymd_hms("2010-03-14 02:05:05 UTC")
#' force_tz(y, "America/New_York", roll=FALSE)
#' force_tz(y, "America/New_York", roll=TRUE)
force_tz <- function(time, tzone = "", roll = FALSE){
if(is.data.frame(time)){
for(nm in names(time)){
if(is.POSIXt(time[[nm]])){
time[[nm]] <- force_tz(time[[nm]], tzone = tzone)
}
}
time
} else {
check_tz(tzone)
out <- C_force_tz(as.POSIXct(time), tz = as.character(tzone), roll)
reclass_date(out, time)
## update(time, tz = tzone)
}
}
check_tz <- function(tz) {}
# Note: alternative method? as.POSIXlt(format(as.POSIXct(x)), tz = tzone)
|
/R/time-zones.r
|
no_license
|
lorenzwalthert/lubridate
|
R
| false | false | 3,334 |
r
|
#' dir_mat melt function
#' Takes a matlab dirmat file and converts it to long format
#'
#' @param bins number of time frames to bin. generally 400 for 20 minute video is standard
#' @export
#' @examples
#' dir_mat()
dir_mat <- function (bins) {
#environment<-globalenv()
message("Choose dirmmat file")
dirmat<-read.csv(file.choose())
id.cols<-c(colnames(dirmat[1:(length(dirmat)-2400)])) # identify all non time columns
dirmat[, id.cols] <- lapply(dirmat[,id.cols], factor) # convert all ID columns to factors
dirmat<-melt(dirmat, id.vars=id.cols) # convert to long format
colnames(dirmat)[(length(dirmat)-1):length(dirmat)]<-c("variable", "dir") # make last column "dir"
  dirmat$time<-as.numeric(gsub("[^0-9]","", dirmat$variable)) # add time column, drop X resulting from melt
dirmat$variable<-NULL
dirmat$pixelSize<-as.numeric(as.character(dirmat$pixelSize))
dirmat$wormID<-dirmat$genotype:dirmat$exp:dirmat$condition:dirmat$animal
#dirmat$bin<- cut(dirmat$time, seq(0,max(dirmat$time), by = n), dig.lab=10)
dirmat$dir.bin<-cut(dirmat$dir, c(0,45,135,225,315,360), dig.lab=10,na.rm = TRUE) # assume 0 = to the right
index<-c("(0,45]", "(45,135]", "(135,225]", "(225,315]", "(315,360]")
value<-c("right", "up", "left", "down", "right")
dirmat$direction<-value[match(dirmat$dir.bin, index)]
#dirmat$direction[is.na(dirmat$direction)] <- "right" fix this later in tot.mat
dirmat$direction<-as.factor(dirmat$direction)
return(data.frame(dirmat))
}
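## Quick illustration of the angle-to-direction binning used above (added for
## clarity; not part of the original file):
## angles <- c(10, 90, 180, 270, 350)
## bins <- cut(angles, c(0, 45, 135, 225, 315, 360))
## c("right", "up", "left", "down", "right")[match(bins, levels(bins))]
## # "right" "up" "left" "down" "right"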
|
/R/dir_mat.R
|
no_license
|
SenguptaLab/MF.matR
|
R
| false | false | 1,486 |
r
|
#' Alt-metrics total citations from all sources.
#'
#' @importFrom RJSONIO fromJSON
#' @importFrom RCurl getCurlHandle
#' @param doi digital object identifier for an article in PLoS Journals
#' @param key your PLoS API key, either enter, or loads from .Rprofile
#' @param curl If using in a loop, call getCurlHandle() first and pass
#' the returned value in here (avoids unnecessary footprint)
#' @return data.frame of total no. views (counter + pmc), shares (facebook + twitter),
#' bookmarks (mendeley + citeulike), and citations (crossref)
#' @references See a tutorial/vignette for alm at
#' \url{http://ropensci.org/tutorials/alm_tutorial.html}
#' @examples \dontrun{
#' almtotals(doi = '10.1371/journal.pbio.0000012')
#' }
#' @export
almtotals <- function(doi, key = NULL, curl = getCurlHandle() )
{
url = 'http://alm.plos.org/api/v3/articles'
key <- getkey(key)
doi <- paste("doi/", doi, sep="")
doi2 <- gsub("/", "%2F", doi)
url2 <- paste(url, "/info%3A", doi2, '?api_key=', key, '&info=detail', sep='')
tt <- RJSONIO::fromJSON(url2)
data.frame(tt[[1]][c("views","shares","bookmarks","citations")])
}
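## Example of the request that gets built (added for clarity; the API key is a
## placeholder):
## almtotals(doi = '10.1371/journal.pbio.0000012', key = 'YOUR_KEY')
## # internally requests:
## # http://alm.plos.org/api/v3/articles/info%3Adoi%2F10.1371%2Fjournal.pbio.0000012?api_key=YOUR_KEY&info=detail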
|
/R/almtotals.R
|
no_license
|
imclab/alm
|
R
| false | false | 1,090 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{.get_path_lazily}
\alias{.get_path_lazily}
\title{Construct a file path}
\usage{
.get_path_lazily(dir = NULL, ..., ext = NULL)
}
\arguments{
\item{dir}{character. Folder name (with a trailing slash).}
\item{...}{dots. Arguments passed to construct the name of the file path without the directory or
the extension. Uses \code{paste()} to collapse all arguments between \code{dir} and \code{ext}.}
\item{ext}{character. Bare extension (i.e. without a dot). Must be one of valid formats.}
}
\value{
character. Path.
}
\description{
Constructs a file path given a directory as the first input and an extension as the last input.
}
\details{
Assumes that the last argument is the path extension. If \code{ext} is not
specified explicitly, it is assumed to be the last argument passed to the function.
This format allows for 'lazy' construction of the basename of the file (sans extension)
given all arguments that are not the first nor the last.
}
\keyword{internal}
|
/man/dot-get_path_lazily.Rd
|
no_license
|
tonyelhabr/teproj
|
R
| false | true | 1,050 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scales.R
\name{scale_color_discrete}
\alias{scale_color_discrete}
\title{Discrete color scale}
\usage{
scale_color_discrete(palette = "categorical", reverse = FALSE, ...)
}
\arguments{
\item{palette}{Character name of palette in tpl_palettes}
\item{reverse}{Boolean indicating whether the palette should be reversed}
\item{...}{Additional arguments passed to \code{discrete_scale()}}
}
\description{
Discrete color scale
}
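% Example added for illustration only; it assumes ggplot2 is installed and that
% the package providing this scale and its "categorical" palette is attached.
\examples{
\dontrun{
library(ggplot2)
ggplot(mtcars, aes(wt, mpg, color = factor(cyl))) +
  geom_point() +
  scale_color_discrete(palette = "categorical")
}
}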
|
/man/scale_color_discrete.Rd
|
no_license
|
connorrothschild/tpltheme
|
R
| false | true | 503 |
rd
|
load.multiresolution.transport.v1.distances <- function( folder, n , nscales){
library(data.table)
distances = list()
for( i in 1:nscales){
distances[[i]] = matrix(0, nrow=n, ncol=n)
}
debug1 = matrix(0, nrow=n, ncol=n)
debug2 = matrix(0, nrow=n, ncol=n)
for( i in 1:(n-1) ){
for( j in (i+1):n){
print( sprintf("%s/transport%.3d-%.3d.Rdata", folder, i, j) )
load( sprintf("%s/transport%.3d-%.3d.Rdata", folder, i, j) )
map = mtrp$trp$map[[ length( mtrp$trp$cost ) ]]
delta = 0
for(s in nscales:1){
#delta = delta + mtrp$delta[[s]]
tmp = as.data.table(mtrp$delta[[s]])
delta = delta + tmp
d = sqrt( sum( tmp[ , .( x^2+y^2+z^2+r^2) ] * mtrp$mass) )
distances[[s]][ i, j ] = d
distances[[s]][ j, i ] = d
}
debug1[i, j] = mtrp$trp$cost[ length( mtrp$trp$cost ) ]
debug1[j, i] = debug1[i,j]
d = sqrt( sum( delta[ , .( x^2+y^2+z^2+r^2) ] * mtrp$mass) )
debug2[i, j] = d
debug2[j, i]= d
}
}
list(distances=distances, debug1=debug1, debug2=debug2)
}
load.multiresolution.transport.v2.distances <- function( folder, n , nscales){
  d.scales = matrix(0, nrow=nscales-1, ncol=n)
distances = matrix(0, nrow=n, ncol=n)
for( i in 1:(n-1) ){
for( j in (i+1):n){
print( sprintf("%s/transport%.3d-%.3d.Rdata", folder, i, j) )
load( sprintf("%s/transport%.3d-%.3d.Rdata", folder, i, j) )
d = mtrp$trp$cost[ length( mtrp$trp$cost ) ]
distances[i, j] = d
distances[j, i] = d
}
for(s in 1:(nscales-1) ){
d.scales[s, i] = mtrp$trp1[[s]]$cost[ length( mtrp$trp1[[s]]$cost ) ]
d.scales[s, j] = mtrp$trp1[[s]]$cost[ length( mtrp$trp1[[s]]$cost ) ]
}
}
list(distances=distances, d.scales=d.scales)
}
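## Illustrative call (added for clarity; not part of the original script).
## The folder name, number of subjects and number of scales are placeholders:
## res <- load.multiresolution.transport.v1.distances(folder = "transport-output",
##                                                    n = 42, nscales = 4)
## image(res$distances[[1]]) # pairwise distance matrix at the first scale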
|
/Scripts/multiresolution.pairwise.distances.R
|
no_license
|
samuelgerber/VascularNetworks
|
R
| false | false | 1,817 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distances.R
\name{plans_diversity}
\alias{plans_diversity}
\title{Calculate the diversity of a set of plans}
\usage{
plans_diversity(
plans,
chains = 1,
n_max = 100,
ncores = 1,
total_pop = attr(plans, "prec_pop")
)
}
\arguments{
\item{plans}{a \code{\link{redist_plans}} object.}
\item{chains}{For plans objects with multiple chains, which ones to compute
diversity for. Defaults to the first. Specify "all" to use all chains.}
\item{n_max}{the maximum number of plans to sample in computing the
distances. Larger numbers will have less sampling error but will require
more computation time.}
\item{ncores}{the number of cores to use in computing the distances.}
\item{total_pop}{The vector of precinct populations. Used only if computing
variation of information. If not provided, equal population of precincts
will be assumed, i.e. the VI will be computed with respect to the precincts
themselves, and not the population.}
}
\value{
A numeric vector of off-diagonal variation of information distances.
}
\description{
Returns the off-diagonal elements of the variation of information distance
matrix for a sample of plans, which can be used as a diagnostic measure to
assess the diversity of a set of plans. While the exact scale varies depending
on the number of precincts and districts, generally diversity is good if most
of the values are greater than 0.5. Conversely, if there are many values
close to zero, then the sample has many similar plans and may not be a good
approximation to the target distribution.
}
\examples{
data(iowa)
ia <- redist_map(iowa, existing_plan = cd_2010, pop_tol = 0.01)
plans <- redist_smc(ia, 100, silent = TRUE)
hist(plans_diversity(plans))
}
\concept{analyze}
|
/man/plans_diversity.Rd
|
no_license
|
cran/redist
|
R
| false | true | 1,844 |
rd
|
\name{HINoV.Symbolic}
\alias{HINoV.Symbolic}
\title{Modification of Carmone, Kara & Maxwell Heuristic Identification of Noisy Variables (HINoV) method for symbolic interval data}
\description{Modification of Heuristic Identification of Noisy Variables (HINoV) method for symbolic interval data}
\usage{
HINoV.Symbolic(x, u=NULL, distance="H", method = "pam",
Index = "cRAND")
}
\arguments{
\item{x}{symbolic interval data: a 3-dimensional table, first dimension represents object number, second dimension - variable number, and third dimension contains lower- and upper-bounds of intervals}
\item{u}{number of clusters}
\item{distance}{"M" - minimal distance between all vertices of hyper-cubes defined by symbolic interval variables; "H" - Hausdorff distance; "S" - sum of squares of distance between all vertices of hyper-cubes defined by symbolic interval variables }
\item{method}{clustering method: "single", "ward.D", "ward.D2", "complete", "average", "mcquitty", "median", "centroid", "pam" (default)}
\item{Index}{"cRAND" - corrected Rand index (default); "RAND" - Rand index}
}
\details{
See file \url{../doc/HINoVSymbolic_details.pdf} for further details
}
\value{
\item{parim}{\emph{m} x \emph{m} symmetric matrix (\emph{m} - number of variables). Matrix contains pairwise corrected Rand (Rand) indices for partitions formed by the \emph{j}-th variable with partitions formed by the \emph{l}-th variable}
\item{topri}{sum of rows of \code{parim}}
\item{stopri}{ranked values of \code{topri} in decreasing order}
}
\author{
Marek Walesiak \email{marek.walesiak@ue.wroc.pl}, Andrzej Dudek \email{andrzej.dudek@ue.wroc.pl}
Department of Econometrics and Computer Science, University of Economics, Wroclaw, Poland \url{http://keii.ue.wroc.pl/clusterSim/}
}
\references{
Carmone, F.J., Kara, A., Maxwell, S. (1999), \emph{HINoV: a new method to improve market segment definition by identifying noisy variables}, "Journal of Marketing Research", November, vol. 36, 501-509.
Hubert, L.J., Arabie, P. (1985), \emph{Comparing partitions}, "Journal of Classification", no. 1, 193-218. Available at: \doi{10.1007/BF01908075}.
Rand, W.M. (1971), \emph{Objective criteria for the evaluation of clustering methods}, "Journal of the American Statistical Association", no. 336, 846-850. Available at: \doi{10.1080/01621459.1971.10482356}.
Walesiak, M., Dudek, A. (2008), \emph{Identification of noisy variables for nonmetric and symbolic data in cluster analysis}, In: C. Preisach, H. Burkhardt, L. Schmidt-Thieme, R. Decker (Eds.), Data analysis, machine learning and applications, Springer-Verlag, Berlin, Heidelberg, 85-92. Available at: \url{http://keii.ue.wroc.pl/pracownicy/mw/2010_Walesiak_Dudek_Springer.PDF}
}
\seealso{
\code{\link{hclust}}, \code{\link{kmeans}}, \code{\link{cluster.Sim}}
}
\examples{
library(clusterSim)
data(data_symbolic)
r<- HINoV.Symbolic(data_symbolic, u=5)
print(r$stopri)
plot(r$stopri[,2], xlab="Variable number", ylab="topri",
xaxt="n", type="b")
axis(1,at=c(1:max(r$stopri[,1])),labels=r$stopri[,1])
#symbolic data from .csv file
#library(clusterSim)
#dsym<-as.matrix(read.csv2(file="csv/symbolic.csv"))
#dim(dsym)<-c(dim(dsym)[1],dim(dsym)[2]\%/\%2,2)
#r<- HINoV.Symbolic(dsym, u=5)
#print(r$stopri)
#plot(r$stopri[,2], xlab="Variable number", ylab="topri",
#xaxt="n", type="b")
#axis(1,at=c(1:max(r$stopri[,1])),labels=r$stopri[,1])
}
\keyword{cluster}
|
/man/HINoV.Symbolic.rd
|
no_license
|
cran/clusterSim
|
R
| false | false | 3,466 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simaerep.R
\name{prob_lower_site_ae_vs_study_ae}
\alias{prob_lower_site_ae_vs_study_ae}
\title{calculate bootstrapped probability for obtaining a lower mean AE}
\usage{
prob_lower_site_ae_vs_study_ae(site_ae, study_ae, r = 1000, parallel = F)
}
\arguments{
\item{site_ae}{vector with AE numbers}
\item{study_ae}{vector with AE numbers}
\item{r}{integer, denotes number of simulations, default = 1000}
\item{parallel}{logical, toggles parallel processing on and off, default = F}
}
\value{
pval
}
\description{
helper function used by \code{\link[=sim_sites]{sim_sites()}}
}
\details{
sets the p-value to 1 if the site mean AE count is greater than the study mean AE count
}
\examples{
prob_lower_site_ae_vs_study_ae(
site_ae = c(5, 3, 3, 2, 1, 6),
study_ae = c(9, 8, 7, 9, 6, 7, 8),
parallel = FALSE
)
}
\seealso{
\code{\link[purrr]{safely}}
}
|
/man/prob_lower_site_ae_vs_study_ae.Rd
|
permissive
|
minghao2016/simaerep
|
R
| false | true | 902 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_safe.R
\name{search_safe}
\alias{search_safe}
\alias{search_dates}
\alias{search_fields}
\alias{search_authors}
\alias{search_taxa}
\alias{search_text}
\alias{search_spatial}
\title{SAFE dataset search functions.}
\usage{
search_dates(dates, match_type = "intersect", most_recent = FALSE, ids = NULL)
search_fields(
field_text = NULL,
field_type = NULL,
ids = NULL,
most_recent = FALSE
)
search_authors(author, ids = NULL, most_recent = FALSE)
search_taxa(
taxon_name = NULL,
taxon_rank = NULL,
taxon_id = NULL,
taxon_auth = NULL,
ids = NULL,
most_recent = FALSE
)
search_text(text, ids = NULL, most_recent = FALSE)
search_spatial(
wkt = NULL,
location = NULL,
distance = NULL,
ids = NULL,
most_recent = FALSE
)
}
\arguments{
\item{dates}{A vector of length 1 or 2, containing either ISO format date
character strings ("yyyy-mm-dd") or \code{POSIXt} dates.}
\item{match_type}{A character string (see Details).}
\item{most_recent}{Logical indicating whether to restrict the API to
returning only the most recent versions of the datasets found. By default
all versions of matching dataset concepts are returned.}
\item{ids}{A set of SAFE dataset record IDs to restrict a search. This will
typically be a \code{\link{safe_record_set}} object returned by another
search but can also be a vector of record ids in any of the formats
accepted by \code{\link{validate_record_ids}}.}
\item{field_text}{Text to search for within the data worksheet field name
and description.}
\item{field_type}{A data worksheet field type (see Links).}
\item{author}{A character string used to search for datasets by author
full (or partial) names.}
\item{taxon_name}{The scientific name of a taxon to search for.}
\item{taxon_rank}{A taxonomic rank to search for.}
\item{taxon_id}{A numeric taxon ID number from GBIF or NCBI.}
\item{taxon_auth}{One of GBIF or NCBI. If not specified, results will match
against taxa validated against either taxonomy database.}
\item{text}{Character string to look for within a SAFE dataset, worksheet,
title, field description, and dataset keywords.}
\item{wkt}{A well-known text geometry string, assumed to use latitude and
longitude in WGS84 (EPSG:4326).}
\item{location}{The name of a location in the SAFE gazetteer.}
\item{distance}{A buffer distance for spatial searches, giving the distance
in metres within which to match either location or wkt searches.}
}
\value{
An object of class \code{\link{safe_record_set}} of datasets that
match the query.
}
\description{
In addition to the datasets stored on Zenodo, the SAFE Project website
provides an API to search dataset metadata in more depth. The search
functions access this API and return \code{\link{safe_record_set}} objects
identifying datasets that match a particular query.
}
\details{
The API provides endpoints to search datasets by date extents, data
worksheet fields, authors, taxa, free text and by spatial query. All
of the functions accept the argument \code{most_recent}, which restricts
the returned datasets to the most recent versions of each matching dataset
concept. The functions can also be passed an existing
\code{\link{safe_record_set}} object to search within the results
of a previous search.
The \code{match_type} parameter specifies how to match date ranges and must
be one of "intersect" (default), "contain", or "within". The "contain" option
returns datasets that span a date range, "within" returns datasets that
fall within the given range and "intersect" selects datasets that overlap any
part of the date range. Note that match_type is ignored when only a single
date is provided.
}
\section{Functions}{
\itemize{
\item \code{search_dates}: Search datasets by date extent
\item \code{search_fields}: Search data worksheet field metadata.
\item \code{search_authors}: Search by dataset author
\item \code{search_taxa}: Search by taxon name, rank or taxon ID.
\item \code{search_text}: Search dataset, worksheet and field titles
and descriptions
\item \code{search_spatial}: Search by spatial sampling area/named location.
}}
\section{Spatial searches}{
For spatial searches, users can select a location name from a SAFE
data gazetteer (see e.g. \url{https://www.safeproject.net/info/gazetteer}
or \code{\link{load_gazetteer}}) or provide a WKT geometry. The sampling
locations provided in each SAFE dataset are tested to see if they intersect
the search geometry.
A buffer \code{distance} can also be provided to extend the search around the
query geometry. Note that although WKT geometries should be provided
using WGS84 lat/long coordinates, since this is typical field GPS data,
distances must be provided as metres and all proximity calculations take
place in the UTM50N projected coordinate system.
The \code{search_spatial} function will not retrieve datasets that have not
provided sampling locations or use newly defined locations that are missing
coordinate information.
}
\section{Links}{
\describe{
\item{SAFE data API}{e.g. \url{https://www.safeproject.net/api}}
\item{Worksheet field types}{\url{https://safedata-validator.readthedocs.io/en/latest/data_format/data.html#field-types}}
\item{SAFE gazetteer}{See \code{\link{load_gazetteer}} and e.g.
\url{https://www.safeproject.net/info/gazetteer}}
\item{WKT}{\url{https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry}}
}
}
\examples{
\donttest{
search_dates("2014-06-12")
search_dates(as.POSIXct(c("2014-06-12", "2015-06-11")))
search_dates(c("2014-06-12", "2015-06-11"), match_type = "contain")
search_fields(field_text = "temperature")
search_fields(field_type = "numeric")
search_fields(field_text = "temperature", field_type = "numeric")
search_authors("Ewers")
search_taxa(taxon_name = "Formicidae")
search_taxa(taxon_id = 4342, taxon_auth = "GBIF")
search_taxa(taxon_rank = "family")
search_text("forest")
search_text("ant")
search_spatial(wkt = "Point(116.5 4.75)")
search_spatial(wkt = "Point(116.5 4.75)", distance = 100000)
search_spatial(wkt = "Polygon((110 0, 110 10,120 10,120 0,110 0))")
search_spatial(location = "A_1")
search_spatial(location = "A_1", distance = 2500)
# combining searches using logical operators
fish <- search_taxa("Actinopterygii")
odonates <- search_taxa("Odonata")
ewers <- search_authors("Ewers")
aquatic <- fish | odonates
aquatic_ewers <- aquatic & ewers
all_in_one <- (fish | odonates) & ewers
}
}
|
/man/search_safe.Rd
|
no_license
|
ImperialCollegeLondon/safedata
|
R
| false | true | 6,498 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_safe.R
\name{search_safe}
\alias{search_safe}
\alias{search_dates}
\alias{search_fields}
\alias{search_authors}
\alias{search_taxa}
\alias{search_text}
\alias{search_spatial}
\title{SAFE dataset search functions.}
\usage{
search_dates(dates, match_type = "intersect", most_recent = FALSE, ids = NULL)
search_fields(
field_text = NULL,
field_type = NULL,
ids = NULL,
most_recent = FALSE
)
search_authors(author, ids = NULL, most_recent = FALSE)
search_taxa(
taxon_name = NULL,
taxon_rank = NULL,
taxon_id = NULL,
taxon_auth = NULL,
ids = NULL,
most_recent = FALSE
)
search_text(text, ids = NULL, most_recent = FALSE)
search_spatial(
wkt = NULL,
location = NULL,
distance = NULL,
ids = NULL,
most_recent = FALSE
)
}
\arguments{
\item{dates}{A vector of length 1 or 2, containing either ISO format date
character strings ("yyyy-mm-dd") or \code{POSIXt} dates.}
\item{match_type}{A character string (see Details).}
\item{most_recent}{Logical indicating whether to restrict the API to
returning only the most recent versions of the datasets found. By default
all versions of matching dataset concepts are returned.}
\item{ids}{A set of SAFE dataset record IDs to restrict a search. This will
typically be a \code{\link{safe_record_set}} object returned by another
search but can also be a vector of record ids in any of the formats
accepted by \code{\link{validate_record_ids}}.}
\item{field_text}{Text to search for within the data worksheet field name
and description.}
\item{field_type}{A data worksheet field type (see Links).}
\item{author}{A character string used to search for datasets by author
full (or partial) names.}
\item{taxon_name}{The scientific name of a taxon to search for.}
\item{taxon_rank}{A taxonomic rank to search for.}
\item{taxon_id}{A numeric taxon ID number from GBIF or NCBI.}
\item{taxon_auth}{One of GBIF or NCBI. If not specified, results will match
against taxa validated against either taxonomy database.}
\item{text}{Character string to look for within a SAFE dataset, worksheet,
title, field description, and dataset keywords.}
\item{wkt}{A well-known text geometry string, assumed to use latitude and
longitude in WGS84 (EPSG:4326).}
\item{location}{The name of a location in the SAFE gazetteer.}
\item{distance}{A buffer distance for spatial searches, giving the distance
in metres within which to match either location or wkt searches.}
}
\value{
An object of class \code{\link{safe_record_set}} of datasets that
match the query.
}
\description{
In addition to the datasets stored on Zenodo, the SAFE Project website
provides an API to search dataset metadata in more depth. The search
functions access this API and return \code{\link{safe_record_set}} objects
identifying datasets that match a particular query.
}
\details{
The API provides endpoints to search datasets by date extents, data
worksheet fields, authors, taxa, free text and by spatial query. All
of the functions accept the argument \code{most_recent}, which restricts
the returned datasets to the most recent versions of each matching dataset
concept. The functions can also be passed an existing
\code{\link{safe_record_set}} object to search within the results
of a previous search.
The \code{match_type} parameter specifies how to match date ranges and must
be one of "intersect" (default), "contain", or "within". The "contain" option
returns datasets that span a date range, "within" returns datasets that
fall within the given range and "intersect" selects datasets that overlap any
part of the date range. Note that match_type is ignored when only a single
date is provided.
}
\section{Functions}{
\itemize{
\item \code{search_dates}: Search datasets by date extent
\item \code{search_fields}: Search data worksheet field metadata.
\item \code{search_authors}: Search by dataset author
\item \code{search_taxa}: Search by taxon name, rank or taxon ID.
\item \code{search_text}: Search dataset, worksheet and field titles
and descriptions
\item \code{search_spatial}: Search by spatial sampling area/named location.
}}
\section{Spatial searches}{
For spatial searches, users can select a location name from a SAFE
data gazetteer (see e.g. \url{https://www.safeproject.net/info/gazetteer}
or \code{\link{load_gazetteer}}) or provide a WKT geometry. The sampling
locations provided in each SAFE dataset are tested to see if they intersect
the search geometry.
A buffer \code{distance} can aso be provided to extend the search around the
query geometry. Note that although WKT geometries should be provided
using WGS84 lat/long coordinates, since this is typical field GPS data,
distances must be provided as metres and all proximity calculations take
place in the UTM50N projected coordinate system.
The \code{search_spatial} function will not retrieve datasets that have not
provided sampling locations or use newly defined locations that are missing
coordinate information.
}
\section{Links}{
\describe{
\item{SAFE data API}{e.g. \url{https://www.safeproject.net/api}}
\item{Worksheet field types}{\url{https://safedata-validator.readthedocs.io/en/latest/data_format/data.html#field-types}}
\item{SAFE gazetteer}{See \code{\link{load_gazetteer}} and e.g.
\url{https://www.safeproject.net/info/gazetteer}}
\item{WKT}{\url{https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry}}
}
}
\examples{
\donttest{
search_dates("2014-06-12")
search_dates(as.POSIXct(c("2014-06-12", "2015-06-11")))
search_dates(c("2014-06-12", "2015-06-11"), match_type = "contain")
search_fields(field_text = "temperature")
search_fields(field_type = "numeric")
search_fields(field_text = "temperature", field_type = "numeric")
search_authors("Ewers")
search_taxa(taxon_name = "Formicidae")
search_taxa(taxon_id = 4342, taxon_auth = "GBIF")
search_taxa(taxon_rank = "family")
search_text("forest")
search_text("ant")
search_spatial(wkt = "Point(116.5 4.75)")
search_spatial(wkt = "Point(116.5 4.75)", distance = 100000)
search_spatial(wkt = "Polygon((110 0, 110 10,120 10,120 0,110 0))")
search_spatial(location = "A_1")
search_spatial(location = "A_1", distance = 2500)
# combining searches using logical operators
fish <- search_taxa("Actinopterygii")
odonates <- search_taxa("Odonata")
ewers <- search_authors("Ewers")
aquatic <- fish | odonates
aquatic_ewers <- aquatic & ewers
all_in_one <- (fish | odonates) & ewers
}
}
|
\name{epi2newick}
\Rdversion{2.10}
\alias{epi2newick}
\alias{epi2newickmcmc}
\title{Prints a transmission tree in Newick format.}
\description{Prints a simulated or inferred transmission tree in Newick format.}
\usage{epi2newick(epi)
epi2newickmcmc(mcmcoutput, index = dim(mcmcoutput$transtree)[2])}
\arguments{
\item{epi}{a simulated epidemic, in the form of the
output produced by \code{\link{SEIR.simulator}}.}
\item{mcmcoutput}{output from \code{\link{epinet}}.}
\item{index}{a number indicating which of the MCMC
samples to plot. Defaults to the final sample in the chain.}
}
\details{
Converts the epinet epidemic format into a transmission tree represented as a Newick string, which is the standard tree format used in phylogenetics. There are many packages available to analyse Newick format trees, such as the ape package, IcyTree and FigTree.
}
\value{A character string representing the epidemic transmission tree in Newick format. Note that this string contains control characters that can be removed by using \code{\link{cat}}.}
\seealso{\code{\link{epinet}} for generating posterior samples of the parameters,
\code{\link{print.epinet}} and \code{\link{summary.epinet}} for printing basic
summary information about an epinet object, \code{\link{write.epinet}} for
writing parameter and transmission tree posterior samples to file, and
\code{\link{plot.epinet}} for plotting the posterior samples of the transmission tree.}
\references{
Rambaut A. 2014. FigTree v1.4. \url{http://tree.bio.ed.ac.uk/software/figtree/}.
Vaughan T. 2015. IcyTree \url{https://icytree.org}.
}
\author{ David Welch \email{david.welch@auckland.ac.nz},
Chris Groendyke \email{cgroendyke@gmail.com} }
\examples{
# Simulate an epidemic through a network of 30 individuals
set.seed(3)
N <- 30
# Build dyadic covariate matrix (X)
# Have a single covariate for overall edge density; this is the Erdos-Renyi model
nodecov <- matrix(1:N, nrow = N)
dcm <- BuildX(nodecov)
# Simulate network and then simulate epidemic over network
examplenet <- SimulateDyadicLinearERGM(N, dyadiccovmat = dcm, eta = -1.8)
exampleepidemic <- SEIR.simulator(examplenet, N = 30,
beta = 0.3, ki = 2, thetai = 5, latencydist="gamma")
cat(epi2newick(exampleepidemic))
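# As noted in Details, the Newick string can also be parsed by standard
# phylogenetics tools; a sketch assuming the 'ape' package is installed
# (ape is not a dependency of this package, so the calls are left commented out):
# exampletree <- ape::read.tree(text = epi2newick(exampleepidemic))
# plot(exampletree)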
\dontrun{
# Build covariates
set.seed(1)
N <- 50
mycov <- data.frame(id = 1:N, xpos = runif(N), ypos = runif(N))
dyadCov <- BuildX(mycov,binaryCol = list(c(2, 3)),binaryFunc = c("euclidean"))
# Build network
eta <- c(0, -7)
net <- SimulateDyadicLinearERGM(N = N,dyadiccovmat = dyadCov,eta = eta)
# Simulate epidemic
epi <- SEIR.simulator(M=net,N=N,beta=1,ki=3,thetai=7,ke=3,latencydist="gamma")
# Run MCMC routine on simulated epidemic
mcmcinput <- MCMCcontrol(nsamp = 1000000, thinning = 100, etapropsd = c(1, 1))
priors <- priorcontrol(bprior = c(0, 4), tiprior = c(1, 15), teprior = c(1, 15),
etaprior = c(0, 10, 0, 10), kiprior = c(1, 7), keprior = c(1, 7), priordists = "uniform")
out <- epinet(~ xpos.ypos.L2Dist, epidata = epi, dyadiccovmat = dyadCov,
mcmcinput = mcmcinput, priors = priors)
cat(epi2newickmcmc(out))}
}
\keyword{graphs}
|
/man/epi2newick.Rd
|
no_license
|
cran/epinet
|
R
| false | false | 3,102 |
rd
|
\name{epi2newick}
\Rdversion{2.10}
\alias{epi2newick}
\alias{epi2newickmcmc}
\title{Prints a transmission tree in Newick format.}
\description{Prints a simulated or inferred transmission tree in Newick format.}
\usage{epi2newick(epi)
epi2newickmcmc(mcmcoutput, index = dim(mcmcoutput$transtree)[2])}
\arguments{
\item{epi}{a simulated epidemic, in the form of the
output produced by \code{\link{SEIR.simulator}}.}
\item{mcmcoutput}{output from \code{\link{epinet}}.}
\item{index}{a number indicating which of the MCMC
samples to plot. Defaults to the final sample in the chain.}
}
\details{
Converts the epinet epidemic format into a transmission tree represented as a Newick string, which is the standard tree format used in phylogenetics. There are many packages available to analyse Newick format trees, such as the ape package, IcyTree and FigTree.
}
\value{A character string representing the epidemic transmission tree in Newick format. Note that this string contains control characters that can be removed by using \code{\link{cat}}.}
\seealso{\code{\link{epinet}} for generating posterior samples of the parameters,
\code{\link{print.epinet}} and \code{\link{summary.epinet}} for printing basic
summary information about an epinet object, \code{\link{write.epinet}} for
writing parameter and transmission tree posterior samples to file, and
\code{\link{plot.epinet}} for plotting the posterior samples of the transmission tree.}
\references{
Rambaut A. 2014. FigTree v1.4. \url{http://tree.bio.ed.ac.uk/software/figtree/}.
Vaughan T. 2015. IcyTree \url{https://icytree.org}.
}
\author{ David Welch \email{david.welch@auckland.ac.nz},
Chris Groendyke \email{cgroendyke@gmail.com} }
\examples{
# Simulate an epidemic through a network of 30 individuals
set.seed(3)
N <- 30
# Build dyadic covariate matrix (X)
# Have a single covariate for overall edge density; this is the Erdos-Renyi model
nodecov <- matrix(1:N, nrow = N)
dcm <- BuildX(nodecov)
# Simulate network and then simulate epidemic over network
examplenet <- SimulateDyadicLinearERGM(N, dyadiccovmat = dcm, eta = -1.8)
exampleepidemic <- SEIR.simulator(examplenet, N = 30,
beta = 0.3, ki = 2, thetai = 5, latencydist="gamma")
cat(epi2newick(exampleepidemic))
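# As noted in Details, the Newick string can also be parsed by standard
# phylogenetics tools; a sketch assuming the 'ape' package is installed
# (ape is not a dependency of this package, so the calls are left commented out):
# exampletree <- ape::read.tree(text = epi2newick(exampleepidemic))
# plot(exampletree)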
\dontrun{
# Build covariates
set.seed(1)
N <- 50
mycov <- data.frame(id = 1:N, xpos = runif(N), ypos = runif(N))
dyadCov <- BuildX(mycov,binaryCol = list(c(2, 3)),binaryFunc = c("euclidean"))
# Build network
eta <- c(0, -7)
net <- SimulateDyadicLinearERGM(N = N,dyadiccovmat = dyadCov,eta = eta)
# Simulate epidemic
epi <- SEIR.simulator(M=net,N=N,beta=1,ki=3,thetai=7,ke=3,latencydist="gamma")
# Run MCMC routine on simulated epidemic
mcmcinput <- MCMCcontrol(nsamp = 1000000, thinning = 100, etapropsd = c(1, 1))
priors <- priorcontrol(bprior = c(0, 4), tiprior = c(1, 15), teprior = c(1, 15),
etaprior = c(0, 10, 0, 10), kiprior = c(1, 7), keprior = c(1, 7), priordists = "uniform")
out <- epinet(~ xpos.ypos.L2Dist, epidata = epi, dyadiccovmat = dyadCov,
mcmcinput = mcmcinput, priors = priors)
cat(epi2newickmcmc(out))}
}
\keyword{graphs}
|
################################################################
# LAST REVISION: 2014-12-19
# AUTHOR: JIPATSAA
# GOAL: Obtain for each Subject-Activity pair the mean value of each feature containing *(M/m)ean* or *(S/s)td*
# PRE: 1) The library data.table must be installed.
# 2) An Internet connection is required since the data will be downloaded from the web
# The data contains the movement captures (in the X, Y and Z dimensions) of 30 Subjects and
# the corresponding activity associated to those movements
# The data will be downloaded from "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# and unzipped and saved in a NEW directory called ./data
# POST: The result is saved into the file called "MeanAndStdValuesForEachSubject-Activity.txt"
#
################################################################
library(data.table)
source("downloadFromURLAndUnzip.R") #function to create ./data dir and download and unzip the data
source("readTable.R") #function to read a file in the ./data/UCI HAR Dataset and dump it into a table
actualPath<-getwd()
#############################
# 1 Step: DOWNLOAD THE DATA
#############################
# Download the data from the https://d396qusza40orc.cloudfront.net website
# the file getdata_projectfiles_UCI HAR Dataset.zip will be downloaded and unzipped
# The information in this webpage was originally obtained from: http://archive.ics.uci.edu/ml/machine-learning-databases/00240/
# so if the previous webpage is down data could also be downloaded from
# fileUrl <-"http://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.zip"
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
dataDir <- paste(actualPath,"data",sep="/")
dirDataSet<-paste(dataDir,"UCI HAR Dataset",sep="/") #once downloaded and unzipped, the data will be in this directory
fileNameZIP <- "HARdata.zip"
filePathZIP <- paste(dataDir,fileNameZIP,sep="/")
#downloadFromURLAndUnzip(fileUrl,workdirPath=dataDir,fileName=fileNameZIP)
# if there is any problem downloading from https://d396qusza40orc.cloudfront.net webpage try the original
if(!file.exists(filePathZIP))
{
fileUrl <-"http://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.zip"
downloadFromURLAndUnzip(fileUrl,workdirPath=dataDir,fileName=fileNameZIP)
}
###############################################
# 2 Step: OBTAIN GENERAL INFORMATION ABOUT THE ACTIVITY AND FEATURE CODES
###############################################
# OBTAIN ACTIVITY CODES AND DESCRIPTORS
# (6 activities in total)
# (activity_labels.txt) file containing the activity codes and descriptors i.e. 1 WALKING
activity<-readTable(dirDataSet,"activity_labels.txt")
actList<-as.character(activity$V2) #getting the Strings corresponding to the codes
# OBTAIN MOVEMENT FEATURES CODES AND DESCRIPTORS
# (561 features in total)
# (features.txt) file containing the descriptors of the movement features i.e. 1 tBodyAcc-mean()-X
features<-readTable(dirDataSet,"features.txt")
features<-features$V2 #we just want the 2nd column
#OBTAIN TEST+TRAIN
# (X_test.txt and X_train.txt)
# TEST: dim=number of observations: 2947, number of processed movement features: 561
# TRAIN: dim=number of observations: 7352, number of processed movement features: 561
############################
trainProcessedFeatures<-readTable(dirDataSet,"train/X_train.txt")
testProcessedFeatures<-readTable(dirDataSet,"test/X_test.txt")
############################
# 3 Step: MERGE TRAIN+TEST
############################
corpProcessedFeatures<-rbind(trainProcessedFeatures,testProcessedFeatures) #Merge Test+Train obtaining the whole corpus (dataSet)
############################
# 4 Step: ADD THE NAMES OF THE FEATURES
############################
names(corpProcessedFeatures)<-as.character(features) #adding the names of the features to each column values
# From all the features we want to process ONLY the ones referring to (M/m)ean and (S/s)tandard deviation features
############################
# 5 Step: FILTER MEANS AND STD DEVIATIONS
############################
# get the indexes of the columns containing the words (M/m)ean or (S/s)td
meanInd<-grep("mean",names(corpProcessedFeatures),ignore.case=TRUE)
stdInd<-grep("std",names(corpProcessedFeatures),ignore.case=TRUE)
meanStdInd<-c(meanInd,stdInd)
meanStdInd<-sort(meanStdInd)
# filter those indexes to obtain only the (M/m)ean and (S/s)td columns
corpMeanStdProcFeat<-subset(corpProcessedFeatures,select=meanStdInd) #equivalent corpProcessedFeatures[meanStdInd]
#################################
# 6 Step: OBTAIN INF. ABOUT THE SUBJECTS
#################################
# (30 different subjects)
# (subject_train.txt) file contains inf. about 21 subjects: 1 3 5 6 7 8 11 14 15 16 17 19 21 22 23 25 26 27 28 29 30
# (subject_test.txt) file contains inf. about 9 subjects: 2 4 9 10 12 13 18 20 24
#################################
trainfitx<-paste(dirDataSet,"train/subject_train.txt",sep="/")
trainSubjects<-read.table(trainfitx)
testfitx<-paste(dirDataSet,"test/subject_test.txt",sep="/")
testSubjects<-read.table(testfitx)
corpSubjs<-rbind(trainSubjects,testSubjects) #bind subjects inf. from test+train
#################################
# 7 Step: OBTAIN CLASS INFORMATION (the class to guess)
#################################
# (Y_test.txt) file contains inf. about the class to guess in the clustering task
#################################
trainfitx<-paste(dirDataSet,"train/Y_train.txt",sep="/")
trainActivityCode<-read.table(trainfitx)
testfitx<-paste(dirDataSet,"test/Y_test.txt",sep="/")
testActivityCode<-read.table(testfitx)
trainActivityDescriptor<-apply(trainActivityCode,1,function(x) as.character(actList[x]))
testActivityDescriptor<-apply(testActivityCode,1,function(x) as.character(actList[x]))
corpActivityDescriptor<-c(trainActivityDescriptor,testActivityDescriptor)
###############################
# 8 Step: MERGE SUBJ+FEATURES+ACTIVITY INF.
###############################
# Merge the information about the subjects with their corresponding processed movement features and the activity associated to the subject in that moment
corpProcFeatActCode<-cbind(corpSubjs,corpMeanStdProcFeat,corpActivityDescriptor)
names(corpProcFeatActCode)<-c("subject",as.character(names(corpMeanStdProcFeat)),"activity")
#remove all auxiliary variables
rm(features,trainSubjects,testSubjects,trainProcessedFeatures,testProcessedFeatures,trainActivityCode,testActivityCode,corpProcessedFeatures,corpActivityDescriptor,corpSubjs,meanInd,stdInd,meanStdInd)
################################
# 9 Step: PROCESS THE DATA TO OBTAIN THE MEAN OF EACH FEAT. BY SUBJECT-ACTIVITY PAIR:
# 1. Organize by Subject and Activity.
# 2. Calculate the mean of each feature for each subject-activity pair
# 3. name the calculated features
# 4. order the results by subject and activity
################################
DT<-data.table(corpProcFeatActCode)#convert the frame into a table to split it by subject and activity and apply the mean to each column
res<-DT[, lapply(.SD,mean), by=list(subject,activity)]#.SD stands for subset data and it is used to apply the mean to every column of the table
resNames<-names(res)#obtain the original names
aux<-lapply(resNames[3:length(resNames)],function(x){paste("MEAN-OVER-",x,sep="")})#CREATE NEW FEATURE NAMES (MEAN-OVER-feature)
resNames<-c("subject","activity",aux)
setnames(res,names(res),as.character(resNames))#substitute all features names by new ones
res <- res[order(subject,activity)] #order the result by subject and activity
################################
# 10 Step: WRITE THE DATA IN A TXT FILE
###############################
write.table(res, file ="MeanAndStdValuesForEachSubject-Activity.txt",row.names=FALSE,sep=" ") #write the result as tidy data
|
/run_analysis.R
|
no_license
|
jipatsaa/datasciencecoursera
|
R
| false | false | 7,895 |
r
|
################################################################
# LAST REVISION: 2014-12-19
# AUTHOR: JIPATSAA
# GOAL: Obtain for each Subject-Activity pair the mean value of each feature containing *(M/m)ean* or *(S/s)td*
# PRE: 1) The library data.table must be installed.
# 2) An Internet connection is required since the data will be downloaded from the web
# The data contains the movement captures (in the X, Y and Z dimensions) of 30 Subjects and
# the corresponding activity associated to those movements
# The data will be downloaded from "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# and unzipped and saved in a NEW directory called ./data
# POST: The result is saved into the file called "MeanAndStdValuesForEachSubject-Activity.txt"
#
################################################################
library(data.table)
source("downloadFromURLAndUnzip.R") #function to create ./data dir and download and unzip the data
source("readTable.R") #function to read a file in the ./data/UCI HAR Dataset and dump it into a table
actualPath<-getwd()
#############################
# 1 Step: DOWNLOAD THE DATA
#############################
# Download the data from the https://d396qusza40orc.cloudfront.net website
# the file getdata_projectfiles_UCI HAR Dataset.zip will be downloaded and unzipped
# The information in this webpage was originally obtained from: http://archive.ics.uci.edu/ml/machine-learning-databases/00240/
# so if the previous webpage is down data could also be downloaded from
# fileUrl <-"http://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.zip"
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
dataDir <- paste(actualPath,"data",sep="/")
dirDataSet<-paste(dataDir,"UCI HAR Dataset",sep="/") #once downloaded and unzipped, the data will be in this directory
fileNameZIP <- "HARdata.zip"
filePathZIP <- paste(dataDir,fileNameZIP,sep="/")
#downloadFromURLAndUnzip(fileUrl,workdirPath=dataDir,fileName=fileNameZIP)
# if there is any problem downloading from https://d396qusza40orc.cloudfront.net webpage try the original
if(!file.exists(filePathZIP))
{
fileUrl <-"http://archive.ics.uci.edu/ml/machine-learning-databases/00240/UCI%20HAR%20Dataset.zip"
downloadFromURLAndUnzip(fileUrl,workdirPath=dataDir,fileName=fileNameZIP)
}
###############################################
# 2 Step: OBTAIN GENERAL INFORMATION ABOUT THE ACTIVITY AND FEATURE CODES
###############################################
# OBTAIN ACTIVITY CODES AND DESCRIPTORS
# (6 activities in total)
# (activity_labels.txt) file containing the activity codes and descriptors i.e. 1 WALKING
activity<-readTable(dirDataSet,"activity_labels.txt")
actList<-as.character(activity$V2) #getting the Strings corresponding to the codes
# OBTAIN MOVEMENT FEATURES CODES AND DESCRIPTORS
# (561 features in total)
# (features.txt) file containing the descriptors of the movement features i.e. 1 tBodyAcc-mean()-X
features<-readTable(dirDataSet,"features.txt")
features<-features$V2 #we just want the 2nd column
#OBTAIN TEST+TRAIN
# (X_test.txt and X_train.txt)
# TEST: dim=number of observations: 2947, number of processed movement features: 561
# TRAIN: dim=number of observations: 7352, number of processed movement features: 561
############################
trainProcessedFeatures<-readTable(dirDataSet,"train/X_train.txt")
testProcessedFeatures<-readTable(dirDataSet,"test/X_test.txt")
############################
# 3 Step: MERGE TRAIN+TEST
############################
corpProcessedFeatures<-rbind(trainProcessedFeatures,testProcessedFeatures) #Merge Test+Train obtaining the whole corpus (dataSet)
############################
# 4 Step: ADD THE NAMES OF THE FEATURES
############################
names(corpProcessedFeatures)<-as.character(features) #adding the names of the features to each column values
# From all the features we want to process ONLY the ones referring to (M/m)ean and (S/s)tandard deviation features
############################
# 5 Step: FILTER MEANS AND STD DEVIATIONS
############################
# get the indexes of the columns containing the words (M/m)ean or (S/s)td
meanInd<-grep("mean",names(corpProcessedFeatures),ignore.case=TRUE)
stdInd<-grep("std",names(corpProcessedFeatures),ignore.case=TRUE)
meanStdInd<-c(meanInd,stdInd)
meanStdInd<-sort(meanStdInd)
# filter those indexes to obtain only the (M/m)ean and (S/s)td columns
corpMeanStdProcFeat<-subset(corpProcessedFeatures,select=meanStdInd) #equivalent corpProcessedFeatures[meanStdInd]
#################################
# 6 Step: OBTAIN INF. ABOUT THE SUBJECTS
#################################
# (30 different subjects)
# (subject_train.txt) file contains inf. about 21 subjects: 1 3 5 6 7 8 11 14 15 16 17 19 21 22 23 25 26 27 28 29 30
# (subject_test.txt) file contains inf. about 9 subjects: 2 4 9 10 12 13 18 20 24
#################################
trainfitx<-paste(dirDataSet,"train/subject_train.txt",sep="/")
trainSubjects<-read.table(trainfitx)
testfitx<-paste(dirDataSet,"test/subject_test.txt",sep="/")
testSubjects<-read.table(testfitx)
corpSubjs<-rbind(trainSubjects,testSubjects) #bind subjects inf. from test+train
#################################
# 7 Step: OBTAIN CLASS INFORMATION (the class to guess)
#################################
# (Y_test.txt) file contains inf. about the class to guess in the clustering task
#################################
trainfitx<-paste(dirDataSet,"train/Y_train.txt",sep="/")
trainActivityCode<-read.table(trainfitx)
testfitx<-paste(dirDataSet,"test/Y_test.txt",sep="/")
testActivityCode<-read.table(testfitx)
trainActivityDescriptor<-apply(trainActivityCode,1,function(x) as.character(actList[x]))
testActivityDescriptor<-apply(testActivityCode,1,function(x) as.character(actList[x]))
corpActivityDescriptor<-c(trainActivityDescriptor,testActivityDescriptor)
###############################
# 8 Step: MERGE SUBJ+FEATURES+ACTIVITY INF.
###############################
# Merge the information about the subjects with their corresponding processed movement features and the activity associated to the subject in that moment
corpProcFeatActCode<-cbind(corpSubjs,corpMeanStdProcFeat,corpActivityDescriptor)
names(corpProcFeatActCode)<-c("subject",as.character(names(corpMeanStdProcFeat)),"activity")
#remove all auxiliary variables
rm(features,trainSubjects,testSubjects,trainProcessedFeatures,testProcessedFeatures,trainActivityCode,testActivityCode,corpProcessedFeatures,corpActivityDescriptor,corpSubjs,meanInd,stdInd,meanStdInd)
################################
# 9 Step: PROCESS THE DATA TO OBTAIN THE MEAN OF EACH FEAT. BY SUBJECT-ACTIVITY PAIR:
# 1. Organize by Subject and Activity.
# 2. Calculate the mean of each feature for each subject-activity pair
# 3. name the calculated features
# 4. order the results by subject and activity
################################
DT<-data.table(corpProcFeatActCode)#convert the frame into a table to split it by subject and activity and apply the mean to each column
res<-DT[, lapply(.SD,mean), by=list(subject,activity)]#.SD stands for subset data and it is used to apply the mean to every column of the table
resNames<-names(res)#obtain the original names
aux<-lapply(resNames[3:length(resNames)],function(x){paste("MEAN-OVER-",x,sep="")})#CREATE NEW FEATURE NAMES (MEAN-OVER-feature)
resNames<-c("subject","activity",aux)
setnames(res,names(res),as.character(resNames))#substitute all features names by new ones
res <- res[order(subject,activity)] #order the result by subject and activity
################################
# 10 Step: WRITE THE DATA IN A TXT FILE
###############################
write.table(res, file ="MeanAndStdValuesForEachSubject-Activity.txt",row.names=FALSE,sep=" ") #write the result as tidy data
|
#' Functional groups calculation
#'
#' This function loads, integrates and calculates the functional group distribution from the raw spectra.
#' Output is a list with the raw data, integrals and corrected spectra.
#' @param batch_nmr List of raw spectra already loaded with read_raw_spec()
#' @param file Vector with file names to load when batch_nmr is not provided
#' @param NMRmeth Regions to be integrated.
#' Default is spinning side bands, other methods available include: Bonanomi ("Bonanomi") and Molecular mixing model ("MMM").
#' @param ecosys Standards to be used for the MMM, can be Terrestrial ("Terr_Nelson" or "Terr_Baldock"), Aquatic ("Aqua_Nelson" or "Aqua_Baldock"), or "mod" for user-supplied standards
#' @param cndata Per-sample elemental data; the N/C ratio (element \code{NC}) is used for the MMM fits
#' @param mod_std Custom standard matrix, used when ecosys = "mod"
#' @keywords
#' @export
#' @examples
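#' # A minimal usage sketch (file names below are illustrative only, and the
#' # calls are kept as comments because no example data ships with this file):
#' # spec <- read_raw_spec(file = c("spectrum_01.csv", "spectrum_02.csv"))
#' # fg_default <- region_calc(batch_nmr = spec)                      # spinning side band regions
#' # fg_bonanomi <- region_calc(batch_nmr = spec, NMRmeth = "Bonanomi")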
region_calc <- function (batch_nmr = NULL, file = NULL, NMRmeth = NULL, ecosys=NULL,
cndata = NULL, mod_std = NULL, stats = FALSE) {
if (is.null(batch_nmr)) {
if (is.null(file)) {
stop("Please provide either a raw spectrum from the read_raw_spec function or a raw NMR file")
} else {
## read the raw spectra from a file list
batch.nmr <- read_raw_spec(file = file)
}
} else {
batch.nmr <- batch_nmr
}
if (is.null(NMRmeth)) {
## loop to process all samples
NMR.end <- NULL
batch.nmr <- int_nmr(batch.nmr)
for (i in 1:length(batch.nmr)) {
file.name <- batch.nmr[[i]]$name
Integral <- c(batch.nmr[[i]]$data$Integral)
##carboxyl C calculation
carboxyl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[30:33]), sum(Integral$normalized.Int[21:24]), -sum(Integral$normalized.Int[3:6]))), c("Carboxyl"))
##Aryl C calculation
aryl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[27:29]), sum(Integral$normalized.Int[18:20]), -sum(Integral$normalized.Int[1:2]))), c("Aryl"))
##O-Alkyl C calculation
oalkyl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[4:8]), sum(Integral$normalized.Int[13:17]), -sum(Integral$normalized.Int[31:33]))), c("O-Alkyl"))
##Alkyl C calculation
alkyl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[1:3]), sum(Integral$normalized.Int[10:12]), -sum(Integral$normalized.Int[28:30]))), c("Alkyl"))
##Put all together
#NMR.end[[i]] <- list(file.name = file.name, data = data.frame(carboxyl, aryl, oalkyl, alkyl))
integral.end <- data.frame(carboxyl, aryl, oalkyl, alkyl)
norm <- sum(integral.end)
normalized.Int <- (integral.end/norm)*100
integral.end <- data.frame(normalized.Int)
#integral.end <- data.frame(file.name,integral.end)
NMR.end[[i]] <- data.frame(file.name, integral.end)
}
} else if (NMRmeth == "Bonanomi") {
## loop to process all samples
NMR.end <- NULL
for (i in 1:length(batch.nmr)) {
file.name <- batch.nmr[[i]]$name
sample <- list(batch.nmr[[i]])
Integral <- int_nmr (raw.spec = sample, NMRmeth = NMRmeth)
##carboxyl C calculation
carboxyl <- setNames(data.frame(sum(sum(Integral[30:33,2]), sum(Integral[21:24,2]), -2*sum(Integral[3:6,2]))), c("Carboxyl"))
##Aryl C calculation
aryl <- setNames(data.frame(sum(sum(Integral[27:29,2]), sum(Integral[18:20,2]), -2*sum(Integral[1:2,2]))), c("Aryl"))
##O-Alkyl C calculation
oalkyl <- setNames(data.frame(sum(sum(Integral[4:8,2]), sum(Integral[13:17,2]), -2*sum(Integral[31:33,2]))), c("O-Alkyl"))
##Alkyl C calculation
alkyl <- setNames(data.frame(sum(sum(Integral[1:3,2]), sum(Integral[10:12,2]), -2*sum(Integral[28:30,2]))), c("Alkyl"))
##Put all together
NMR.end[[i]] <- data.frame(file.name = file.name, data = data.frame(carboxyl, aryl, oalkyl, alkyl))
}
} else if (NMRmeth == "MMM") {
## loop to process all samples
NMR.end <- NULL
raw.spec.end <- NULL
batch.nmr <- int_nmr(batch.nmr, NMRmeth = "MMM-SSB")
nmrmerge <- NULL
for (i in 1:length(batch.nmr)) {
raw.spec.end[[i]] <- batch.nmr[[i]]
NCval <- as.numeric(cndata[[i]]$NC)
samplename <- batch.nmr[[i]]$name
sampleraw.spec <- batch.nmr[[i]]$data$raw.spec
sampleintegral <- as.data.frame(batch.nmr[[i]]$data$Integral)
##Alkyl C calculation
Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[1:3,1]), sum(sampleintegral[10:12,1]), -sum(sampleintegral[28:30,1]))), c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl <- setNames(data.frame(sum(2*sum(sampleintegral[4:5,1]), sum(sampleintegral[13:14,1]), -sum(sampleintegral[31:32,1]))), c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[6:7,1]), sum(sampleintegral[15:16,1]), -sum(sampleintegral[33:33,1]))), c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[8:8,1]), sum(sampleintegral[17:17,1]))), c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic <- setNames(data.frame(sum(2*sum(sampleintegral[27:28,1]), sum(sampleintegral[18:19,1]), -sum(sampleintegral[1:1,1]))), c("Aromatic"))
##Phenolic C calculation
Phenolic <- setNames(data.frame(sum(2*sum(sampleintegral[29:29,1]), sum(sampleintegral[20:20,1]), -sum(sampleintegral[2:2,1]))), c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_Carboxylic <- setNames(data.frame(sum(2*sum(sampleintegral[30:32,1]), sum(sampleintegral[21:23,1]), -sum(sampleintegral[3:5,1]))), c("Amide_Carboxylic"))
##Ketone C calculation
Ketone <- setNames(data.frame(sum(2*sum(sampleintegral[33:33,1]), sum(sampleintegral[24:24,1]), -sum(sampleintegral[6:6,1]))), c("Ketone"))
##Put all together
Amide_to_Ketone <- c(Amide_Carboxylic + Ketone)
sampleintegraljoin <- data.frame(Alkyl, N_Alkyl_Methoxyl, O_Alkyl, Di_O_Alkyl, Aromatic, Phenolic, Amide_to_Ketone)
sampleintegraljoin <- t(sampleintegraljoin)
norm <- sum(sampleintegraljoin)
sampleintegraljoin <- (sampleintegraljoin/norm)*100
sampleintegraljoin <- data.frame(sampleintegraljoin)
sampleintegralend <- rbind(NCval,sampleintegraljoin)
raw.spec.end[[i]] <- list("name" = samplename, "data" = list("raw.spec" = sampleraw.spec,"Integral" = sampleintegralend))
if (ecosys == "Terr_Nelson") {
stdmat <- std_nmr(ecosys = "Terr_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Terr_Baldock") {
stdmat <- std_nmr(ecosys = "Terr_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Nelson") {
stdmat <- std_nmr(ecosys = "Aqua_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Baldock") {
stdmat <- std_nmr(ecosys = "Aqua_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
} else if (ecosys == "mod") {
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = mod_std, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]+NMR.end$Protein*mod_std[[1]][9,1]+NMR.end$Lignin*mod_std[[1]][9,3]+
NMR.end$Lipid*mod_std[[1]][9,4]+NMR.end$Carbonyl*mod_std[[1]][9,5]+NMR.end$Char*mod_std[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]+NMR.end$Protein*mod_std[[1]][10,1]+NMR.end$Lignin*mod_std[[1]][10,3]+
NMR.end$Lipid*mod_std[[1]][10,4]+NMR.end$Carbonyl*mod_std[[1]][10,5]+NMR.end$Char*mod_std[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]+NMR.end$Protein*mod_std[[1]][11,1]+NMR.end$Lignin*mod_std[[1]][11,3]+
NMR.end$Lipid*mod_std[[1]][11,4]+NMR.end$Carbonyl*mod_std[[1]][11,5]+NMR.end$Char*mod_std[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]+NMR.end$Protein*mod_std[[1]][12,1]+NMR.end$Lignin*mod_std[[1]][12,3]+
NMR.end$Lipid*mod_std[[1]][12,4]+NMR.end$Carbonyl*mod_std[[1]][12,5]+NMR.end$Char*mod_std[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]*12.0107+NMR.end$Protein*mod_std[[1]][9,1]*12.0107+NMR.end$Lignin*mod_std[[1]][9,3]*12.0107+
NMR.end$Lipid*mod_std[[1]][9,4]*12.0107+NMR.end$Carbonyl*mod_std[[1]][9,5]*12.0107+NMR.end$Char*mod_std[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]*14.0067+NMR.end$Protein*mod_std[[1]][10,1]*14.0067+NMR.end$Lignin*mod_std[[1]][10,3]*14.0067+
NMR.end$Lipid*mod_std[[1]][10,4]*14.0067+NMR.end$Carbonyl*mod_std[[1]][10,5]*14.0067+NMR.end$Char*mod_std[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]*1.00794+NMR.end$Protein*mod_std[[1]][11,1]*1.00794+NMR.end$Lignin*mod_std[[1]][11,3]*1.00794+
NMR.end$Lipid*mod_std[[1]][11,4]*1.00794+NMR.end$Carbonyl*mod_std[[1]][11,5]*1.00794+NMR.end$Char*mod_std[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]*15.994+NMR.end$Protein*mod_std[[1]][12,1]*15.994+NMR.end$Lignin*mod_std[[1]][12,3]*15.994+
NMR.end$Lipid*mod_std[[1]][12,4]*15.994+NMR.end$Carbonyl*mod_std[[1]][12,5]*15.994+NMR.end$Char*mod_std[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation (this branch uses the user-supplied mod_std standards)
Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][1,2]+NMR.end$Protein*mod_std[[1]][1,1]+NMR.end$Lignin*mod_std[[1]][1,3]+NMR.end$Lipid*mod_std[[1]][1,4]+
                      NMR.end$Carbonyl*mod_std[[1]][1,5]+NMR.end$Char*mod_std[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][2,2]+NMR.end$Protein*mod_std[[1]][2,1]+NMR.end$Lignin*mod_std[[1]][2,3]+NMR.end$Lipid*mod_std[[1]][2,4]+
                                 NMR.end$Carbonyl*mod_std[[1]][2,5]+NMR.end$Char*mod_std[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][3,2]+NMR.end$Protein*mod_std[[1]][3,1]+NMR.end$Lignin*mod_std[[1]][3,3]+NMR.end$Lipid*mod_std[[1]][3,4]+
                        NMR.end$Carbonyl*mod_std[[1]][3,5]+NMR.end$Char*mod_std[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][4,2]+NMR.end$Protein*mod_std[[1]][4,1]+NMR.end$Lignin*mod_std[[1]][4,3]+NMR.end$Lipid*mod_std[[1]][4,4]+
                           NMR.end$Carbonyl*mod_std[[1]][4,5]+NMR.end$Char*mod_std[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][5,2]+NMR.end$Protein*mod_std[[1]][5,1]+NMR.end$Lignin*mod_std[[1]][5,3]+NMR.end$Lipid*mod_std[[1]][5,4]+
                         NMR.end$Carbonyl*mod_std[[1]][5,5]+NMR.end$Char*mod_std[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][6,2]+NMR.end$Protein*mod_std[[1]][6,1]+NMR.end$Lignin*mod_std[[1]][6,3]+NMR.end$Lipid*mod_std[[1]][6,4]+
                         NMR.end$Carbonyl*mod_std[[1]][6,5]+NMR.end$Char*mod_std[[1]][6,6], c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][7,2]+NMR.end$Protein*mod_std[[1]][7,1]+NMR.end$Lignin*mod_std[[1]][7,3]+NMR.end$Lipid*mod_std[[1]][7,4]+
                                NMR.end$Carbonyl*mod_std[[1]][7,5]+NMR.end$Char*mod_std[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
rownames(nmrrest) <- NULL
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
}
}
## return the corrected spectra list
} else if (NMRmeth == "MMMFixN") {
## loop to process all samples
NMR.end <- NULL
raw.spec.end <- NULL
batch.nmr <- int_nmr(batch.nmr, NMRmeth = "MMM-SSB")
nmrmerge <- NULL
for (i in 1:length(batch.nmr)) {
raw.spec.end[[i]] <- batch.nmr[[i]]
NCval <- as.numeric(cndata[[i]]$NC)
samplename <- batch.nmr[[i]]$name
sampleraw.spec <- batch.nmr[[i]]$data$raw.spec
sampleintegral <- as.data.frame(batch.nmr[[i]]$data$Integral)
##Alkyl C calculation
Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[1:3,1]), sum(sampleintegral[10:12,1]), -sum(sampleintegral[28:30,1]))), c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl <- setNames(data.frame(sum(2*sum(sampleintegral[4:5,1]), sum(sampleintegral[13:14,1]), -sum(sampleintegral[31:32,1]))), c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[6:7,1]), sum(sampleintegral[15:16,1]), -sum(sampleintegral[33:33,1]))), c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[8:8,1]), sum(sampleintegral[17:17,1]))), c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic <- setNames(data.frame(sum(2*sum(sampleintegral[27:28,1]), sum(sampleintegral[18:19,1]), -sum(sampleintegral[1:1,1]))), c("Aromatic"))
##Phenolic C calculation
Phenolic <- setNames(data.frame(sum(2*sum(sampleintegral[29:29,1]), sum(sampleintegral[20:20,1]), -sum(sampleintegral[2:2,1]))), c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_Carboxylic <- setNames(data.frame(sum(2*sum(sampleintegral[30:32,1]), sum(sampleintegral[21:23,1]), -sum(sampleintegral[3:5,1]))), c("Amide_Carboxylic"))
##Ketone C calculation
Ketone <- setNames(data.frame(sum(2*sum(sampleintegral[33:33,1]), sum(sampleintegral[24:24,1]), -sum(sampleintegral[6:6,1]))), c("Ketone"))
##Put all together
Amide_to_Ketone <- c(Amide_Carboxylic + Ketone)
sampleintegraljoin <- data.frame(Alkyl, N_Alkyl_Methoxyl, O_Alkyl, Di_O_Alkyl, Aromatic, Phenolic, Amide_to_Ketone)
sampleintegraljoin <- t(sampleintegraljoin)
norm <- sum(sampleintegraljoin)
sampleintegraljoin <- (sampleintegraljoin/norm)*100
sampleintegralend <- rbind(NCval,sampleintegraljoin)
sampleintegralend <- data.frame(sampleintegralend)
raw.spec.end[[i]] <- list("name" = samplename, "data" = list("raw.spec" = sampleraw.spec,"Integral" = sampleintegralend))
if (ecosys == "Terr_Nelson") {
stdmat <- std_nmr(ecosys = "Terr_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
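## nominal oxidation state of carbon; the expression below is algebraically (2*Omol + 3*Nmol - Hmol)/Cmol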
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
rownames(nmrrest) <- NULL
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Terr_Baldock") {
stdmat <- std_nmr(ecosys = "Terr_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Nelson") {
stdmat <- std_nmr(ecosys = "Aqua_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Baldock") {
stdmat <- std_nmr(ecosys = "Aqua_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "mod") {
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = mod_std, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]+NMR.end$Protein*mod_std[[1]][9,1]+NMR.end$Lignin*mod_std[[1]][9,3]+
NMR.end$Lipid*mod_std[[1]][9,4]+NMR.end$Carbonyl*mod_std[[1]][9,5]+NMR.end$Char*mod_std[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]+NMR.end$Protein*mod_std[[1]][10,1]+NMR.end$Lignin*mod_std[[1]][10,3]+
NMR.end$Lipid*mod_std[[1]][10,4]+NMR.end$Carbonyl*mod_std[[1]][10,5]+NMR.end$Char*mod_std[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]+NMR.end$Protein*mod_std[[1]][11,1]+NMR.end$Lignin*mod_std[[1]][11,3]+
NMR.end$Lipid*mod_std[[1]][11,4]+NMR.end$Carbonyl*mod_std[[1]][11,5]+NMR.end$Char*mod_std[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]+NMR.end$Protein*mod_std[[1]][12,1]+NMR.end$Lignin*mod_std[[1]][12,3]+
NMR.end$Lipid*mod_std[[1]][12,4]+NMR.end$Carbonyl*mod_std[[1]][12,5]+NMR.end$Char*mod_std[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]*12.0107+NMR.end$Protein*mod_std[[1]][9,1]*12.0107+NMR.end$Lignin*mod_std[[1]][9,3]*12.0107+
NMR.end$Lipid*mod_std[[1]][9,4]*12.0107+NMR.end$Carbonyl*mod_std[[1]][9,5]*12.0107+NMR.end$Char*mod_std[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]*14.0067+NMR.end$Protein*mod_std[[1]][10,1]*14.0067+NMR.end$Lignin*mod_std[[1]][10,3]*14.0067+
NMR.end$Lipid*mod_std[[1]][10,4]*14.0067+NMR.end$Carbonyl*mod_std[[1]][10,5]*14.0067+NMR.end$Char*mod_std[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]*1.00794+NMR.end$Protein*mod_std[[1]][11,1]*1.00794+NMR.end$Lignin*mod_std[[1]][11,3]*1.00794+
NMR.end$Lipid*mod_std[[1]][11,4]*1.00794+NMR.end$Carbonyl*mod_std[[1]][11,5]*1.00794+NMR.end$Char*mod_std[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]*15.994+NMR.end$Protein*mod_std[[1]][12,1]*15.994+NMR.end$Lignin*mod_std[[1]][12,3]*15.994+
NMR.end$Lipid*mod_std[[1]][12,4]*15.994+NMR.end$Carbonyl*mod_std[[1]][12,5]*15.994+NMR.end$Char*mod_std[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][1,2]+NMR.end$Protein*mod_std[[1]][1,1]+NMR.end$Lignin*mod_std[[1]][1,3]+NMR.end$Lipid*mod_std[[1]][1,4]+
                      NMR.end$Carbonyl*mod_std[[1]][1,5]+NMR.end$Char*mod_std[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][2,2]+NMR.end$Protein*mod_std[[1]][2,1]+NMR.end$Lignin*mod_std[[1]][2,3]+NMR.end$Lipid*mod_std[[1]][2,4]+
                                 NMR.end$Carbonyl*mod_std[[1]][2,5]+NMR.end$Char*mod_std[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][3,2]+NMR.end$Protein*mod_std[[1]][3,1]+NMR.end$Lignin*mod_std[[1]][3,3]+NMR.end$Lipid*mod_std[[1]][3,4]+
                        NMR.end$Carbonyl*mod_std[[1]][3,5]+NMR.end$Char*mod_std[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][4,2]+NMR.end$Protein*mod_std[[1]][4,1]+NMR.end$Lignin*mod_std[[1]][4,3]+NMR.end$Lipid*mod_std[[1]][4,4]+
                           NMR.end$Carbonyl*mod_std[[1]][4,5]+NMR.end$Char*mod_std[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][5,2]+NMR.end$Protein*mod_std[[1]][5,1]+NMR.end$Lignin*mod_std[[1]][5,3]+NMR.end$Lipid*mod_std[[1]][5,4]+
                         NMR.end$Carbonyl*mod_std[[1]][5,5]+NMR.end$Char*mod_std[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][6,2]+NMR.end$Protein*mod_std[[1]][6,1]+NMR.end$Lignin*mod_std[[1]][6,3]+NMR.end$Lipid*mod_std[[1]][6,4]+
                         NMR.end$Carbonyl*mod_std[[1]][6,5]+NMR.end$Char*mod_std[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][7,2]+NMR.end$Protein*mod_std[[1]][7,1]+NMR.end$Lignin*mod_std[[1]][7,3]+NMR.end$Lipid*mod_std[[1]][7,4]+
                                NMR.end$Carbonyl*mod_std[[1]][7,5]+NMR.end$Char*mod_std[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
rownames(nmrrest) <- NULL
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
}
}
}
#citation <- setNames(data.frame(matrix(ncol = 1, nrow= nrow(NMR.end))), c("Plz cite this work as an incentive to its curation"))
#NMR.end <- cbind(NMR.end,citation)
if (stats == FALSE) {
return(NMR.end)
} else if (stats == TRUE) {
sample_stats <- sample_stats[order(sample_stats$Sum_ssq, decreasing = FALSE),]
rownames(sample_stats) <- NULL
return(sample_stats)
}
}
#' Functional groups calculation
#'
#' This function loads, integrates and calculates the functional group distribution from the raw spectra.
#' Output is a list with the raw data, integrals and corrected spectra.
#' @param batch_nmr List of spectra already loaded with read_raw_spec()
#' @param file Vector with file names to be read if no pre-loaded spectra are provided
#' @param NMRmeth Regions to be integrated.
#' Default is spinning side bands, other methods available include: Bonanomi ("Bonanomi") and the Molecular Mixing Model ("MMM").
#' @param ecosys Standards to be used for the MMM, can be Terrestrial ("Terr_Nelson" or "Terr_Baldock"), Aquatic ("Aqua_Nelson" or "Aqua_Baldock"), or "mod" for a user-supplied standard matrix
#' @param cndata Initial correction and normalization parameters: a list holding the N/C ratio of each sample (used by the MMM fit)
#' @param mod_std User-supplied standard matrix, used when ecosys = "mod"
#' @param stats Logical; if TRUE the fit statistics (measured vs. back-calculated regions) are returned instead of the fit result
#' @export
#' @examples
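#' # A minimal usage sketch (not run); the file names and the N/C list below are
#' # placeholders, not data shipped with the package.
#' \dontrun{
#' spec <- read_raw_spec(file = c("sample1.txt", "sample2.txt"))
#' # default four-region integration with spinning side band correction
#' region_calc(batch_nmr = spec)
#' # molecular mixing model with terrestrial standards; cndata supplies one N/C value per sample
#' region_calc(batch_nmr = spec, NMRmeth = "MMM", ecosys = "Terr_Baldock",
#'             cndata = nc_list)
#' }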
region_calc <- function (batch_nmr = NULL, file = NULL, NMRmeth = NULL, ecosys=NULL,
cndata = NULL, mod_std = NULL, stats = FALSE) {
if (is.null(batch_nmr)) {
if (is.null(file)) {
stop("Please provide either a raw spectrum from the read_raw_spec function or a raw NMR file")
} else {
## read the raw spectra from a file list
batch.nmr <- read_raw_spec(file = file)
}
} else {
batch.nmr <- batch_nmr
}
if (is.null(NMRmeth)) {
## loop to process all samples
NMR.end <- NULL
batch.nmr <- int_nmr(batch.nmr)
for (i in 1:length(batch.nmr)) {
file.name <- batch.nmr[[i]]$name
Integral <- c(batch.nmr[[i]]$data$Integral)
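## indices below refer to the sub-regions returned by int_nmr(); each functional-group area
## combines the centre band with its spinning side band regions (default SSB correction)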
##carboxyl C calculation
carboxyl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[30:33]), sum(Integral$normalized.Int[21:24]), -sum(Integral$normalized.Int[3:6]))), c("Carboxyl"))
##Aryl C calculation
aryl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[27:29]), sum(Integral$normalized.Int[18:20]), -sum(Integral$normalized.Int[1:2]))), c("Aryl"))
##O-Alkyl C calculation
oalkyl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[4:8]), sum(Integral$normalized.Int[13:17]), -sum(Integral$normalized.Int[31:33]))), c("O-Alkyl"))
##Alkyl C calculation
alkyl <- setNames(data.frame(sum(2*sum(Integral$normalized.Int[1:3]), sum(Integral$normalized.Int[10:12]), -sum(Integral$normalized.Int[28:30]))), c("Alkyl"))
##Put all together
#NMR.end[[i]] <- list(file.name = file.name, data = data.frame(carboxyl, aryl, oalkyl, alkyl))
integral.end <- data.frame(carboxyl, aryl, oalkyl, alkyl)
norm <- sum(integral.end)
normalized.Int <- (integral.end/norm)*100
integral.end <- data.frame(normalized.Int)
#integral.end <- data.frame(file.name,integral.end)
NMR.end[[i]] <- data.frame(file.name, integral.end)
}
} else if (NMRmeth == "Bonanomi") {
## loop to process all samples
NMR.end <- NULL
for (i in 1:length(batch.nmr)) {
file.name <- batch.nmr[[i]]$name
sample <- list(batch.nmr[[i]])
Integral <- int_nmr (raw.spec = sample, NMRmeth = NMRmeth)
##carboxyl C calculation
carboxyl <- setNames(data.frame(sum(sum(Integral[30:33,2]), sum(Integral[21:24,2]), -2*sum(Integral[3:6,2]))), c("Carboxyl"))
##Aryl C calculation
aryl <- setNames(data.frame(sum(sum(Integral[27:29,2]), sum(Integral[18:20,2]), -2*sum(Integral[1:2,2]))), c("Aryl"))
##O-Alkyl C calculation
oalkyl <- setNames(data.frame(sum(sum(Integral[4:8,2]), sum(Integral[13:17,2]), -2*sum(Integral[31:33,2]))), c("O-Alkyl"))
##Alkyl C calculation
alkyl <- setNames(data.frame(sum(sum(Integral[1:3,2]), sum(Integral[10:12,2]), -2*sum(Integral[28:30,2]))), c("Alkyl"))
##Put all together
NMR.end[[i]] <- data.frame(file.name = file.name, data = data.frame(carboxyl, aryl, oalkyl, alkyl))
}
} else if (NMRmeth == "MMM") {
## loop to process all samples
NMR.end <- NULL
raw.spec.end <- NULL
batch.nmr <- int_nmr(batch.nmr, NMRmeth = "MMM-SSB")
nmrmerge <- NULL
for (i in 1:length(batch.nmr)) {
raw.spec.end[[i]] <- batch.nmr[[i]]
NCval <- as.numeric(cndata[[i]]$NC)
samplename <- batch.nmr[[i]]$name
sampleraw.spec <- batch.nmr[[i]]$data$raw.spec
sampleintegral <- as.data.frame(batch.nmr[[i]]$data$Integral)
##Alkyl C calculation
Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[1:3,1]), sum(sampleintegral[10:12,1]), -sum(sampleintegral[28:30,1]))), c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl <- setNames(data.frame(sum(2*sum(sampleintegral[4:5,1]), sum(sampleintegral[13:14,1]), -sum(sampleintegral[31:32,1]))), c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[6:7,1]), sum(sampleintegral[15:16,1]), -sum(sampleintegral[33:33,1]))), c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[8:8,1]), sum(sampleintegral[17:17,1]))), c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic <- setNames(data.frame(sum(2*sum(sampleintegral[27:28,1]), sum(sampleintegral[18:19,1]), -sum(sampleintegral[1:1,1]))), c("Aromatic"))
##Phenolic C calculation
Phenolic <- setNames(data.frame(sum(2*sum(sampleintegral[29:29,1]), sum(sampleintegral[20:20,1]), -sum(sampleintegral[2:2,1]))), c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_Carboxylic <- setNames(data.frame(sum(2*sum(sampleintegral[30:32,1]), sum(sampleintegral[21:23,1]), -sum(sampleintegral[3:5,1]))), c("Amide_Carboxylic"))
##Ketone C calculation
Ketone <- setNames(data.frame(sum(2*sum(sampleintegral[33:33,1]), sum(sampleintegral[24:24,1]), -sum(sampleintegral[6:6,1]))), c("Ketone"))
##Put all together
Amide_to_Ketone <- c(Amide_Carboxylic + Ketone)
sampleintegraljoin <- data.frame(Alkyl, N_Alkyl_Methoxyl, O_Alkyl, Di_O_Alkyl, Aromatic, Phenolic, Amide_to_Ketone)
sampleintegraljoin <- t(sampleintegraljoin)
norm <- sum(sampleintegraljoin)
sampleintegraljoin <- (sampleintegraljoin/norm)*100
sampleintegraljoin <- data.frame(sampleintegraljoin)
sampleintegralend <- rbind(NCval,sampleintegraljoin)
raw.spec.end[[i]] <- list("name" = samplename, "data" = list("raw.spec" = sampleraw.spec,"Integral" = sampleintegralend))
if (ecosys == "Terr_Nelson") {
stdmat <- std_nmr(ecosys = "Terr_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Terr_Baldock") {
stdmat <- std_nmr(ecosys = "Terr_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Nelson") {
stdmat <- std_nmr(ecosys = "Aqua_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Baldock") {
stdmat <- std_nmr(ecosys = "Aqua_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Back calculated NMR results
##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
    ##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
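    ## Squared differences between the measured region integrals and the values back-calculated
    ## from the fitted composition; Sum_ssq is the per-fit total used to rank fits when stats = TRUE.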
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
} else if (ecosys == "mod") {
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = mod_std, amoSTD = 6, best.fits = 30, NMRmeth = "MMM")
    ## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]+NMR.end$Protein*mod_std[[1]][9,1]+NMR.end$Lignin*mod_std[[1]][9,3]+
NMR.end$Lipid*mod_std[[1]][9,4]+NMR.end$Carbonyl*mod_std[[1]][9,5]+NMR.end$Char*mod_std[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]+NMR.end$Protein*mod_std[[1]][10,1]+NMR.end$Lignin*mod_std[[1]][10,3]+
NMR.end$Lipid*mod_std[[1]][10,4]+NMR.end$Carbonyl*mod_std[[1]][10,5]+NMR.end$Char*mod_std[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]+NMR.end$Protein*mod_std[[1]][11,1]+NMR.end$Lignin*mod_std[[1]][11,3]+
NMR.end$Lipid*mod_std[[1]][11,4]+NMR.end$Carbonyl*mod_std[[1]][11,5]+NMR.end$Char*mod_std[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]+NMR.end$Protein*mod_std[[1]][12,1]+NMR.end$Lignin*mod_std[[1]][12,3]+
NMR.end$Lipid*mod_std[[1]][12,4]+NMR.end$Carbonyl*mod_std[[1]][12,5]+NMR.end$Char*mod_std[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]*12.0107+NMR.end$Protein*mod_std[[1]][9,1]*12.0107+NMR.end$Lignin*mod_std[[1]][9,3]*12.0107+
NMR.end$Lipid*mod_std[[1]][9,4]*12.0107+NMR.end$Carbonyl*mod_std[[1]][9,5]*12.0107+NMR.end$Char*mod_std[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]*14.0067+NMR.end$Protein*mod_std[[1]][10,1]*14.0067+NMR.end$Lignin*mod_std[[1]][10,3]*14.0067+
NMR.end$Lipid*mod_std[[1]][10,4]*14.0067+NMR.end$Carbonyl*mod_std[[1]][10,5]*14.0067+NMR.end$Char*mod_std[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]*1.00794+NMR.end$Protein*mod_std[[1]][11,1]*1.00794+NMR.end$Lignin*mod_std[[1]][11,3]*1.00794+
NMR.end$Lipid*mod_std[[1]][11,4]*1.00794+NMR.end$Carbonyl*mod_std[[1]][11,5]*1.00794+NMR.end$Char*mod_std[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]*15.994+NMR.end$Protein*mod_std[[1]][12,1]*15.994+NMR.end$Lignin*mod_std[[1]][12,3]*15.994+
NMR.end$Lipid*mod_std[[1]][12,4]*15.994+NMR.end$Carbonyl*mod_std[[1]][12,5]*15.994+NMR.end$Char*mod_std[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
    ##Alkyl C calculation
    Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][1,2]+NMR.end$Protein*mod_std[[1]][1,1]+NMR.end$Lignin*mod_std[[1]][1,3]+NMR.end$Lipid*mod_std[[1]][1,4]+
                          NMR.end$Carbonyl*mod_std[[1]][1,5]+NMR.end$Char*mod_std[[1]][1,6], c("Alkyl"))
    ##N_Alkyl_Methoxyl C calculation
    N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][2,2]+NMR.end$Protein*mod_std[[1]][2,1]+NMR.end$Lignin*mod_std[[1]][2,3]+NMR.end$Lipid*mod_std[[1]][2,4]+
                                     NMR.end$Carbonyl*mod_std[[1]][2,5]+NMR.end$Char*mod_std[[1]][2,6], c("N_Alkyl_Methoxyl"))
    ##O-Alkyl C calculation
    O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][3,2]+NMR.end$Protein*mod_std[[1]][3,1]+NMR.end$Lignin*mod_std[[1]][3,3]+NMR.end$Lipid*mod_std[[1]][3,4]+
                            NMR.end$Carbonyl*mod_std[[1]][3,5]+NMR.end$Char*mod_std[[1]][3,6], c("O-Alkyl"))
    ##Di_O_Alkyl C calculation
    Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][4,2]+NMR.end$Protein*mod_std[[1]][4,1]+NMR.end$Lignin*mod_std[[1]][4,3]+NMR.end$Lipid*mod_std[[1]][4,4]+
                               NMR.end$Carbonyl*mod_std[[1]][4,5]+NMR.end$Char*mod_std[[1]][4,6], c("Di_O_Alkyl"))
    ##Aromatic C calculation
    Aromatic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][5,2]+NMR.end$Protein*mod_std[[1]][5,1]+NMR.end$Lignin*mod_std[[1]][5,3]+NMR.end$Lipid*mod_std[[1]][5,4]+
                             NMR.end$Carbonyl*mod_std[[1]][5,5]+NMR.end$Char*mod_std[[1]][5,6], c("Aromatic"))
    ##Phenolic C calculation
    Phenolic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][6,2]+NMR.end$Protein*mod_std[[1]][6,1]+NMR.end$Lignin*mod_std[[1]][6,3]+NMR.end$Lipid*mod_std[[1]][6,4]+
                             NMR.end$Carbonyl*mod_std[[1]][6,5]+NMR.end$Char*mod_std[[1]][6,6], c("Phenolic"))
    ##Amide_to_Ketone C calculation
    Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][7,2]+NMR.end$Protein*mod_std[[1]][7,1]+NMR.end$Lignin*mod_std[[1]][7,3]+NMR.end$Lipid*mod_std[[1]][7,4]+
                                    NMR.end$Carbonyl*mod_std[[1]][7,5]+NMR.end$Char*mod_std[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
rownames(nmrrest) <- NULL
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
}
}
## return the corrected spectra list
} else if (NMRmeth == "MMMFixN") {
## loop to process all samples
NMR.end <- NULL
raw.spec.end <- NULL
batch.nmr <- int_nmr(batch.nmr, NMRmeth = "MMM-SSB")
nmrmerge <- NULL
for (i in 1:length(batch.nmr)) {
raw.spec.end[[i]] <- batch.nmr[[i]]
NCval <- as.numeric(cndata[[i]]$NC)
samplename <- batch.nmr[[i]]$name
sampleraw.spec <- batch.nmr[[i]]$data$raw.spec
sampleintegral <- as.data.frame(batch.nmr[[i]]$data$Integral)
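      ## Assemble the seven MMM region integrals from the MMM-SSB sub-integrals returned by
      ## int_nmr() (combination of the raw integration regions, here assumed to be a spinning-sideband correction).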
      ##Alkyl C calculation
Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[1:3,1]), sum(sampleintegral[10:12,1]), -sum(sampleintegral[28:30,1]))), c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl <- setNames(data.frame(sum(2*sum(sampleintegral[4:5,1]), sum(sampleintegral[13:14,1]), -sum(sampleintegral[31:32,1]))), c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[6:7,1]), sum(sampleintegral[15:16,1]), -sum(sampleintegral[33:33,1]))), c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl <- setNames(data.frame(sum(2*sum(sampleintegral[8:8,1]), sum(sampleintegral[17:17,1]))), c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic <- setNames(data.frame(sum(2*sum(sampleintegral[27:28,1]), sum(sampleintegral[18:19,1]), -sum(sampleintegral[1:1,1]))), c("Aromatic"))
##Phenolic C calculation
Phenolic <- setNames(data.frame(sum(2*sum(sampleintegral[29:29,1]), sum(sampleintegral[20:20,1]), -sum(sampleintegral[2:2,1]))), c("Phenolic"))
##Amide_Carboxylic C calculation
Amide_Carboxylic <- setNames(data.frame(sum(2*sum(sampleintegral[30:32,1]), sum(sampleintegral[21:23,1]), -sum(sampleintegral[3:5,1]))), c("Amide_Carboxylic"))
##Ketone C calculation
Ketone <- setNames(data.frame(sum(2*sum(sampleintegral[33:33,1]), sum(sampleintegral[24:24,1]), -sum(sampleintegral[6:6,1]))), c("Ketone"))
##Put all together
Amide_to_Ketone <- c(Amide_Carboxylic + Ketone)
sampleintegraljoin <- data.frame(Alkyl, N_Alkyl_Methoxyl, O_Alkyl, Di_O_Alkyl, Aromatic, Phenolic, Amide_to_Ketone)
sampleintegraljoin <- t(sampleintegraljoin)
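      ## Normalise the seven region integrals to sum to 100 and prepend the measured N:C ratio,
      ## which is passed to the MMMFixN fit as an additional constraint.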
norm <- sum(sampleintegraljoin)
sampleintegraljoin <- (sampleintegraljoin/norm)*100
sampleintegralend <- rbind(NCval,sampleintegraljoin)
sampleintegralend <- data.frame(sampleintegralend)
raw.spec.end[[i]] <- list("name" = samplename, "data" = list("raw.spec" = sampleraw.spec,"Integral" = sampleintegralend))
if (ecosys == "Terr_Nelson") {
stdmat <- std_nmr(ecosys = "Terr_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
      ## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
      ##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
      ##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
rownames(nmrrest) <- NULL
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Terr_Baldock") {
stdmat <- std_nmr(ecosys = "Terr_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
      ## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
      ##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
      ##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Nelson") {
stdmat <- std_nmr(ecosys = "Aqua_Nelson")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
      ## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
      ##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
      ##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "Aqua_Baldock") {
stdmat <- std_nmr(ecosys = "Aqua_Baldock")
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = stdmat, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
      ## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]+NMR.end$Protein*stdmat[[1]][9,1]+NMR.end$Lignin*stdmat[[1]][9,3]+
NMR.end$Lipid*stdmat[[1]][9,4]+NMR.end$Carbonyl*stdmat[[1]][9,5]+NMR.end$Char*stdmat[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]+NMR.end$Protein*stdmat[[1]][10,1]+NMR.end$Lignin*stdmat[[1]][10,3]+
NMR.end$Lipid*stdmat[[1]][10,4]+NMR.end$Carbonyl*stdmat[[1]][10,5]+NMR.end$Char*stdmat[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]+NMR.end$Protein*stdmat[[1]][11,1]+NMR.end$Lignin*stdmat[[1]][11,3]+
NMR.end$Lipid*stdmat[[1]][11,4]+NMR.end$Carbonyl*stdmat[[1]][11,5]+NMR.end$Char*stdmat[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]+NMR.end$Protein*stdmat[[1]][12,1]+NMR.end$Lignin*stdmat[[1]][12,3]+
NMR.end$Lipid*stdmat[[1]][12,4]+NMR.end$Carbonyl*stdmat[[1]][12,5]+NMR.end$Char*stdmat[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][9,2]*12.0107+NMR.end$Protein*stdmat[[1]][9,1]*12.0107+NMR.end$Lignin*stdmat[[1]][9,3]*12.0107+
NMR.end$Lipid*stdmat[[1]][9,4]*12.0107+NMR.end$Carbonyl*stdmat[[1]][9,5]*12.0107+NMR.end$Char*stdmat[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][10,2]*14.0067+NMR.end$Protein*stdmat[[1]][10,1]*14.0067+NMR.end$Lignin*stdmat[[1]][10,3]*14.0067+
NMR.end$Lipid*stdmat[[1]][10,4]*14.0067+NMR.end$Carbonyl*stdmat[[1]][10,5]*14.0067+NMR.end$Char*stdmat[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][11,2]*1.00794+NMR.end$Protein*stdmat[[1]][11,1]*1.00794+NMR.end$Lignin*stdmat[[1]][11,3]*1.00794+
NMR.end$Lipid*stdmat[[1]][11,4]*1.00794+NMR.end$Carbonyl*stdmat[[1]][11,5]*1.00794+NMR.end$Char*stdmat[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*stdmat[[1]][12,2]*15.994+NMR.end$Protein*stdmat[[1]][12,1]*15.994+NMR.end$Lignin*stdmat[[1]][12,3]*15.994+
NMR.end$Lipid*stdmat[[1]][12,4]*15.994+NMR.end$Carbonyl*stdmat[[1]][12,5]*15.994+NMR.end$Char*stdmat[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
      ##Alkyl C calculation
Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][1,2]+NMR.end$Protein*stdmat[[1]][1,1]+NMR.end$Lignin*stdmat[[1]][1,3]+NMR.end$Lipid*stdmat[[1]][1,4]+
NMR.end$Carbonyl*stdmat[[1]][1,5]+NMR.end$Char*stdmat[[1]][1,6], c("Alkyl"))
##N_Alkyl_Methoxyl C calculation
N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][2,2]+NMR.end$Protein*stdmat[[1]][2,1]+NMR.end$Lignin*stdmat[[1]][2,3]+NMR.end$Lipid*stdmat[[1]][2,4]+
NMR.end$Carbonyl*stdmat[[1]][2,5]+NMR.end$Char*stdmat[[1]][2,6], c("N_Alkyl_Methoxyl"))
##O-Alkyl C calculation
O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][3,2]+NMR.end$Protein*stdmat[[1]][3,1]+NMR.end$Lignin*stdmat[[1]][3,3]+NMR.end$Lipid*stdmat[[1]][3,4]+
NMR.end$Carbonyl*stdmat[[1]][3,5]+NMR.end$Char*stdmat[[1]][3,6], c("O-Alkyl"))
##Di_O_Alkyl C calculation
Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][4,2]+NMR.end$Protein*stdmat[[1]][4,1]+NMR.end$Lignin*stdmat[[1]][4,3]+NMR.end$Lipid*stdmat[[1]][4,4]+
NMR.end$Carbonyl*stdmat[[1]][4,5]+NMR.end$Char*stdmat[[1]][4,6], c("Di_O_Alkyl"))
##Aromatic C calculation
Aromatic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][5,2]+NMR.end$Protein*stdmat[[1]][5,1]+NMR.end$Lignin*stdmat[[1]][5,3]+NMR.end$Lipid*stdmat[[1]][5,4]+
NMR.end$Carbonyl*stdmat[[1]][5,5]+NMR.end$Char*stdmat[[1]][5,6], c("Aromatic"))
##Phenolic C calculation
Phenolic_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][6,2]+NMR.end$Protein*stdmat[[1]][6,1]+NMR.end$Lignin*stdmat[[1]][6,3]+NMR.end$Lipid*stdmat[[1]][6,4]+
NMR.end$Carbonyl*stdmat[[1]][6,5]+NMR.end$Char*stdmat[[1]][6,6], c("Phenolic"))
      ##Amide_to_Ketone C calculation
Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*stdmat[[1]][7,2]+NMR.end$Protein*stdmat[[1]][7,1]+NMR.end$Lignin*stdmat[[1]][7,3]+NMR.end$Lipid*stdmat[[1]][7,4]+
NMR.end$Carbonyl*stdmat[[1]][7,5]+NMR.end$Char*stdmat[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
} else if (ecosys == "mod") {
NMR.end <- fit_LCF(all.samples = raw.spec.end, all.standards = mod_std, amoSTD = 6, best.fits = 30, NMRmeth = "MMMFixN")
      ## Elemental ratios (relative to C)
Cmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]+NMR.end$Protein*mod_std[[1]][9,1]+NMR.end$Lignin*mod_std[[1]][9,3]+
NMR.end$Lipid*mod_std[[1]][9,4]+NMR.end$Carbonyl*mod_std[[1]][9,5]+NMR.end$Char*mod_std[[1]][9,6])
Nmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]+NMR.end$Protein*mod_std[[1]][10,1]+NMR.end$Lignin*mod_std[[1]][10,3]+
NMR.end$Lipid*mod_std[[1]][10,4]+NMR.end$Carbonyl*mod_std[[1]][10,5]+NMR.end$Char*mod_std[[1]][10,6])
Hmol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]+NMR.end$Protein*mod_std[[1]][11,1]+NMR.end$Lignin*mod_std[[1]][11,3]+
NMR.end$Lipid*mod_std[[1]][11,4]+NMR.end$Carbonyl*mod_std[[1]][11,5]+NMR.end$Char*mod_std[[1]][11,6])
Omol <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]+NMR.end$Protein*mod_std[[1]][12,1]+NMR.end$Lignin*mod_std[[1]][12,3]+
NMR.end$Lipid*mod_std[[1]][12,4]+NMR.end$Carbonyl*mod_std[[1]][12,5]+NMR.end$Char*mod_std[[1]][12,6])
Cwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][9,2]*12.0107+NMR.end$Protein*mod_std[[1]][9,1]*12.0107+NMR.end$Lignin*mod_std[[1]][9,3]*12.0107+
NMR.end$Lipid*mod_std[[1]][9,4]*12.0107+NMR.end$Carbonyl*mod_std[[1]][9,5]*12.0107+NMR.end$Char*mod_std[[1]][9,6]*12.0107)
Nwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][10,2]*14.0067+NMR.end$Protein*mod_std[[1]][10,1]*14.0067+NMR.end$Lignin*mod_std[[1]][10,3]*14.0067+
NMR.end$Lipid*mod_std[[1]][10,4]*14.0067+NMR.end$Carbonyl*mod_std[[1]][10,5]*14.0067+NMR.end$Char*mod_std[[1]][10,6]*14.0067)
Hwgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][11,2]*1.00794+NMR.end$Protein*mod_std[[1]][11,1]*1.00794+NMR.end$Lignin*mod_std[[1]][11,3]*1.00794+
NMR.end$Lipid*mod_std[[1]][11,4]*1.00794+NMR.end$Carbonyl*mod_std[[1]][11,5]*1.00794+NMR.end$Char*mod_std[[1]][11,6]*1.00794)
Owgt <- as.numeric(NMR.end$Carbohydrates*mod_std[[1]][12,2]*15.994+NMR.end$Protein*mod_std[[1]][12,1]*15.994+NMR.end$Lignin*mod_std[[1]][12,3]*15.994+
NMR.end$Lipid*mod_std[[1]][12,4]*15.994+NMR.end$Carbonyl*mod_std[[1]][12,5]*15.994+NMR.end$Char*mod_std[[1]][12,6]*15.994)
swgt <- c(Cwgt + Nwgt + Hwgt +Owgt)
NOSC <- as.numeric(4+((2*Omol+3*Nmol-1*Hmol-4*Cmol)/Cmol))
## Final result
NMR.end <- cbind(NMR.end, Cmol, Nmol, Hmol, Omol, Cwgt/swgt, Nwgt/swgt, Hwgt/swgt, Owgt/swgt, NOSC)
## Back calculated NMR results
      ##Alkyl C calculation
      Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][1,2]+NMR.end$Protein*mod_std[[1]][1,1]+NMR.end$Lignin*mod_std[[1]][1,3]+NMR.end$Lipid*mod_std[[1]][1,4]+
                            NMR.end$Carbonyl*mod_std[[1]][1,5]+NMR.end$Char*mod_std[[1]][1,6], c("Alkyl"))
      ##N_Alkyl_Methoxyl C calculation
      N_Alkyl_Methoxyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][2,2]+NMR.end$Protein*mod_std[[1]][2,1]+NMR.end$Lignin*mod_std[[1]][2,3]+NMR.end$Lipid*mod_std[[1]][2,4]+
                                       NMR.end$Carbonyl*mod_std[[1]][2,5]+NMR.end$Char*mod_std[[1]][2,6], c("N_Alkyl_Methoxyl"))
      ##O-Alkyl C calculation
      O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][3,2]+NMR.end$Protein*mod_std[[1]][3,1]+NMR.end$Lignin*mod_std[[1]][3,3]+NMR.end$Lipid*mod_std[[1]][3,4]+
                              NMR.end$Carbonyl*mod_std[[1]][3,5]+NMR.end$Char*mod_std[[1]][3,6], c("O-Alkyl"))
      ##Di_O_Alkyl C calculation
      Di_O_Alkyl_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][4,2]+NMR.end$Protein*mod_std[[1]][4,1]+NMR.end$Lignin*mod_std[[1]][4,3]+NMR.end$Lipid*mod_std[[1]][4,4]+
                                 NMR.end$Carbonyl*mod_std[[1]][4,5]+NMR.end$Char*mod_std[[1]][4,6], c("Di_O_Alkyl"))
      ##Aromatic C calculation
      Aromatic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][5,2]+NMR.end$Protein*mod_std[[1]][5,1]+NMR.end$Lignin*mod_std[[1]][5,3]+NMR.end$Lipid*mod_std[[1]][5,4]+
                               NMR.end$Carbonyl*mod_std[[1]][5,5]+NMR.end$Char*mod_std[[1]][5,6], c("Aromatic"))
      ##Phenolic C calculation
      Phenolic_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][6,2]+NMR.end$Protein*mod_std[[1]][6,1]+NMR.end$Lignin*mod_std[[1]][6,3]+NMR.end$Lipid*mod_std[[1]][6,4]+
                               NMR.end$Carbonyl*mod_std[[1]][6,5]+NMR.end$Char*mod_std[[1]][6,6], c("Phenolic"))
      ##Amide_to_Ketone C calculation
      Amide_to_Ketone_m <- setNames(NMR.end$Carbohydrates*mod_std[[1]][7,2]+NMR.end$Protein*mod_std[[1]][7,1]+NMR.end$Lignin*mod_std[[1]][7,3]+NMR.end$Lipid*mod_std[[1]][7,4]+
                                      NMR.end$Carbonyl*mod_std[[1]][7,5]+NMR.end$Char*mod_std[[1]][7,6], c("Amide_to_Ketone"))
sum_m <- setNames(Alkyl_m + N_Alkyl_Methoxyl_m + O_Alkyl_m + Di_O_Alkyl_m + Aromatic_m + Phenolic_m + Amide_to_Ketone_m, c("Sum"))
sum_c <- sum(sampleintegraljoin)
sampleintegraljoin <-rbind(sampleintegraljoin, sum_c)
sample_stats <- data.frame(Alkyl_m, N_Alkyl_Methoxyl_m, O_Alkyl_m, Di_O_Alkyl_m, Aromatic_m, Phenolic_m, Amide_to_Ketone_m, sum_m)
nmrrest <- NULL
for (i in 1:nrow(sample_stats)) {
nmrrestt <- c(sampleintegraljoin)
nmrrest <- rbind(nmrrest, nmrrestt)
}
colnames(nmrrest) <- c("Alkyl", "N_Alkyl_Methoxyl", "O-Alkyl", "Di_O_Alkyl", "Aromatic", "Phenolic", "Amide_to_Ketone", "Sum")
rownames(nmrrest) <- NULL
sample_stats <- cbind(sample_stats,nmrrest)
ssq_sample <- data.frame((sample_stats$Alkyl-sample_stats$Alkyl_m)^2, (sample_stats$N_Alkyl_Methoxyl-sample_stats$N_Alkyl_Methoxyl_m)^2,
(sample_stats$`O-Alkyl` -sample_stats$O_Alkyl_m)^2, (sample_stats$Di_O_Alkyl-sample_stats$Di_O_Alkyl_m)^2,
(sample_stats$Aromatic -sample_stats$Aromatic_m)^2, (sample_stats$Phenolic -sample_stats$Phenolic_m)^2,
(sample_stats$Amide_to_Ketone -sample_stats$Amide_to_Ketone_m)^2, (sample_stats$Sum -sample_stats$sum_m)^2)
colnames(ssq_sample) <- c("Alkyl_ssq", "N_Alkyl_Methoxyl_ssq", "O-Alkyl_ssq", "Di_O_Alkyl_ssq", "Aromatic_ssq", "Phenolic_ssq", "Amide_to_Ketone_ssq", "Sum_ssq")
sample_stats <- cbind(sample_stats,ssq_sample)
}
}
}
#citation <- setNames(data.frame(matrix(ncol = 1, nrow= nrow(NMR.end))), c("Plz cite this work as an incentive to its curation"))
#NMR.end <- cbind(NMR.end,citation)
if (stats == FALSE) {
return(NMR.end)
} else if (stats == TRUE) {
sample_stats <- sample_stats[order(sample_stats$Sum_ssq, decreasing = FALSE),]
rownames(sample_stats) <- NULL
return(sample_stats)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildMatrix.r
\name{build.x}
\alias{build.x}
\title{build.x}
\usage{
build.x(formula, data, contrasts = TRUE, sparse = FALSE)
}
\arguments{
\item{formula}{A formula}
\item{data}{A data.frame}
\item{contrasts}{Logical indicating whether a factor's base level is removed. Can be either one single value applied to every factor or a value for each factor. Values will be recycled if necessary.}
\item{sparse}{Logical indicating if result should be sparse. Currently not used.}
}
\value{
A matrix of the predictor variables specified in the formula
}
\description{
Build the x matrix for a glmnet model
}
\details{
Given a formula and a data.frame build the predictor matrix
}
\examples{
require(ggplot2)
head(mpg)
head(build.x(hwy ~ class + cyl + year, data=mpg))
testFrame <- data.frame(First=sample(1:10, 20, replace=TRUE),
Second=sample(1:20, 20, replace=TRUE),
Third=sample(1:10, 20, replace=TRUE),
Fourth=factor(rep(c("Alice","Bob","Charlie","David"), 5)),
Fifth=ordered(rep(c("Edward","Frank","Georgia","Hank","Isaac"), 4)),
Sixth=factor(rep(c("a", "b"), 10)), stringsAsFactors=F)
head(build.x(First ~ Second + Fourth + Sixth, testFrame,
contrasts=c("Fourth"=TRUE, "Fifth"=FALSE, "Sixth"=TRUE)))
head(build.x(First ~ Second + Fourth + Fifth + Sixth, testFrame,
contrasts=c(Fourth=TRUE, Fifth=FALSE, Sixth=TRUE)))
head(build.x(First ~ Second + Fourth + Fifth + Sixth, testFrame, contrasts=TRUE))
head(build.x(First ~ Second + Fourth + Fifth + Sixth, testFrame,
contrasts=FALSE))
head(build.x(First ~ Second + Fourth + Fifth + Sixth - 1, testFrame,
contrasts=TRUE))
head(build.x(First ~ Second + Fourth + Fifth + Fourth*Sixth, testFrame, contrasts=TRUE))
head(build.x(First ~ Second + Fourth + Fifth + Third*Sixth, testFrame, contrasts=TRUE))
#' head(build.x(First ~ Second + Fourth + Fifth + Fourth*Sixth, testFrame, contrasts=FALSE))
head(build.x(First ~ Second + Fourth + Fifth + Third*Sixth, testFrame, contrasts=FALSE))
## if contrasts is a list then you can specify just certain factors
}
\author{
Jared P. Lander
}
|
/man/build.x.Rd
|
no_license
|
kpivert/useful
|
R
| false | true | 2,119 |
rd
|
context("test specfp objects")
pjnz <- system.file("extdata/testpjnz", "Botswana2018.PJNZ", package="eppasm")
fp <- prepare_directincid(pjnz)
test_that("paedsurv dist sums to one for each sex-year", {
expect_true(all(round(colSums(fp$paedsurv_cd4dist), 5) %in% 0:1))
expect_true(all(round(colSums(fp$paedsurv_artcd4dist,,2), 5) %in% 0:1))
})
|
/tests/testthat/test_specfp.R
|
no_license
|
aucarter/eppasm
|
R
| false | false | 348 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sequence2gtypes.R
\name{sequence2gtypes}
\alias{sequence2gtypes}
\title{Convert Sequences To \code{gtypes}}
\usage{
sequence2gtypes(x, strata = NULL, seq.names = NULL, schemes = NULL,
description = NULL, other = NULL)
}
\arguments{
\item{x}{DNA sequences as a character matrix, a \code{\link{DNAbin}} object,
or \linkS4class{multidna} object.}
\item{strata}{a vector or factor giving stratification for each sequence. If
not provided all individuals are assigned to the same stratum (Default).}
\item{seq.names}{names for each set of sequences. If not provided default names
are generated.}
\item{schemes}{an optional data.frame of stratification schemes.}
\item{description}{an optional label for the object.}
\item{other}{a slot to carry other related information - unused in package
analyses.}
}
\value{
a \linkS4class{gtypes} object.
}
\description{
Create a \linkS4class{gtypes} object from sequence data.
}
\examples{
#--- create a haploid sequence (mtDNA) gtypes object
data(dolph.strata)
data(dolph.seqs)
strata <- dolph.strata$fine
names(strata) <- dolph.strata$ids
dloop.fine <- sequence2gtypes(dolph.seqs, strata, seq.names = "dLoop",
description = "dLoop: fine-scale stratification")
}
\author{
Eric Archer \email{eric.archer@noaa.gov}
}
|
/man/sequence2gtypes.Rd
|
no_license
|
PAMorin/strataG
|
R
| false | true | 1,340 |
rd
|
Polylepis.pepei <- read.csv("~/R/pepei/Polylepis pepei.csv", header=TRUE)
Polylepis.pepei
data <- Polylepis.pepei
data
apply(data,2,mean)
apply(data,2,sd)
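# Coefficient of variation (%) for each species column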
coefvar <- function(x){100*sd(x)/mean(x)}
coefvar
apply(data,2,coefvar)
boxplot(data,xlab="Especies de vegetación", ylab="Abundancia")
###########
library(vegan)
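# Alpha diversity per forest plot: Shannon (natural log and base 2), Simpson and inverse
# Simpson indices, plus species richness (specnumber)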
diversity(data,"shannon")
diversity(data,index="shannon", MARGIN=1,base=2)
diversity(data,index="simpson")
diversity(data,index="invsimpson")
specnumber(data)
###
barplot(diversity(data,index="shannon",base=2),xlab="Bosques de Polylepis",ylab="abundancia", col="black")
log2(specnumber(data))
renyi(data,scale=c(0,1,2),hill=TRUE)
cor(data)
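# Between-plot dissimilarity (Euclidean distance and Jaccard index), hierarchical clustering
# and a heatmap of the community matrix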
euclidia <- dist(data,method="euclidean", upper=T, diag=T)
euclidia
jaccard <- vegdist(data,method="jaccard", diag=FALSE, upper=FALSE)
jaccard
plot(hclust(vegdist(data,method="jaccard")), hang=-1, main="Analisis de cluster por bosque")
cluster <- t(data)
plot(hclust(vegdist(data,method="jaccard")), hang=-1, main="analisis de cluster", xlab="Muestras", ylab="Disimilaridad de jaccard")
x <- as.matrix(data)
x
heatmap(x,distfun = function(c)vegdist(c,"jaccard"), col=topo.colors(16))
|
/pepei.R
|
no_license
|
WilderQuispeRojas/BDPolylepis
|
R
| false | false | 1,148 |
r
|
PlotCSVWithTitleAndLabels <- function(csvFilename, plotTitle, xLabel, yLabel)
{
df = read.csv(csvFilename)
plot(df$time, df$amplitude, type = 'l',
main = plotTitle, xlab = xLabel, ylab = yLabel)
grid()
}
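# One thin wrapper per signal test: each reads the CSV written by that test and plots it
# with a matching title and axis labels.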
SignalTest_SignalWithOneWaveform_CheckWaveformIsGenerated <- function()
{
PlotCSVWithTitleAndLabels(
'CheckWaveformIsGenerated.csv',
'CheckWaveformIsGenerated',
'Time (s)',
'Amplitude (V)')
}
SignalTest_SignalWithWhiteNoise_CheckWhiteNoiseMeanIsZero <- function()
{
PlotCSVWithTitleAndLabels(
'CheckWhiteNoiseMeanIsZero.csv',
'CheckWhiteNoiseMeanIsZero',
'Time (s)',
'Amplitude (V)')
}
SignalTest_SignalWithWhiteNoise_CheckFrequencyChanging <- function()
{
PlotCSVWithTitleAndLabels(
'CheckFrequencyChanging.csv',
'CheckFrequencyChanging',
'Time (s)',
'Amplitude (V)')
}
SignalTest_SignalWithHarmonics_Check3rdHarmonicIsPresent <- function()
{
PlotCSVWithTitleAndLabels(
'Check3rdHarmonicIsPresent.csv',
'Check3rdHarmonicIsPresent',
'Time (s)',
'Amplitude (V)')
}
SignalTest_SignalWithHarmonics_CheckHarmonicsArePresent <- function()
{
PlotCSVWithTitleAndLabels(
'CheckHarmonicsArePresent.csv',
'CheckHarmonicsArePresent',
'Time (s)',
'Amplitude (V)')
}
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSag <- function()
{
PlotCSVWithTitleAndLabels(
'CheckSag.csv',
'CheckSag',
'Time (s)',
'Amplitude (V)')
}
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSwell <- function()
{
PlotCSVWithTitleAndLabels(
'CheckSwell.csv',
'CheckSwell',
'Time (s)',
'Amplitude (V)')
}
SignalTest_SignalWithOneWaveform_CheckWaveformIsGenerated()
SignalTest_SignalWithWhiteNoise_CheckWhiteNoiseMeanIsZero()
SignalTest_SignalWithWhiteNoise_CheckFrequencyChanging()
SignalTest_SignalWithHarmonics_Check3rdHarmonicIsPresent()
SignalTest_SignalWithHarmonics_CheckHarmonicsArePresent()
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSag()
SignalTest_SignalWithOneWaveformWithoutInterpolation_CheckSwell()
shell.exec('Rplots.pdf')
|
/Rscripts/GenerateTestsOutputs.R
|
no_license
|
UAQpqd/synthSignal
|
R
| false | false | 2,103 |
r
|
library(rvest)
library(tidyverse)
content <- read_html("inputs/2020/sitemap.xml")
each_product <- content %>%
xml_find_all("//url") # Instead of HTML/CSS here we're using XML. It's fairly similar, but just a slightly different syntax. Each product is within a url tag.
all_products <-
data_frame(
product_link = html_node(each_product, "loc") %>% html_text(trim = TRUE), # We're first trying to get the URL for each product
product_name_existence_test = html_node(each_product, "image") %>% html_text(trim = TRUE), # There's a bunch of URLs that aren't products so we want to test for whether there's a name first
product_name = html_node(each_product, "image") %>% html_node("title") %>% html_text(trim = TRUE), # We're first trying to get the URL for each product
)
all_products <-
all_products %>%
filter(!is.na(product_name_existence_test)) %>% # Drop it if not a product
select(-product_name_existence_test)
write_csv(all_products, "outputs/misc/2020_links_from_xml.csv")
|
/scripts/get_2020_links_from_xml.R
|
no_license
|
RohanAlexander/paspaley
|
R
| false | false | 1,010 |
r
|
##' Expected number of observed pairs assuming multiple-transmission and single-linkage
##'
##' @description
##' `r lifecycle::badge('deprecated')`
##' This function calculates the expected number of pairs observed in a sample of size `M`.
##' The multiple-transmission and single-linkage method assumes the following:
##' \enumerate{
##' \item Each case \eqn{i} is, on average, the infector of `R` cases in the population (\eqn{N})
##' \item Each case \eqn{i} is allowed to be linked by the linkage criteria to only one other case \eqn{j} in the sampled population (\eqn{M}).
##' }
##'
##' @param eta scalar or vector giving the sensitivity of the linkage criteria
##' @param chi scalar or vector giving the specificity of the linkage criteria
##' @param rho scalar or vector giving the proportion of the final outbreak size that is sampled
##' @param M scalar or vector giving the number of cases sampled
##' @param R scalar or vector giving the effective reproductive number of the pathogen
##'
##' @return scalar or vector giving the expected number of linked pairs observed in the sample
##'
##' @author John Giles, Shirlee Wohl and Justin Lessler
##'
##' @examples
##' # Perfect sensitivity and specificity
##' obs_pairs_mtsl(eta=1, chi=1, rho=0.5, M=100, R=1)
##'
##' obs_pairs_mtsl(eta=0.99, chi=0.9, rho=1, M=50, R=1)
##'
##' obs_pairs_mtsl(eta=0.99, chi=0.9, rho=0.5, M=100, R=1)
##'
##' @family obs_pairs
##'
##' @export
##'
##'
obs_pairs_mtsl <- function(chi, eta, rho, M, R) {
lifecycle::deprecate_soft("1.0.0", "obs_pairs_mtsl()", "translink_expected_links_obs_mtsl()")
lifecycle::deprecate_soft("1.0.0", "obs_pairs_mtsl(eta)", "translink_expected_links_obs_mtsl(sensitivity)")
lifecycle::deprecate_soft("1.0.0", "obs_pairs_mtsl(chi)", "translink_expected_links_obs_mtsl(specificity)")
if (!all(is.numeric(eta), eta >= 0 & eta <= 1))
stop("eta must be numeric between 0 and 1")
if (!all(is.numeric(chi), chi >= 0 & chi <= 1))
stop("chi must be numeric between 0 and 1")
if (!all(is.numeric(rho), rho > 0 & rho <= 1))
stop("rho must be numeric > 0 and <= 1")
if (!all(is.numeric(M) | is.integer(M), M >= 0))
stop("Sample size (M) must be integer or numeric greater than 0")
if (!all(is.numeric(R), R > 0))
stop("Reproductive number (R) must be numeric greater than 0")
if (!all(is.numeric(R), R <= 1))
warning("Reproductive number (R) is usually less than 1 for finite outbreaks")
(M * rho * (R + 1) * eta * (1 - ((chi^(M - 1))) * exp(rho * (R + 1) * (((1 -
eta)/chi) - 1))))/(2 * (1 - exp(-rho * (R + 1) * eta)))
}
|
/R/obs_pairs_mtsl.R
|
no_license
|
HopkinsIDD/phylosamp
|
R
| false | false | 2,643 |
r
|
##' Expected number of observed pairs assuming multiple-transmission and single-linkage
##'
##' @description
##' `r lifecycle::badge('deprecated')`
##' This function calculates the expected number of pairs observed in a sample of size `M`.
##' The multiple-transmission and single-linkage method assumes the following:
##' \enumerate{
##' \item Each case \eqn{i} is, on average, the infector of `R` cases in the population (\eqn{N})
##' \item Each case \eqn{i} is allowed to be linked by the linkage criteria to only one other case \eqn{j} in the sampled population (\eqn{M}).
##' }
##'
##' @param eta scalar or vector giving the sensitivity of the linkage criteria
##' @param chi scalar or vector giving the specificity of the linkage criteria
##' @param rho scalar or vector giving the proportion of the final outbreak size that is sampled
##' @param M scalar or vector giving the number of cases sampled
##' @param R scalar or vector giving the effective reproductive number of the pathogen
##'
##' @return scalar or vector giving the expected number of linked pairs observed in the sample
##'
##' @author John Giles, Shirlee Wohl and Justin Lessler
##'
##' @examples
##' # Perfect sensitivity and specificity
##' obs_pairs_mtsl(eta=1, chi=1, rho=0.5, M=100, R=1)
##'
##' obs_pairs_mtsl(eta=0.99, chi=0.9, rho=1, M=50, R=1)
##'
##' obs_pairs_mtsl(eta=0.99, chi=0.9, rho=0.5, M=100, R=1)
##'
##' @family obs_pairs
##'
##' @export
##'
##'
obs_pairs_mtsl <- function(chi, eta, rho, M, R) {
lifecycle::deprecate_soft("1.0.0", "obs_pairs_mtsl()", "translink_expected_links_obs_mtsl()")
lifecycle::deprecate_soft("1.0.0", "obs_pairs_mtsl(eta)", "translink_expected_links_obs_mtsl(sensitivity)")
lifecycle::deprecate_soft("1.0.0", "obs_pairs_mtsl(chi)", "translink_expected_links_obs_mtsl(specificity)")
if (!all(is.numeric(eta), eta >= 0 & eta <= 1))
stop("eta must be numeric between 0 and 1")
if (!all(is.numeric(chi), chi >= 0 & chi <= 1))
stop("chi must be numeric between 0 and 1")
if (!all(is.numeric(rho), rho > 0 & rho <= 1))
stop("rho must be numeric > 0 and <= 1")
if (!all(is.numeric(M) | is.integer(M), M >= 0))
stop("Sample size (M) must be integer or numeric greater than 0")
if (!all(is.numeric(R), R > 0))
stop("Reproductive number (R) must be numeric greater than 0")
if (!all(is.numeric(R), R <= 1))
warning("Reproductive number (R) is usually less than 1 for finite outbreaks")
(M * rho * (R + 1) * eta * (1 - ((chi^(M - 1))) * exp(rho * (R + 1) * (((1 -
eta)/chi) - 1))))/(2 * (1 - exp(-rho * (R + 1) * eta)))
}
|
library(BEDASSLE, lib = "/global/home/users/makman/R/")
count = read.table("subset3_5K_allele_counts.txt", h=F)
dim(count)
count = as.matrix(count[1:2081])
dim(count)
sample = read.table("subset3_5K_sample_size.txt", h=F)
sample = as.matrix(sample[1:2081])
dim(sample)
climate = load("subset3_envi.RData")
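# Note: load() returns only the names of the restored objects; the objects themselves
# (assumed here to include Euc_dist and Climate, which are used below) are placed in the workspace.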
MCMC_BB(counts=count, sample_sizes = sample, D=Euc_dist, E=Climate, k=nrow(count), loci=ncol(count), delta = 0.0001,
aD_stp = 0.1, aE_stp = 0.1, a2_stp = 0.02, phi_stp = 0.2, thetas_stp = 0.2, mu_stp = 0.25, ngen = 1e6,
printfreq=100000, savefreq=1e5, samplefreq=250, prefix = "subset3_",
continue = FALSE, continuing.params = NULL)
|
/shells/bedassle/tests/bedassle3.R
|
no_license
|
melisakman/Helianthus
|
R
| false | false | 658 |
r
|
library(BEDASSLE, lib = "/global/home/users/makman/R/")
count = read.table("subset3_5K_allele_counts.txt", h=F)
dim(count)
count = as.matrix(count[1:2081])
dim(count)
sample = read.table("subset3_5K_sample_size.txt", h=F)
sample = as.matrix(sample[1:2081])
dim(sample)
climate = load("subset3_envi.RData")
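# Note: load() returns only the names of the restored objects; the objects themselves
# (assumed here to include Euc_dist and Climate, which are used below) are placed in the workspace.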
MCMC_BB(counts=count, sample_sizes = sample, D=Euc_dist, E=Climate, k=nrow(count), loci=ncol(count), delta = 0.0001,
aD_stp = 0.1, aE_stp = 0.1, a2_stp = 0.02, phi_stp = 0.2, thetas_stp = 0.2, mu_stp = 0.25, ngen = 1e6,
printfreq=100000, savefreq=1e5, samplefreq=250, prefix = "subset3_",
continue = FALSE, continuing.params = NULL)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeFunctions.R
\name{sigmaSNR}
\alias{sigmaSNR}
\title{Determine noise scale levels from specified \bold{S}ignal to \bold{N}oise \bold{R}atios}
\usage{
sigmaSNR(signal, SNR)
}
\arguments{
\item{signal}{Noisefree multichannel input signal}
\item{SNR}{A numeric vector specifying the desired \bold{S}ignal to \bold{N}oise \bold{R}atio for each channel.}
}
\value{
A numeric vector with m elements giving the scales (standard deviation of the noise in each channel) to achieve the desired SNR.
}
\description{
Compute the noise scale levels for each channel using the \bold{S}ignal to \bold{N}oise \bold{R}atios
}
\details{
The output noise scale levels (the theoretical standard deviation of the noise process in each channel) are governed by the blurred \bold{S}ignal-to-\bold{N}oise \bold{R}atio (SNR) measured in decibels (dB), where
\deqn{SNR = 10 \log_{10} \left( \frac{||k*f||^2}{\sigma^2} \right)}
and k*f is the blurred signal, \eqn{||\cdot||} is the norm operator and \eqn{\sigma} is the standard deviation of the noise. Roughly speaking, noise levels are considered high, medium and low for the cases 10 dB, 20 dB and 30 dB respectively.
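For example, inverting the formula above, a target SNR of 20 dB corresponds to a noise standard deviation of \eqn{\sigma = ||k*f|| / 10}, since \eqn{10 \log_{10}(||k*f||^2 / \sigma^2) = 20} implies \eqn{||k*f||^2 / \sigma^2 = 100}.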
}
\examples{
n <- 1024
m <- 3
signal <- makeLIDAR(n)
blur <- gammaBlur(n, c(0.5, 0.75, 1), rep(1, m))
X <- blurSignal(signal, blur)
SNR <- 10*1:3
sigma <- sigmaSNR(X, SNR)
E <- multiNoise(n, sigma)
sigmaEst <- multiSigma(E)
}
\seealso{
\code{\link{multiNoise}} \code{\link{multiSigma}}
}
|
/man/sigmaSNR.Rd
|
no_license
|
jrwishart/mwaved
|
R
| false | true | 1,505 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeFunctions.R
\name{sigmaSNR}
\alias{sigmaSNR}
\title{Determine noise scale levels from specified \bold{S}ignal to \bold{N}oise \bold{R}atios}
\usage{
sigmaSNR(signal, SNR)
}
\arguments{
\item{signal}{Noisefree multichannel input signal}
\item{SNR}{A numeric vector specifying the desired \bold{S}ignal to \bold{N}oise \bold{R}atio for each channel.}
}
\value{
A numeric vector with m elements giving the scales (standard deviation of the noise in each channel) to achieve the desired SNR.
}
\description{
Compute the noise scale levels for each channel using the \bold{S}ignal to \bold{N}oise \bold{R}atios
}
\details{
The output noise scale levels (the theoretical standard deviation of the noise process in each channel) are governed by the blurred \bold{S}ignal-to-\bold{N}oise \bold{R}atio (SNR) measured in decibels (dB), where
\deqn{SNR = 10 \log_{10} \left( \frac{||k*f||^2}{\sigma^2} \right)}
and k*f is the blurred signal, \eqn{||\cdot||} is the norm operator and \eqn{\sigma} is the standard deviation of the noise. Roughly speaking, noise levels are considered high, medium and low for the cases 10 dB, 20 dB and 30 dB respectively.
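For example, inverting the formula above, a target SNR of 20 dB corresponds to a noise standard deviation of \eqn{\sigma = ||k*f|| / 10}, since \eqn{10 \log_{10}(||k*f||^2 / \sigma^2) = 20} implies \eqn{||k*f||^2 / \sigma^2 = 100}.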
}
\examples{
n <- 1024
m <- 3
signal <- makeLIDAR(n)
blur <- gammaBlur(n, c(0.5, 0.75, 1), rep(1, m))
X <- blurSignal(signal, blur)
SNR <- 10*1:3
sigma <- sigmaSNR(X, SNR)
E <- multiNoise(n, sigma)
sigmaEst <- multiSigma(E)
}
\seealso{
\code{\link{multiNoise}} \code{\link{multiSigma}}
}
|
# Count, for each sister pair, how many of the 10,000 sister lists it appears in, and summarise its distance distribution.
all_lists <- list()
pb <- txtProgressBar(0,10000,style=3)
for (i in 1:10000) {
all_lists[[i]] <- read.csv(paste0('~/verts/sisterlists/truesisters', i, '.csv'), stringsAsFactors = FALSE)
setTxtProgressBar(pb,i)
}
close(pb)
all_lists <- do.call('rbind', all_lists)
library(dplyr)
sister_summary <- all_lists %>%
mutate(bothnames = paste(sister1, sister2, sep = '_')) %>%
group_by(bothnames, sister1, sister2) %>%
summarize(proportion = n()/10000,
meandist = mean(dist),
mediandist = median(dist),
dist025 = quantile(dist, probs = 0.025),
dist975 = quantile(dist, probs = 0.975)) %>%
ungroup %>%
select(-bothnames)
write.csv(sister_summary, file = '~/verts/sister_summary.csv', row.names = FALSE)
|
/data_extraction/truesisters_howmany.r
|
no_license
|
NEON-biodiversity/birdtraits
|
R
| false | false | 796 |
r
|
# Count, for each sister pair, how many of the 10,000 sister lists it appears in, and summarise its distance distribution.
all_lists <- list()
pb <- txtProgressBar(0,10000,style=3)
for (i in 1:10000) {
all_lists[[i]] <- read.csv(paste0('~/verts/sisterlists/truesisters', i, '.csv'), stringsAsFactors = FALSE)
setTxtProgressBar(pb,i)
}
close(pb)
all_lists <- do.call('rbind', all_lists)
library(dplyr)
sister_summary <- all_lists %>%
mutate(bothnames = paste(sister1, sister2, sep = '_')) %>%
group_by(bothnames, sister1, sister2) %>%
summarize(proportion = n()/10000,
meandist = mean(dist),
mediandist = median(dist),
dist025 = quantile(dist, probs = 0.025),
dist975 = quantile(dist, probs = 0.975)) %>%
ungroup %>%
select(-bothnames)
write.csv(sister_summary, file = '~/verts/sister_summary.csv', row.names = FALSE)
|
library(ATACseqQC)
library(GenomicAlignments)
library(BSgenome.Hsapiens.UCSC.hg19)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(ChIPpeakAnno)
library(MotifDb)
samplelabel <- gsub(pattern='.d$',"",list.files(getwd(), pattern='*.d$'))
samplelabel
main_path <- gsub(pattern='/02_clean',"",getwd())
bam_after_shift <- paste(main_path,'/03_shift/',samplelabel,'.aligned.2.clean.shifted.bam',sep='')
bamclean <- paste(main_path,'/02_clean/',samplelabel,'.aligned.2.clean.bam',sep='')
pdf('02_fragment_size_dist_after_QC.pdf')
fragSize <- fragSizeDist(bamclean, samplelabel)
dev.off()
motif_interest <- 'CTCF'
gal3 <- readGAlignments(bam_after_shift)
setwd(paste(main_path,'/99_plot',sep=''))
txs <- transcripts(TxDb.Hsapiens.UCSC.hg19.knownGene)
pt <- PTscore(gal3, txs)
pdf('06_PTscores.pdf')
plot(pt$log2meanCoverage, pt$PT_score,
xlab="log2 mean coverage",
ylab="Promoter vs Transcript")
dev.off()
###Nucleosome Free Regions (NFR) score
nfr <- NFRscore(gal3, txs)
pdf('07_Nuc_free_scores.pdf')
plot(nfr$log2meanCoverage, nfr$NFR_score,
xlab="log2 mean coverage",
ylab="Nucleosome Free Regions score",
main="NFRscore for 200bp flanking TSSs",
xlim=c(-10, 0), ylim=c(-5, 5))
dev.off()
###Plot Footprints
CTCF <- query(MotifDb, c(motif_interest))
CTCF <- as.list(CTCF)
seqlev <- c(paste("chr",1:22, sep=''),"chrX","chrY")
nucleosome_free_bamfile <- paste(main_path,'/04_splited/',samplelabel,'.nucFree.bam',sep='')
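# factorFootprints scans the genome for the motif's PFM and plots the aggregated cut-site
# signal around the predicted binding sites, using only the nucleosome-free fragments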
pdf(paste(motif_interest,".footprint.pdf",sep=''))
factorFootprints(nucleosome_free_bamfile, pfm=CTCF[[1]], genome=Hsapiens, min.score="90%", seqlev=seqlev, upstream=100, downstream=100)
dev.off()
q()
|
/ATACseq_pipeline/Rcode_atac_3.r
|
no_license
|
ggolczer/nr4a1_ieg_cancer
|
R
| false | false | 1,687 |
r
|
library(ATACseqQC)
library(GenomicAlignments)
library(BSgenome.Hsapiens.UCSC.hg19)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(ChIPpeakAnno)
library(MotifDb)
samplelabel <- gsub(pattern='.d$',"",list.files(getwd(), pattern='*.d$'))
samplelabel
main_path <- gsub(pattern='/02_clean',"",getwd())
bam_after_shift <- paste(main_path,'/03_shift/',samplelabel,'.aligned.2.clean.shifted.bam',sep='')
bamclean <- paste(main_path,'/02_clean/',samplelabel,'.aligned.2.clean.bam',sep='')
pdf('02_fragment_size_dist_after_QC.pdf')
fragSize <- fragSizeDist(bamclean, samplelabel)
dev.off()
motif_interest <- 'CTCF'
gal3 <- readGAlignments(bam_after_shift)
setwd(paste(main_path,'/99_plot',sep=''))
txs <- transcripts(TxDb.Hsapiens.UCSC.hg19.knownGene)
pt <- PTscore(gal3, txs)
pdf('06_PTscores.pdf')
plot(pt$log2meanCoverage, pt$PT_score,
xlab="log2 mean coverage",
ylab="Promoter vs Transcript")
dev.off()
###Nucleosome Free Regions (NFR) score
nfr <- NFRscore(gal3, txs)
pdf('07_Nuc_free_scores.pdf')
plot(nfr$log2meanCoverage, nfr$NFR_score,
xlab="log2 mean coverage",
ylab="Nucleosome Free Regions score",
main="NFRscore for 200bp flanking TSSs",
xlim=c(-10, 0), ylim=c(-5, 5))
dev.off()
###Plot Footprints
CTCF <- query(MotifDb, c(motif_interest))
CTCF <- as.list(CTCF)
seqlev <- c(paste("chr",1:22, sep=''),"chrX","chrY")
nucleosome_free_bamfile <- paste(main_path,'/04_splited/',samplelabel,'.nucFree.bam',sep='')
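# factorFootprints scans the genome for the motif's PFM and plots the aggregated cut-site
# signal around the predicted binding sites, using only the nucleosome-free fragments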
pdf(paste(motif_interest,".footprint.pdf",sep=''))
factorFootprints(nucleosome_free_bamfile, pfm=CTCF[[1]], genome=Hsapiens, min.score="90%", seqlev=seqlev, upstream=100, downstream=100)
dev.off()
q()
|
is_valid_input <- function(d){
# input is names of input data file
# d <- names(data)
if("FT" %in% d){
return(TRUE)
}
else if("IF" %in% d){
return(TRUE)
}
else if("FN" %in% d){
return(TRUE)
}
else{
return(FALSE)
}
}
is_valid_result <- function(d){
if(is.data.frame(d)){
return(TRUE)
}else return(FALSE)
}
is_finite <- function(d){
if(d==TRUE||d==FALSE){
return(TRUE)
}else return(FALSE)
}
is_valid_string <- function(d){
if(is.character(d)){
return(TRUE)
}else return(FALSE)
}
is_valid_MVF <- function(d){
}
findFunction <- function(s){
return(s %in% eval(listfunctions))
}
|
/SFRAT/tests/test_functions.R
|
permissive
|
vnagaraju14/SFRAT_Automated-Report-Generation
|
R
| false | false | 643 |
r
|
is_valid_input <- function(d){
# input is names of input data file
# d <- names(data)
if("FT" %in% d){
return(TRUE)
}
else if("IF" %in% d){
return(TRUE)
}
else if("FN" %in% d){
return(TRUE)
}
else{
return(FALSE)
}
}
is_valid_result <- function(d){
if(is.data.frame(d)){
return(TRUE)
}else return(FALSE)
}
is_finite <- function(d){
if(d==TRUE||d==FALSE){
return(TRUE)
}else return(FALSE)
}
is_valid_string <- function(d){
if(is.character(d)){
return(TRUE)
}else return(FALSE)
}
is_valid_MVF <- function(d){
}
findFunction <- function(s){
return(s %in% eval(listfunctions))
}
|
data { // Number of patients
int<lower=0> ntree; // Number of columns
int row; // Vector describing the number of rows belonging to each patient
int<lower=0> counts[row,ntree]; // Count data
matrix<lower=0>[row,ntree] lengths; // Time data
}
parameters {
real<lower=0> sub_rate;
real<lower=0> tre_rates[ntree];
}
model {
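  // Hierarchical prior: the per-tree rates are shrunk toward the shared subject-level rate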
  tre_rates ~ normal(sub_rate, 0.05);
for (j in 1:ntree){ // 1 - #Columns
counts[j] ~ poisson_log(tre_rates[j] * lengths[j]);
}
  print(target());
}
|
/src/stan-modeling/rate-test.R
|
no_license
|
jpalmer37/withinhost
|
R
| false | false | 599 |
r
|
data { // Number of patients
int<lower=0> ntree; // Number of columns
int row; // Vector describing the number of rows belonging to each patient
int<lower=0> counts[row,ntree]; // Count data
matrix<lower=0>[row,ntree] lengths; // Time data
}
parameters {
real<lower=0> sub_rate;
real<lower=0> tre_rates[ntree];
}
model {
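  // Hierarchical prior: the per-tree rates are shrunk toward the shared subject-level rate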
  tre_rates ~ normal(sub_rate, 0.05);
for (j in 1:ntree){ // 1 - #Columns
counts[j] ~ poisson_log(tre_rates[j] * lengths[j]);
}
  print(target());
}
|
# Load packages ----------------------------------------------------------------
library(shiny)
# Define UI --------------------------------------------------------------------
ui <- fluidPage(
selectInput(inputId = "city",
label = "Select city",
choices = list("Scotland" = c("Edinburgh" = "edinburgh",
"Glasgow" = "glasgow"),
"Switzerland" = c("Lausanne" = "lausanne",
"Zurich" = "zurich"))),
strong("Selected city"),
  textOutput(outputId = "selected_city")
)
# Define server ----------------------------------------------------------------
server <- function(input, output, session) {
output$selected_city <- renderText(input$city)
}
# Create the app ---------------------------------------------------------------
shinyApp(ui, server)
|
/02-building-ui/03-ui.R
|
no_license
|
kmaheshkulkarni/advanced-shiny-sib
|
R
| false | false | 905 |
r
|
# Load packages ----------------------------------------------------------------
library(shiny)
# Define UI --------------------------------------------------------------------
ui <- fluidPage(
selectInput(inputId = "city",
label = "Select city",
choices = list("Scotland" = c("Edinburgh" = "edinburgh",
"Glasgow" = "glasgow"),
"Switzerland" = c("Lausanne" = "lausanne",
"Zurich" = "zurich"))),
strong("Selected city"),
  textOutput(outputId = "selected_city")
)
# Define server ----------------------------------------------------------------
server <- function(input, output, session) {
output$selected_city <- renderText(input$city)
}
# Create the app ---------------------------------------------------------------
shinyApp(ui, server)
|
library(rvest)
library(dplyr)
library(ggmap)
url <- "http://www.cancer.gov/researchandfunding/extramural/cancercenters/find-a-cancer-center"
webpage <- read_html(url)
centerNames <- webpage %>% html_nodes("#institution-listing2 a span") %>% html_text()
centerNames
coordinates <- sapply(centerNames, geocode)
coordinates.2 <- matrix(unlist(coordinates), nrow = 68, byrow = TRUE)
sum(complete.cases(coordinates.2))
centerNames[!complete.cases(coordinates.2)]
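# Manually supply street addresses for the centers whose names could not be geocoded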
addressMissing <- c("4501 X Street, Suite 3003 Sacramento, California 95817",
"10010 North Torrey Pines Road La Jolla, California 92037",
"200 Hawkins Drive 5970Z JPP Iowa City, Iowa 52242",
"600 Main Street Bar Harbor, Maine 04609",
"450 Brookline Avenue Boston, Massachusetts 02215",
"660 South Euclid Avenue Campus Box 8109 St. Louis, Missouri 63110",
"85950 Nebraska Medical Center Omaha, Nebraska 68198",
"One Medical Center Drive Lebanon, New Hampshire 03756",
"550 First Avenue 1201 Smilow Building New York, New York 10016",
"1 Bungtown Road Cold Spring Harbor, New York 11724",
"Medical Center Boulevard Winston-Salem, North Carolina 27157",
"11100 Euclid Avenue, Wearn 151 Cleveland, Ohio 44106",
"300 West 10th Avenue, Suite 159 Columbus, Ohio 43210",
"3601 Spruce Street Philadelphia, Pennsylvania 19104",
"One Baylor Place MS: BCM305 Houston, Texas 77030",
"P.O. Box 19024, D1-060 Seattle, Washington 98109")
coordinatesMissing <- sapply(addressMissing, geocode)
coordinatesMissing.2 <- matrix(unlist(coordinatesMissing), nrow = 16, byrow = TRUE)
coordinatesMissing.2
lonLat <- coordinates.2
lonLat[!complete.cases(lonLat)] <- coordinatesMissing.2
lonLat
lonLat <- data.frame(centerNames, lonLat)
names(lonLat) <- c("Name", "lon", "lat")
library(ggplot2)
library(maps)
load("./Data/usMap.rda")
state_map <- map_data("state")
ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat), color = "red")
badCoordinates <- lonLat$lon > 0 | lonLat$lon < -150
sum(badCoordinates)
centerNames[badCoordinates]
badNames <- c("Sidney Kimmel Cancer Center at Thomas Jefferson University",
"Cancer Therapy & Research Center")
newAddresses <- c("233 South 10th Street, Philadelphia, Pennsylvania 19107",
"7979 Wurzbach Road, San Antonio, Texas 78229")
newCoord <- sapply(newAddresses, geocode)
unlist(newCoord)
which(lonLat$Name %in% badNames)
lonLat[56,2:3] <- unlist(newCoord)[1:2]
lonLat[56,2:3]
lonLat[60,2:3] <- unlist(newCoord)[3:4]
lonLat[60,2:3]
ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat), color = "red") +
geom_text(data=lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
hjust=0.5, vjust=-0.5, aes(x=lon, y=lat, label=Name),
colour="gold2", size=4 )
which(lonLat$Name == "Sanford-Burnham Medical Research Institute")
newCoord <- geocode("10901 North Torrey Pines Road, La Jolla, California 92037")
newCoord
lonLat[11, 2:3]
lonLat[11,2:3] <- newCoord
lonLat[11,2:3]
ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat), color = "red") +
geom_text(data=lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
hjust=0.5, vjust=-0.5, aes(x=lon, y=lat, label=Name),
colour="gold2", size=2 )
save(lonLat, file="./Data/NCI.rda")
library(plotly)
py <- plotly()
viz <- ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat, text = Name), color = "gold")
out <- py$ggplotly(viz, kwargs=list(filename="nci-test", fileopt="overwrite"))
plotly_url <- out$response$url
|
/NCI.R
|
no_license
|
jeromefro/Health_Demo
|
R
| false | false | 4,502 |
r
|
library(rvest)
library(dplyr)
library(ggmap)
url <- "http://www.cancer.gov/researchandfunding/extramural/cancercenters/find-a-cancer-center"
webpage <- read_html(url)
centerNames <- webpage %>% html_nodes("#institution-listing2 a span") %>% html_text()
centerNames
coordinates <- sapply(centerNames, geocode)
coordinates.2 <- matrix(unlist(coordinates), nrow = 68, byrow = TRUE)
sum(complete.cases(coordinates.2))
centerNames[!complete.cases(coordinates.2)]
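# Manually supply street addresses for the centers whose names could not be geocoded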
addressMissing <- c("4501 X Street, Suite 3003 Sacramento, California 95817",
"10010 North Torrey Pines Road La Jolla, California 92037",
"200 Hawkins Drive 5970Z JPP Iowa City, Iowa 52242",
"600 Main Street Bar Harbor, Maine 04609",
"450 Brookline Avenue Boston, Massachusetts 02215",
"660 South Euclid Avenue Campus Box 8109 St. Louis, Missouri 63110",
"85950 Nebraska Medical Center Omaha, Nebraska 68198",
"One Medical Center Drive Lebanon, New Hampshire 03756",
"550 First Avenue 1201 Smilow Building New York, New York 10016",
"1 Bungtown Road Cold Spring Harbor, New York 11724",
"Medical Center Boulevard Winston-Salem, North Carolina 27157",
"11100 Euclid Avenue, Wearn 151 Cleveland, Ohio 44106",
"300 West 10th Avenue, Suite 159 Columbus, Ohio 43210",
"3601 Spruce Street Philadelphia, Pennsylvania 19104",
"One Baylor Place MS: BCM305 Houston, Texas 77030",
"P.O. Box 19024, D1-060 Seattle, Washington 98109")
coordinatesMissing <- sapply(addressMissing, geocode)
coordinatesMissing.2 <- matrix(unlist(coordinatesMissing), nrow = 16, byrow = TRUE)
coordinatesMissing.2
lonLat <- coordinates.2
lonLat[!complete.cases(lonLat)] <- coordinatesMissing.2
lonLat
lonLat <- data.frame(centerNames, lonLat)
names(lonLat) <- c("Name", "lon", "lat")
library(ggplot2)
library(maps)
load("./Data/usMap.rda")
state_map <- map_data("state")
ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat), color = "red")
badCoordinates <- lonLat$lon > 0 | lonLat$lon < -150
sum(badCoordinates)
centerNames[badCoordinates]
badNames <- c("Sidney Kimmel Cancer Center at Thomas Jefferson University",
"Cancer Therapy & Research Center")
newAddresses <- c("233 South 10th Street, Philadelphia, Pennsylvania 19107",
"7979 Wurzbach Road, San Antonio, Texas 78229")
newCoord <- sapply(newAddresses, geocode)
unlist(newCoord)
which(lonLat$Name %in% badNames)
lonLat[56,2:3] <- unlist(newCoord)[1:2]
lonLat[56,2:3]
lonLat[60,2:3] <- unlist(newCoord)[3:4]
lonLat[60,2:3]
ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat), color = "red") +
geom_text(data=lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
hjust=0.5, vjust=-0.5, aes(x=lon, y=lat, label=Name),
colour="gold2", size=4 )
which(lonLat$Name == "Sanford-Burnham Medical Research Institute")
newCoord <- geocode("10901 North Torrey Pines Road, La Jolla, California 92037")
newCoord
lonLat[11, 2:3]
lonLat[11,2:3] <- newCoord
lonLat[11,2:3]
ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat), color = "red") +
geom_text(data=lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
hjust=0.5, vjust=-0.5, aes(x=lon, y=lat, label=Name),
colour="gold2", size=2 )
save(lonLat, file="./Data/NCI.rda")
library(plotly)
py <- plotly()
viz <- ggplot(data = state_map, aes(x = long, y = lat)) +
geom_polygon(aes(group = group), color = "grey40", size = 0.5) +
geom_point(data = lonLat[lonLat$Name != "University of Hawaii Cancer Center",],
aes(x = lon, y = lat, text = Name), color = "gold")
out <- py$ggplotly(viz, kwargs=list(filename="nci-test", fileopt="overwrite"))
plotly_url <- out$response$url
|
# Packages ------------------------------------------------------------------
library(ggplot2)
library(tidymodels)
# Data ----------------------------------------------------------------------
data("diamonds")
# train and test sets ---------------------------------------------------------
# create the training and test tables with proportions of 80% and 20% respectively.
# use initial_split(), training() and testing()
set.seed(1)
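# A minimal sketch of this step (the object names below are illustrative only):
# diamonds_split <- initial_split(diamonds, prop = 0.8)
# diamonds_train <- training(diamonds_split)
# diamonds_test  <- testing(diamonds_split)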
# model definition ----------------------------------------------------------
# Define a decision tree model for regression using rpart and
# prepare to tune the cost_complexity and min_n hyperparameters.
# Keep tree_depth fixed at 10.
# use the functions decision_tree(), tune(), set_engine() and set_mode().
# resampling with cross-validation --------------------------------------------
# Create 5 cross-validation folds
# use vfold_cv().
# hyperparameter tuning -------------------------------------------------------
# tune the hyperparameters using only the rmse metric. Use a grid of size 5.
# NOTE: the response variable is 'price' and the explanatory variable is 'x'.
# use tune_grid().
# inspecting the tuning -------------------------------------------------------
# autoplot(...)
# collect_metrics(...)
# show_best(...)
# selecting the best set of hyperparameters -----------------------------------
# extract the set of hyperparameters that gave the best cross-validation
# rmse.
# use select_best().
# finalizing the model --------------------------------------------------------
# update the model specification with the hyperparameters found in the
# section above.
# use finalize_model().
# final model performance -----------------------------------------------------
# run the last fit to compute the rmse on the test set.
# use last_fit().
# collect_metrics(...)
# collect_predictions(...) %>%
# ggplot(aes(.pred, price)) +
# geom_point()
# final model -----------------------------------------------------------------
# fit the final model on the whole data (test + train).
# use fit().
# predictions -----------------------------------------------------------------
# add a column of predictions to the original data.
# use mutate(), predict()$.pred
# rpart.plot::rpart.plot(...$fit)
# saving the model for later use ----------------------------------------------
# saveRDS(..., file = "diamonds_final_model.rds")
|
/exercicios/03-cross-validation.R
|
permissive
|
curso-r/202104-intro-ml
|
R
| false | false | 2,422 |
r
|
# Packages ------------------------------------------------------------------
library(ggplot2)
library(tidymodels)
# Data ----------------------------------------------------------------------
data("diamonds")
# train and test sets ---------------------------------------------------------
# create the training and test tables with proportions of 80% and 20% respectively.
# use initial_split(), training() and testing()
set.seed(1)
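# A minimal sketch of this step (the object names below are illustrative only):
# diamonds_split <- initial_split(diamonds, prop = 0.8)
# diamonds_train <- training(diamonds_split)
# diamonds_test  <- testing(diamonds_split)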
# model definition ----------------------------------------------------------
# Define a decision tree model for regression using rpart and
# prepare to tune the cost_complexity and min_n hyperparameters.
# Keep tree_depth fixed at 10.
# use the functions decision_tree(), tune(), set_engine() and set_mode().
# resampling with cross-validation --------------------------------------------
# Create 5 cross-validation folds
# use vfold_cv().
# hyperparameter tuning -------------------------------------------------------
# tune the hyperparameters using only the rmse metric. Use a grid of size 5.
# NOTE: the response variable is 'price' and the explanatory variable is 'x'.
# use tune_grid().
# inspecting the tuning -------------------------------------------------------
# autoplot(...)
# collect_metrics(...)
# show_best(...)
# selecting the best set of hyperparameters -----------------------------------
# extract the set of hyperparameters that gave the best cross-validation
# rmse.
# use select_best().
# finalizing the model --------------------------------------------------------
# update the model specification with the hyperparameters found in the
# section above.
# use finalize_model().
# final model performance -----------------------------------------------------
# run the last fit to compute the rmse on the test set.
# use last_fit().
# collect_metrics(...)
# collect_predictions(...) %>%
# ggplot(aes(.pred, price)) +
# geom_point()
# final model -----------------------------------------------------------------
# fit the final model on the whole data (test + train).
# use fit().
# predictions -----------------------------------------------------------------
# add a column of predictions to the original data.
# use mutate(), predict()$.pred
# rpart.plot::rpart.plot(...$fit)
# saving the model for later use ----------------------------------------------
# saveRDS(..., file = "diamonds_final_model.rds")
|
#' @title Creates an independent copy of a ggplot layer object
#' @export
#' @description Creates copies of ggplot layers from within ggplot objects that
#' are independent of the parent object.
#' @details ggplot objects are comprised of layer objects. Once compiled, the layers
#' are part of the plot object's environment, so changing them internally - even from
#' a different environment - will change the original plot. This function creates
#' replicates of the plot layers that can be edited independently of the original plot.
#' When verbose is set to TRUE, the function returns the ggplot2 call as a string that
#' can be pasted into a regular ggplot script to generate the layer.
#' @param l ggplot2 object layer
#' @param verbose toggle to control if the output is ggproto object (verbose==FALSE,default) or string of layer call (verbose==TRUE)
#' @param showDefaults toggle to control if the verbose output shows all the input arguments passed to the proto object (if verbose==FALSE then ignored)
#' @return ggproto or string object (conditional on verbose)
#' @examples
#' p <- ggplot2::ggplot(iris,ggplot2::aes(x =Sepal.Length,y=Sepal.Width))
#'
#' p <- p+ggplot2::geom_point(ggplot2::aes(colour=Species))+ggplot2::geom_line()
#'
#' p$layers[[1]]
#'
#' newLayer <- cloneLayer(l=p$layers[[1]])
#'
#' all.equal(p$layers[[1]],newLayer)
#'
#' (v <- cloneLayer(l=p$layers[[1]],verbose=TRUE))
#'
#' eval(parse(text=v))
#'
#' all.equal(p$layers[[1]],eval(parse(text=v)))
#'
#' @importFrom utils capture.output
cloneLayer=function(l,verbose=FALSE,showDefaults=TRUE){
geom_opts<-ggedit_opts$get('session_geoms')
parent.layer<-proto_features(l)%>%
dplyr::left_join(geom_opts%>%dplyr::filter_(~!grepl('^stat',fn)),
by = c("position", "geom", "stat"))
if(is.na(parent.layer$fn)) parent.layer$fn=paste0(tolower(strsplit(parent.layer$stat,'(?<=Stat)',perl = TRUE)[[1]]),collapse = '_')
layer.names=c('mapping','data','geom','position',
'stat','show.legend','inherit.aes',
'aes_params','geom_params','stat_params')
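  # Harvest each component of the layer; geom, stat and position are converted back
  # into the objects/strings that layer() expects when the layer is rebuilt below.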
x=sapply(layer.names,function(y){
b=l[[y]]
if('waiver'%in%class(b)) b=NULL
if(y=='geom') b=eval(parse(text=parent.layer$geom))
if(y=='position') b=gsub(y, "", tolower(class(b)[1]))
if(y=='stat') b=eval(parse(text=parent.layer$stat))
# if(y%in%c('position','stat')) {
# b=gsub(y, "", tolower(class(b)[1]))
# }
b
})
x$params=append(x$stat_params,x$geom_params)
x$params=append(x$params,x$aes_params)
x$params=x$params[!duplicated(names(x$params))]
x$geom_params<-x$aes_params<-x$stat_params<-NULL
if(typeof(x$data)=='closure'){
requireNamespace("ggraph")
x$data<-ggraph::get_edges()
}
if(verbose){
nm=names(x)
#nm=nm[!nm%in%c('geom','params','mapping')]
nm=nm[!sapply(x,typeof)%in%c('environment','closure','list')]
geom_aes=list(geom = parent.layer$fn,
mapping= paste0(names(x$mapping),sapply(x$mapping,build_map)),
params = paste0(names(x$params),sapply(x$params,build_map)),
layer = paste0(rev(nm),sapply(x[rev(nm)],build_map)),
data = paste0('data=',paste0(capture.output(dput(x$data)),collapse='\n'))
)
    strRet=sprintf('%s(mapping=aes(%s),%s,%s,%s)',
paste0(geom_aes$geom,collapse=','),
paste0(geom_aes$mapping,collapse=','),
paste0(geom_aes$params,collapse=','),
paste0(geom_aes$layer,collapse=','),
geom_aes$data
)
if(!showDefaults){
geom_proto<-cloneProto(eval(parse(text=paste0(geom_aes$geom,'()'))))
geom_diff<-sapply(names(geom_aes)[-1],function(x) geom_aes[[x]][!geom_aes[[x]]%in%geom_proto[[x]]])
strRet=sprintf('%s(aes(%s),%s,%s,%s)',
paste0(geom_aes$geom,collapse=','),
paste0(geom_diff$mapping,collapse=','),
paste0(geom_diff$params,collapse=','),
paste0(geom_diff$layer,collapse=','),
geom_aes$data
)
}
strRet=gsub('aes()','',strRet,fixed = T) #failsafe for empty aes() call
strRet=gsub('[,]{2,}',',',strRet)
strRet=gsub('data=NULL','',strRet)
strRet=gsub(',)',')',strRet)
strRet=gsub('\\(,','(',strRet)
strRet
}else{
do.call(layer,x)
}
}
|
/R/cloneLayer.R
|
no_license
|
DataXujing/ggedit
|
R
| false | false | 4,474 |
r
|
#' @title Creates an independent copy of a ggplot layer object
#' @export
#' @description Creates copies of ggplot layers from within ggplot objects that
#' are independent of the parent object.
#' @details ggplot objects are comprised of layer objects. Once compiled, the layers
#' are part of the plot object's environment, so changing them internally - even from
#' a different environment - will change the original plot. This function creates
#' replicates of the plot layers that can be edited independently of the original plot.
#' When verbose is set to TRUE, the function returns the ggplot2 call as a string that
#' can be pasted into a regular ggplot script to generate the layer.
#' @param l ggplot2 object layer
#' @param verbose toggle to control if the output is ggproto object (verbose==FALSE,default) or string of layer call (verbose==TRUE)
#' @param showDefaults toggle to control if the verbose output shows all the input arguments passed to the proto object (if verbose==FALSE then ignored)
#' @return ggproto or string object (conditional on verbose)
#' @examples
#' p <- ggplot2::ggplot(iris,ggplot2::aes(x =Sepal.Length,y=Sepal.Width))
#'
#' p <- p+ggplot2::geom_point(ggplot2::aes(colour=Species))+ggplot2::geom_line()
#'
#' p$layers[[1]]
#'
#' newLayer <- cloneLayer(l=p$layers[[1]])
#'
#' all.equal(p$layers[[1]],newLayer)
#'
#' (v <- cloneLayer(l=p$layers[[1]],verbose=TRUE))
#'
#' eval(parse(text=v))
#'
#' all.equal(p$layers[[1]],eval(parse(text=v)))
#'
#' @importFrom utils capture.output
cloneLayer=function(l,verbose=FALSE,showDefaults=TRUE){
geom_opts<-ggedit_opts$get('session_geoms')
parent.layer<-proto_features(l)%>%
dplyr::left_join(geom_opts%>%dplyr::filter_(~!grepl('^stat',fn)),
by = c("position", "geom", "stat"))
if(is.na(parent.layer$fn)) parent.layer$fn=paste0(tolower(strsplit(parent.layer$stat,'(?<=Stat)',perl = TRUE)[[1]]),collapse = '_')
layer.names=c('mapping','data','geom','position',
'stat','show.legend','inherit.aes',
'aes_params','geom_params','stat_params')
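  # Harvest each component of the layer; geom, stat and position are converted back
  # into the objects/strings that layer() expects when the layer is rebuilt below.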
x=sapply(layer.names,function(y){
b=l[[y]]
if('waiver'%in%class(b)) b=NULL
if(y=='geom') b=eval(parse(text=parent.layer$geom))
if(y=='position') b=gsub(y, "", tolower(class(b)[1]))
if(y=='stat') b=eval(parse(text=parent.layer$stat))
# if(y%in%c('position','stat')) {
# b=gsub(y, "", tolower(class(b)[1]))
# }
b
})
x$params=append(x$stat_params,x$geom_params)
x$params=append(x$params,x$aes_params)
x$params=x$params[!duplicated(names(x$params))]
x$geom_params<-x$aes_params<-x$stat_params<-NULL
if(typeof(x$data)=='closure'){
requireNamespace("ggraph")
x$data<-ggraph::get_edges()
}
if(verbose){
nm=names(x)
#nm=nm[!nm%in%c('geom','params','mapping')]
nm=nm[!sapply(x,typeof)%in%c('environment','closure','list')]
geom_aes=list(geom = parent.layer$fn,
mapping= paste0(names(x$mapping),sapply(x$mapping,build_map)),
params = paste0(names(x$params),sapply(x$params,build_map)),
layer = paste0(rev(nm),sapply(x[rev(nm)],build_map)),
data = paste0('data=',paste0(capture.output(dput(x$data)),collapse='\n'))
)
    strRet=sprintf('%s(mapping=aes(%s),%s,%s,%s)',
paste0(geom_aes$geom,collapse=','),
paste0(geom_aes$mapping,collapse=','),
paste0(geom_aes$params,collapse=','),
paste0(geom_aes$layer,collapse=','),
geom_aes$data
)
if(!showDefaults){
geom_proto<-cloneProto(eval(parse(text=paste0(geom_aes$geom,'()'))))
geom_diff<-sapply(names(geom_aes)[-1],function(x) geom_aes[[x]][!geom_aes[[x]]%in%geom_proto[[x]]])
strRet=sprintf('%s(aes(%s),%s,%s,%s)',
paste0(geom_aes$geom,collapse=','),
paste0(geom_diff$mapping,collapse=','),
paste0(geom_diff$params,collapse=','),
paste0(geom_diff$layer,collapse=','),
geom_aes$data
)
}
strRet=gsub('aes()','',strRet,fixed = T) #failsafe for empty aes() call
strRet=gsub('[,]{2,}',',',strRet)
strRet=gsub('data=NULL','',strRet)
strRet=gsub(',)',')',strRet)
strRet=gsub('\\(,','(',strRet)
strRet
}else{
do.call(layer,x)
}
}
|
pkg <- devtools::as.package('~/academic/neighborhoods/code/common')
devtools::load_all(pkg)
js_age <- read.csv('../interchange/chicago_js_age.csv')$x
js_family <- read.csv('../interchange/chicago_js_family.csv')$x
js_race <- read.csv('../interchange/chicago_js_race.csv')$x
js_housing <- read.csv('../interchange/chicago_js_housing.csv')$x
rail <- read.csv('../interchange/chicago_rail_intersects.csv')$x
highway <- read.csv('../interchange/chicago_highway_intersects.csv')$x
grid_street <- read.csv('../interchange/chicago_grid_intersects.csv')$x
water <- read.csv('../interchange/chicago_water_intersects.csv')$x
elementary_school <- read.csv('../interchange/chicago_elementary_schools_crosses.csv')$x
high_school <- read.csv('../interchange/chicago_high_schools_crosses.csv')$x
block_angle <- read.csv('../interchange/chicago_block_angles.csv')$x
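# Normalise each block angle to [0, 1) within a quarter turn (pi/2), assuming grid
# orientation only matters modulo 90 degrees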
block_angle <- (block_angle %% (pi/2)) / (pi/2)
population <- read.csv('../interchange/chicago_min_population.csv')$x
sufficient_pop <- as.numeric(population > 30)
features <- data.frame(sufficient_pop,
js_age,
js_family,
js_race,
js_housing,
rail,
highway,
water,
elementary_school,
high_school,
block_angle,
grid_street)
M <- model.matrix(~ (sufficient_pop:(js_age +
js_family +
js_race +
js_housing) +
sufficient_pop*(rail +
water +
highway +
grid_street +
elementary_school +
high_school +
block_angle)),
data=features)
write.table(M, "chicago.model.matrix", row.names=FALSE)
|
/code/training/chicago_features.R
|
no_license
|
fgregg/neighborhoods
|
R
| false | false | 2,063 |
r
|
pkg <- devtools::as.package('~/academic/neighborhoods/code/common')
devtools::load_all(pkg)
js_age <- read.csv('../interchange/chicago_js_age.csv')$x
js_family <- read.csv('../interchange/chicago_js_family.csv')$x
js_race <- read.csv('../interchange/chicago_js_race.csv')$x
js_housing <- read.csv('../interchange/chicago_js_housing.csv')$x
rail <- read.csv('../interchange/chicago_rail_intersects.csv')$x
highway <- read.csv('../interchange/chicago_highway_intersects.csv')$x
grid_street <- read.csv('../interchange/chicago_grid_intersects.csv')$x
water <- read.csv('../interchange/chicago_water_intersects.csv')$x
elementary_school <- read.csv('../interchange/chicago_elementary_schools_crosses.csv')$x
high_school <- read.csv('../interchange/chicago_high_schools_crosses.csv')$x
block_angle <- read.csv('../interchange/chicago_block_angles.csv')$x
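# Normalise each block angle to [0, 1) within a quarter turn (pi/2), assuming grid
# orientation only matters modulo 90 degrees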
block_angle <- (block_angle %% (pi/2)) / (pi/2)
population <- read.csv('../interchange/chicago_min_population.csv')$x
sufficient_pop <- as.numeric(population > 30)
features <- data.frame(sufficient_pop,
js_age,
js_family,
js_race,
js_housing,
rail,
highway,
water,
elementary_school,
high_school,
block_angle,
grid_street)
M <- model.matrix(~ (sufficient_pop:(js_age +
js_family +
js_race +
js_housing) +
sufficient_pop*(rail +
water +
highway +
grid_street +
elementary_school +
high_school +
block_angle)),
data=features)
write.table(M, "chicago.model.matrix", row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sar.R
\name{sar}
\alias{sar}
\alias{sar.data.frame}
\alias{sar.default}
\alias{print.sar}
\title{Fit a SAR model}
\usage{
sar(...)
\method{sar}{data.frame}(x, user = "user", item = "item",
time = "time", event = "event", weight = "weight", ...)
\method{sar}{default}(user, item, time, event = NULL, weight = NULL,
support_threshold = 1, allowed_items = NULL,
allowed_events = c(Click = 1, RecommendationClick = 2, AddShopCart = 3,
RemoveShopCart = -1, Purchase = 4), by_user = TRUE,
similarity = c("jaccard", "lift", "count"), half_life = 30,
catalog_data = NULL, catalog_formula = item ~ .,
cold_to_cold = FALSE, cold_item_model = NULL, ...)
\method{print}{sar}(x, ...)
}
\arguments{
\item{...}{For \code{sar()}, further arguments to pass to the cold-items feature model.}
\item{x}{A data frame. For the \code{print} method, a SAR model object.}
\item{user, item, time, event, weight}{For the default method, vectors to use as the user IDs, item IDs, timestamps, event types, and transaction weights for SAR. For the \code{data.frame} method, the names of the columns in the data frame \code{x} to use for these variables.}
\item{support_threshold}{The SAR support threshold. Items that do not occur at least this many times in the data will be considered "cold".}
\item{allowed_items}{A character or factor vector of allowed item IDs to use in the SAR model. If supplied, this will be used to categorise the item IDs in the data.}
\item{allowed_events}{The allowed values for \code{events}, if that argument is supplied. Other values will be discarded.}
\item{by_user}{Should the analysis be by user ID, or by user ID and timestamp? Defaults to userID only.}
\item{similarity}{Similarity metric to use; defaults to Jaccard.}
\item{half_life}{The decay period to use when weighting transactions by age.}
\item{catalog_data}{A dataset to use for building the cold-items feature model.}
\item{catalog_formula}{A formula for the feature model used to compute similarities for cold items.}
\item{cold_to_cold}{Whether the cold-items feature model should include the cold items themselves in the training data, or only warm items.}
\item{cold_item_model}{The type of model to use for cold item features.}
}
\value{
An S3 object representing the SAR model. This is essentially the item-to-item similarity matrix in sparse format, along with the original transaction data used to fit the model.
}
\description{
Fit a SAR model
}
\details{
Smart Adaptive Recommendations (SAR) is a fast, scalable, adaptive algorithm for personalized recommendations based on user transaction history and item descriptions. It produces easily explainable/interpretable recommendations and handles "cold item" and "semi-cold user" scenarios.
Central to how SAR works is an item-to-item \emph{co-occurrence matrix}, which is based on how many times two items occur for the same users. For example, if a given user buys items \eqn{i_1} and \eqn{i_2}, then the cell \eqn{(i_1, i_2)} is incremented by 1. From this, an item \emph{similarity matrix} can be obtained by rescaling the co-occurrences according to a given metric. Options for the metric include Jaccard (the default), lift, and counts (which means no rescaling).
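For example, under the Jaccard metric (the default) the similarity between two items is their co-occurrence count rescaled by the items' individual occurrence counts (writing \eqn{occ(\cdot)} for these counts; this is purely illustrative notation, not anything exported by the package):
\deqn{sim(i_1, i_2) = \frac{occ(i_1, i_2)}{occ(i_1) + occ(i_2) - occ(i_1, i_2)}}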
Note that the similarity matrix in SAR thus only includes information on which users transacted which items. It does not include any other information such as item ratings or features, which may be used by other recommender algorithms.
The SAR implementation in R should be usable on datasets with up to a few million rows and several thousand items. The main constraint is the size of the similarity matrix, which in turn depends (quadratically) on the number of unique items. The implementation has been successfully tested on the MovieLens 20M dataset, which contains about 138,000 users and 27,000 items. For larger datasets, it is recommended to use the \link[=az_rec_service]{Azure web service API}.
}
\section{Cold items}{
SAR has the ability to handle cold items, meaning those which have not been seen by any user, or which have been seen by fewer than \code{support_threshold} users. This is done by using item features to predict similarities. The method used for this is set by the \code{cold_item_model} argument:
\itemize{
\item If this is \code{NULL} (the default), a manual algorithm is used that correlates each feature in turn with similarity, and produces a predicted similarity based on which features two items have in common.
\item If this is the name of a modelling function, such as \code{"lm"} or \code{"randomForest"}, a model of that type is fit on the features and used to predict similarity. In particular, use \code{"lm"} to get a model that is (approximately) equivalent to that used by the Azure web service API.
}
The data frame and features used for cold items are given by the \code{catalog_data} and \code{catalog_formula} arguments. \code{catalog_data} should be a data frame whose first column is item ID. \code{catalog_formula} should be a one-sided formula (no LHS).
This feature is currently experimental, and subject to change.
}
\seealso{
\href{https://github.com/Microsoft/Product-Recommendations/blob/master/doc/sar.md}{Description of SAR} at the \href{https://github.com/Microsoft/Product-Recommendations}{Product Recommendations API repo} on GitHub
}
|
/man/sar.Rd
|
permissive
|
sumedhvdatar/SAR
|
R
| false | true | 5,429 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sar.R
\name{sar}
\alias{sar}
\alias{sar.data.frame}
\alias{sar.default}
\alias{print.sar}
\title{Fit a SAR model}
\usage{
sar(...)
\method{sar}{data.frame}(x, user = "user", item = "item",
time = "time", event = "event", weight = "weight", ...)
\method{sar}{default}(user, item, time, event = NULL, weight = NULL,
support_threshold = 1, allowed_items = NULL,
allowed_events = c(Click = 1, RecommendationClick = 2, AddShopCart = 3,
RemoveShopCart = -1, Purchase = 4), by_user = TRUE,
similarity = c("jaccard", "lift", "count"), half_life = 30,
catalog_data = NULL, catalog_formula = item ~ .,
cold_to_cold = FALSE, cold_item_model = NULL, ...)
\method{print}{sar}(x, ...)
}
\arguments{
\item{...}{For \code{sar()}, further arguments to pass to the cold-items feature model.}
\item{x}{A data frame. For the \code{print} method, a SAR model object.}
\item{user, item, time, event, weight}{For the default method, vectors to use as the user IDs, item IDs, timestamps, event types, and transaction weights for SAR. For the \code{data.frame} method, the names of the columns in the data frame \code{x} to use for these variables.}
\item{support_threshold}{The SAR support threshold. Items that do not occur at least this many times in the data will be considered "cold".}
\item{allowed_items}{A character or factor vector of allowed item IDs to use in the SAR model. If supplied, this will be used to categorise the item IDs in the data.}
\item{allowed_events}{The allowed values for \code{events}, if that argument is supplied. Other values will be discarded.}
\item{by_user}{Should the analysis be by user ID, or by user ID and timestamp? Defaults to userID only.}
\item{similarity}{Similarity metric to use; defaults to Jaccard.}
\item{half_life}{The decay period to use when weighting transactions by age.}
\item{catalog_data}{A dataset to use for building the cold-items feature model.}
\item{catalog_formula}{A formula for the feature model used to compute similarities for cold items.}
\item{cold_to_cold}{Whether the cold-items feature model should include the cold items themselves in the training data, or only warm items.}
\item{cold_item_model}{The type of model to use for cold item features.}
}
\value{
An S3 object representing the SAR model. This is essentially the item-to-item similarity matrix in sparse format, along with the original transaction data used to fit the model.
}
\description{
Fit a SAR model
}
\details{
Smart Adaptive Recommendations (SAR) is a fast, scalable, adaptive algorithm for personalized recommendations based on user transaction history and item descriptions. It produces easily explainable/interpretable recommendations and handles "cold item" and "semi-cold user" scenarios.
Central to how SAR works is an item-to-item \emph{co-occurrence matrix}, which is based on how many times two items occur for the same users. For example, if a given user buys items \eqn{i_1} and \eqn{i_2}, then the cell \eqn{(i_1, i_2)} is incremented by 1. From this, an item \emph{similarity matrix} can be obtained by rescaling the co-occurrences according to a given metric. Options for the metric include Jaccard (the default), lift, and counts (which means no rescaling).
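For example, under the Jaccard metric (the default) the similarity between two items is their co-occurrence count rescaled by the items' individual occurrence counts (writing \eqn{occ(\cdot)} for these counts; this is purely illustrative notation, not anything exported by the package):
\deqn{sim(i_1, i_2) = \frac{occ(i_1, i_2)}{occ(i_1) + occ(i_2) - occ(i_1, i_2)}}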
Note that the similarity matrix in SAR thus only includes information on which users transacted which items. It does not include any other information such as item ratings or features, which may be used by other recommender algorithms.
The SAR implementation in R should be usable on datasets with up to a few million rows and several thousand items. The main constraint is the size of the similarity matrix, which in turn depends (quadratically) on the number of unique items. The implementation has been successfully tested on the MovieLens 20M dataset, which contains about 138,000 users and 27,000 items. For larger datasets, it is recommended to use the \link[=az_rec_service]{Azure web service API}.
}
\section{Cold items}{
SAR has the ability to handle cold items, meaning those which have not been seen by any user, or which have been seen by fewer than \code{support_threshold} users. This is done by using item features to predict similarities. The method used for this is set by the \code{cold_item_model} argument:
\itemize{
\item If this is \code{NULL} (the default), a manual algorithm is used that correlates each feature in turn with similarity, and produces a predicted similarity based on which features two items have in common.
\item If this is the name of a modelling function, such as \code{"lm"} or \code{"randomForest"}, a model of that type is fit on the features and used to predict similarity. In particular, use \code{"lm"} to get a model that is (approximately) equivalent to that used by the Azure web service API.
}
The data frame and features used for cold items are given by the \code{catalog_data} and \code{catalog_formula} arguments. \code{catalog_data} should be a data frame whose first column is item ID. \code{catalog_formula} should be a one-sided formula (no LHS).
This feature is currently experimental, and subject to change.
}
\seealso{
\href{https://github.com/Microsoft/Product-Recommendations/blob/master/doc/sar.md}{Description of SAR} at the \href{https://github.com/Microsoft/Product-Recommendations}{Product Recommendations API repo} on GitHub
}
|
library(igraph)
setwd('~/Downloads/network-analysis-course/')
nodes <- read.csv("data/star-wars-network-nodes.csv")
nodes
edges <- read.csv("data/star-wars-network-edges.csv")
edges
g <- graph_from_data_frame(d=edges, vertices=nodes, directed=FALSE)
plot(g)
nodes
V(g)
vertex_attr(g)
nodes$name
g$name
V(g)$name
dark_side <- c("DARTH VADER", "MOTTI", "TARKIN")
light_side <- c("R2-D2", "CHEWBACCA", "C-3PO", "LUKE", "CAMIE", "BIGGS",
"LEIA", "BERU", "OWEN", "OBI-WAN", "HAN", "DODONNA",
"GOLD LEADER", "WEDGE", "RED LEADER", "RED TEN", "GOLD FIVE")
neutral <- c("GREEDO", "JABBA")
V(g)$color <- NA
vertex_attr(g)
V(g)$color[V(g)$name %in% dark_side] <- "red"
V(g)$color
V(g)$color[V(g)$name %in% light_side] <- "gold" # set the light side color name to gold
plot(g)
V(g)$color
V(g)$color[V(g)$name %in% neutral] <- "green" # set the color of neutral characters to green
plot(g)
dark_side_graph <- induced_subgraph(g, dark_side) # Using the dark_side variable from above
dark_side_graph
V(dark_side_graph)
plot(dark_side_graph)
light_side_graph <- induced_subgraph(g, light_side) # Using the light_side variable
plot(light_side_graph)
plot(light_side_graph)
# Post break
head(edges)
d <- graph_from_data_frame(d=edges, vertices=nodes, directed=TRUE)
plot(d)
d
E(g)
E(d)
E(g)$weight
edge_attr(g)
edge_attr(d)
E(g)$color <- "blue"
edge_attr(g)
E(g)$color[E(g)$weight >= 5] <- "red"
edge_attr(g)
plot(g)
g[]
d[]
#plot(d, layout=layout_with_sugiyama, main="Sugiyama Layout")
plot(d, layout=layout_with_fr, main="FR Layout")
V(d)$color <- NA
V(d)$color[V(d)$name %in% dark_side] <- "red"
V(d)$color[V(d)$name %in% light_side] <- "gold"
V(d)$color[V(d)$name %in% neutral] <- "green"
V(d)
plot(d, layout=layout_with_fr, main="FR Layout")
color_var <- V(d)$color
color_var
|
/lab_sessions/2020_june/demonstration.R
|
permissive
|
griff-rees/network-analysis-course
|
R
| false | false | 1,800 |
r
|
library(igraph)
setwd('~/Downloads/network-analysis-course/')
nodes <- read.csv("data/star-wars-network-nodes.csv")
nodes
edges <- read.csv("data/star-wars-network-edges.csv")
edges
g <- graph_from_data_frame(d=edges, vertices=nodes, directed=FALSE)
plot(g)
nodes
V(g)
vertex_attr(g)
nodes$name
g$name
V(g)$name
dark_side <- c("DARTH VADER", "MOTTI", "TARKIN")
light_side <- c("R2-D2", "CHEWBACCA", "C-3PO", "LUKE", "CAMIE", "BIGGS",
"LEIA", "BERU", "OWEN", "OBI-WAN", "HAN", "DODONNA",
"GOLD LEADER", "WEDGE", "RED LEADER", "RED TEN", "GOLD FIVE")
neutral <- c("GREEDO", "JABBA")
V(g)$color <- NA
vertex_attr(g)
V(g)$color[V(g)$name %in% dark_side] <- "red"
V(g)$color
V(g)$color[V(g)$name %in% light_side] <- "gold" # set the light side color name to gold
plot(g)
V(g)$color
V(g)$color[V(g)$name %in% neutral] <- "green" # set the color of neutral characters to green
plot(g)
dark_side_graph <- induced_subgraph(g, dark_side) # Using the dark_side variable from above
dark_side_graph
V(dark_side_graph)
plot(dark_side_graph)
light_side_graph <- induced_subgraph(g, light_side) # Using the light_side variable
plot(light_side_graph)
plot(light_side_graph)
# Post break
head(edges)
d <- graph_from_data_frame(d=edges, vertices=nodes, directed=TRUE)
plot(d)
d
E(g)
E(d)
E(g)$weight
edge_attr(g)
edge_attr(d)
E(g)$color <- "blue"
edge_attr(g)
E(g)$color[E(g)$weight >= 5] <- "red"
edge_attr(g)
plot(g)
g[]
d[]
#plot(d, layout=layout_with_sugiyama, main="Sugiyama Layout")
plot(d, layout=layout_with_fr, main="FR Layout")
V(d)$color <- NA
V(d)$color[V(d)$name %in% dark_side] <- "red"
V(d)$color[V(d)$name %in% light_side] <- "gold"
V(d)$color[V(d)$name %in% neutral] <- "green"
V(d)
plot(d, layout=layout_with_fr, main="FR Layout")
color_var <- V(d)$color
color_var
|
TITLE,GENRE,YEARS,DIRECTOR,ACTOR
t=1000
Titanic,drama,90,James Cameron,Bill Paxton
Stardust Memories,drama,80,Woody Allen,Charlotte Rampling
New York Stories,drama,80,Woody Allen,Mia Farrow
A Midsummer Night’s Sex Comedy,comedy,80,Woody Allen,Mia Farrow
Crimewave,comedy,80,Joel Coen,Louise Lasser
Avatar,action,00,James Cameron,Zoe Saldana
|
/tests/pref/R.R
|
permissive
|
WydD/astral
|
R
| false | false | 343 |
r
|
TITLE,GENRE,YEARS,DIRECTOR,ACTOR
t=1000
Titanic,drama,90,James Cameron,Bill Paxton
Stardust Memories,drama,80,Woody Allen,Charlotte Rampling
New York Stories,drama,80,Woody Allen,Mia Farrow
A Midsummer Night’s Sex Comedy,comedy,80,Woody Allen,Mia Farrow
Crimewave,comedy,80,Joel Coen,Louise Lasser
Avatar,action,00,James Cameron,Zoe Saldana
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/devel-filter_repo.R
\name{filter_repo}
\alias{filter_repo}
\title{Destroy all traces of a file accidentally pushed to a public repo. This requires creating a fresh local clone of the repo to rewrite history. This function does not alter the repo from the command line, but rather returns the CLI commands to do so}
\usage{
filter_repo(path_to_local_repo_clone, destroy_this_file)
}
\arguments{
\item{path_to_local_repo_clone}{path to a fresh local clone of the repo}
\item{destroy_this_file}{the file whose history should be destroyed (i.e. the file that was accidentally pushed to the public repo)}
}
\description{
Destroy all traces of a file accidentally pushed to a public repo. This requires creating a fresh local clone of the repo to rewrite history. This function does not alter the repo from the command line, but rather returns the CLI commands to do so
}
\keyword{internal}
|
/man/filter_repo.Rd
|
no_license
|
meerapatelmd/glitter
|
R
| false | true | 824 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/windows.R
\name{windows_credentials}
\alias{windows_credentials}
\alias{windows_item_types}
\alias{windows_item}
\alias{windows_item_read}
\alias{windows_item_write}
\alias{windows_item_delete}
\alias{windows_item_enumerate}
\title{Query and manipulate the Windows Credential Store}
\usage{
windows_item_types()
windows_item(
credential_blob,
target_name,
type = "generic",
comment = NULL,
persist = c("local_machine", "session", "enterprise"),
attributes = list(),
target_alias = NULL,
username = NULL
)
windows_item_read(target_name, type = "generic")
windows_item_write(item, preserve = FALSE)
windows_item_delete(target_name, type = "generic")
windows_item_enumerate(filter = NULL, all = FALSE)
}
\arguments{
\item{credential_blob}{The secret credential, a password,
certificate or key. See also
\url{https://docs.microsoft.com/en-us/windows/win32/api/wincred/ns-wincred-credentialw}.
This can be a raw vector, or a string. If it is a string, then it
will be converted to Unicode, without the terminating zero.
It can also be \code{NULL}, to be used with the \code{preserve = TRUE}
argument of \code{windows_item_write()}.}
\item{target_name}{The name of the credential. The \code{target_name}
and \code{type} members uniquely identify the credential. This member
cannot be changed after the credential is created. Instead, the
credential with the old name should be deleted and the credential
with the new name created. This member cannot be longer than
\code{CRED_MAX_GENERIC_TARGET_NAME_LENGTH} (32767) characters.
This member is case-insensitive.}
\item{type}{The type of the credential. This member cannot be
changed after the credential is created. See \code{windows_item_types()}
for possible values.}
\item{comment}{If not \code{NULL}, then a string comment from the user
that describes this credential. This member cannot be longer than
\code{CRED_MAX_STRING_LENGTH} (256) characters. It is stored as a Unicode
string.}
\item{persist}{Defines the persistence of this credential.
\itemize{
\item \code{"local_machine"}: The credential persists for all subsequent
logon sessions on this same computer. It is visible to other
logon sessions of this same user on this same computer and not
visible to logon sessions for this user on other computers.
\item \code{"session"}: The credential persists for the life of the logon
session. It will not be visible to other logon sessions of this
same user. It will not exist after this user logs off and back on.
\item \code{"enterprise"}: The credential persists for all subsequent logon
sessions on this same computer. It is visible to other logon
sessions of this same user on this same computer and to logon
sessions for this user on other computers.
}}
\item{attributes}{Application-defined attributes that are
associated with the credential. This is \code{NULL} or a named list
of raw or string vectors. String vectors are converted to
Unicode, without the terminating zero. A credential can have at
most 64 attributes, the names of the attributes cannot be
longer than \code{CRED_MAX_STRING_LENGTH} (256) characters each, and
the attributes themselves cannot be longer than
\code{CRED_MAX_VALUE_SIZE} (256) bytes.}
\item{target_alias}{Alias for the \code{target_name} member.
This member can be read and written. It cannot be longer than
\code{CRED_MAX_STRING_LENGTH} (256) characters. It is stored in Unicode.}
\item{username}{\code{NULL} or the user name of the account used to
connect to \code{target_name}.}
\item{item}{\code{oskeyring_windows_item} object to write.}
\item{preserve}{The credential BLOB from an existing credential
is preserved with the same credential name and credential type.
The \code{credential_blob} of the passed \code{oskeyring_windows_item}
object must be \code{NULL}.}
\item{filter}{If not \code{NULL}, then a string to filter the
credentials. Only credentials with a \code{target_name} matching the
filter will be returned. The filter specifies a name prefix
followed by an asterisk. For instance, the filter \code{"FRED*"} will
return all credentials with a \code{target_name} beginning with the
string \code{"FRED"}.}
\item{all}{Whether to use the \code{CRED_ENUMERATE_ALL_CREDENTIALS}
flag to enumerate all credentials. If this is \code{TRUE}, then \code{filter}
must be \code{NULL}. If this is \code{TRUE}, then the target name of each
credential is returned in the \verb{"namespace:attribute=target"} format.}
}
\value{
\code{windows_item_types()} returns a character vector, the
currently supported credential types.
\code{windows_item()} returns an \code{oskeyring_windows_item}
object.
\code{windows_item_read()} returns an \code{oskeyring_windows_item}
object.
\code{windows_item_write()} returns \code{NULL}, invisibly.
\code{windows_item_delete()} returns \code{NULL}, invisibly.
\code{windows_item_enumerate()} returns a list of
\code{oskeyring_windows_item} items.
}
\description{
\verb{windows_item_*} functions read, write, delete and list
credentials.
}
\details{
\subsection{\code{windows_item_types()}}{
\code{windows_item_types()} lists the currently supported credential
types.\if{html}{\out{<div class="r">}}\preformatted{windows_item_types()
}\if{html}{\out{</div>}}\preformatted{## [1] "generic" "domain_password"
## [3] "domain_certificate" "domain_visible_password"
}
}
\subsection{\code{windows_item()}}{
\code{windows_item()} creates a Windows credential that can then
be added to the credential store.\if{html}{\out{<div class="r">}}\preformatted{it <- windows_item("secret", "my-host-password")
it
}\if{html}{\out{</div>}}\preformatted{## <oskeyring_windows_item: generic>
## target_name: my-host-password
## persist: local_machine
## credential_blob: <-- hidden -->
}
}
\subsection{\code{windows_item_write()}}{
Writes an item to the credential store.\if{html}{\out{<div class="r">}}\preformatted{windows_item_write(it)
}\if{html}{\out{</div>}}
}
\subsection{\code{windows_item_read()}}{
Reads a credential with the specified type and \code{target_name}.\if{html}{\out{<div class="r">}}\preformatted{windows_item_read("my-host-password")
}\if{html}{\out{</div>}}\preformatted{## <oskeyring_windows_item: generic>
## target_name: my-host-password
## persist: local_machine
## credential_blob: <-- hidden -->
}
}
\subsection{\code{windows_item_enumerate()}}{
List all credentials that match a prefix.\if{html}{\out{<div class="r">}}\preformatted{windows_item_enumerate(filter = "my-*")
}\if{html}{\out{</div>}}\preformatted{## [[1]]
## <oskeyring_windows_item: generic>
## target_name: my-host-password
## persist: local_machine
## credential_blob: <-- hidden -->
}
}
\subsection{\code{windows_item_delete()}}{
Delete a credential:\if{html}{\out{<div class="r">}}\preformatted{windows_item_delete("my-host-password")
windows_item_enumerate(filter = "my-*")
}\if{html}{\out{</div>}}\preformatted{## list()
}
}
}
\examples{
# See above
}
\seealso{
The API documentation at
\url{https://docs.microsoft.com/en-us/windows/win32/api/wincred/}
}
|
/man/windows_credentials.Rd
|
permissive
|
isabella232/oskeyring
|
R
| false | true | 7,121 |
rd
|
context('function test simpleHeatmaps function')
# Skip the whole file if pandoc is missing; a failing expect_true() would
# error rather than return FALSE, so test the condition directly.
if ( ! rmarkdown::pandoc_available() ){
	skip ( "Pandoc needed - but missing here")
}
#prefix = 'tests/testthat'
prefix = '.'
#print(file.path(getwd(), prefix, 'data', 'simpleHeatmap_mat.RData' ) )
#cellexalObj <- loadObject(file.path(prefix,'data','cellexalObjOK.RData') )
x = cellexalObj
x = reset(x)
x@outpath = file.path(prefix,'data','output','simpleHeatmaps' )
if ( file.exists(x@outpath ) ){
unlink( x@outpath ,recursive=TRUE)
}
dir.create( x@outpath )
x = sessionPath(x, 'simpleHeatmap' )
grouping <- normalizePath (file.path(prefix, 'data', 'SelectionHSPC_time.txt' ))
x = userGrouping( x, grouping)
x = pseudotimeTest3D( x, grouping = x@usedObj$lastGroup )
x = createStats( x@usedObj$linearSelections[[1]], x)
linearSelection = x@usedObj$linearSelections[[1]]
##that is the first time we produce a time in a test script
## Time to test this!
expect_equal( linearSelection@gname, 'Time.group.2', label = "time gname is correct" )
expect_equal( linearSelection@parentSelection, 'User.group.1', label = "time parentSelection is correct" )
expect_equal( linearSelection@geneClusters, list(), label="geneClusters are not populated" )
# that fails too many times - whenever I add a small change.
#expect_equal( linearSelection@id, "04a67710768848dba40b09e9499d4205", label="id correct" )
expect_equal( linearSelection@drc, "DDRtree", label="drc correct" )
expect_equal( length(linearSelection@error), 0 , label="no error" )
fname= file.path(x@usedObj$sessionPath,'png', 'simpleHeatmap' )
res = simplePlotHeatmaps (x, info = groupingInfo( x, linearSelection@gname), fname )
expect_equal( names(res), c("genes", "ofile", "pngs", "groupColors", "error",
'smoothedClusters', 'MaxInCluster', "mat" ) )
expect_equal( length(res$genes), 6, label="6 gene groups")
expect_equal( res$ofile , paste(sep=".", fname, 'png'), label="ofile correct" )
expect_equal( length(res$pngs), 6, label="6 heatmap pngs")
for ( f in c( res$ofile, (res$pngs )) ){
expect_true( file.exists( f ), label=f )
}
expect_equal( res$error, NULL, label="no error" )
#expect_equal( dim(res$mat), c( 159, 250 ), label="zscored dimension OK" )
|
/tests/testthat/test-simpleHeatmap.R
|
no_license
|
sonejilab/cellexalvrR
|
R
| false | false | 2,276 |
r
|
# Combine a list of R/qtl scanone results column-wise, working in sets of n
# so that each cbind() call stays small.
combine.s1<-function(s1s,n=100){
  sets<-split(1:length(s1s), ceiling(seq_along(s1s)/n))
  cat("combining scanones by sets of", n, "\n")
  out.byset<-list()
  for(i in 1:length(sets)){
    out.byset[[i]]<-s1s[[sets[[i]][1]]]
    for(j in (min(sets[[i]])+1): max(sets[[i]])){
      out.byset[[i]]<-cbind(out.byset[[i]], s1s[[j]])
    }
    cat(length(out.byset)*n,"...")
  }
  out.all<-out.byset[[1]]
  cat("\n combining sets (total =",length(s1s),") : \n")
  for(i in 2:length(out.byset)) {
    out.all<-cbind(out.all, out.byset[[i]])
    cat(length(colnames(out.all)),"...")
  }
  return(out.all)
}
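# Minimal usage sketch (not from the original script): assumes the R/qtl
# package and its bundled `fake.f2` example cross; the object names below
# are hypothetical.
if (FALSE) {
  library(qtl)
  data(fake.f2)
  fake.f2 <- calc.genoprob(fake.f2, step = 2)
  # a small list of scanone results sharing the same map
  s1.list <- replicate(3, scanone(fake.f2), simplify = FALSE)
  s1.all  <- combine.s1(s1.list, n = 100)
}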
|
/combine.s1.R
|
no_license
|
jtlovell/eqtlanalysis
|
R
| false | false | 618 |
r
|
## These functions cache the inverse of a matrix so that repeated requests
## for the inverse do not have to recompute it.
## makeCacheMatrix: build a special "matrix" object that stores a matrix
## together with a cached copy of its inverse (reset to NULL whenever the
## matrix changes).
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setInverse <- function(inverse) i <<- inverse
getInverse <- function() i
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve: return the inverse of the special matrix object constructed by
## makeCacheMatrix, computing and caching it only on the first call.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getInverse()
if (!is.null(i)) {
# message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, diag(dim(data)[1]))
x$setInverse(i)
i
}
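## Minimal usage sketch (not part of the original assignment file); the 2x2
## matrix below is an arbitrary invertible example.
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2))
# cacheSolve(m) # computes the inverse and caches it
# cacheSolve(m) # returns the cached inverse without recomputing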
|
/cachematrix.R
|
no_license
|
doncem/ProgrammingAssignment2
|
R
| false | false | 931 |
r
|
library(shiny)
shinyServer(function(input, output, session) {
values <- reactiveValues()
values$upload_flag <- FALSE
# UI element definitions #####################################################
# Procedure setup ============================================================
# Data selector/upload -------------------------------------------------------
output$data_selector <- renderUI({
fluidRow(
column(width = 12,
selectInput("data_select",
"Data:",
c("User Upload", "mtcars", "iris", "cell_survival", "galaxies")
),
conditionalPanel("input.data_select == 'User Upload'",
fileInput("user_file", "Upload CSV data", accept = ".csv")
)
)
)
})
# Data preview ---------------------------------------------------------------
output$data_preview <- renderDataTable({
# Don't display table until something is loaded
if (is.null(values$data)) return(NULL)
else return(dplyr::select(values$data, -row_id))
}, options = list(scrollX=TRUE))
# Variable selectors ---------------------------------------------------------
output$var_selector <- renderUI({
# Define selectors
var1_selector <- selectInput("param_var1", "var1_selector", values$col_names)
var2_selector <- selectInput("param_var2", "var2_selector", values$col_names)
vars_selector <- selectInput("param_vars",
"vars_selector",
values$col_names,
multiple = TRUE
)
# Generate control
if (input$param_stat %in% c("Correlation", "Smoothing Spline")) {
control <- fluidRow(column(width = 12, var1_selector, var2_selector))
} else if (input$param_stat %in% c("Linear Regression", "LOESS")) {
control <- fluidRow(column(width = 12, var1_selector, vars_selector))
} else control <- var1_selector
return(control)
})
output$strata_selector <- renderUI({
selectInput("param_strata", "Strata", c(" ", values$col_names))
})
# Jackknife-after-bootstrap ==================================================
# Plot -----------------------------------------------------------------------
output$jab_plot <- renderPlotly({
# Calculate disruption for given quantile
values$jab_samples <- bootstrap_disruption(
input$jab_quantile,
values$boots,
values$jab_samples
)
# Calculate uncertainty for given quantile and detect outliers
values$uncertainty <- get_uncertainty_bound(
input$outlier_threshold,
values$boots,
values$jab_samples
)
values$jab_samples <- mutate(values$jab_samples,
outlier = disruption > values$uncertainty
)
# Base Plotly object
fig <- plot_ly(type = "scatter", showlegend = FALSE) %>%
layout(
title = "Jackknife-After-Bootstrap Plot",
xaxis = list(title = "Relative Influence", zeroline = FALSE),
yaxis = list(title = "Disruption")
) %>%
config(displayModeBar = FALSE)
# Sorted tibble to trace
jab_data <- values$jab_samples %>%
dplyr::select(deleted_case, rel_influence, disruption, outlier) %>%
arrange(rel_influence) %>%
set_colnames(c("deleted_case", "x", "y", "outlier"))
# Uncertainty bound tibble
uncertainty_data <- tibble(
x = c(0, max(jab_data$x)) * 1.05,
y = values$uncertainty
)
# Add traces
fig <- fig %>%
# Uncertainty bound
add_trace(
data = uncertainty_data,
x = ~x, y = ~y,
name = "Uncertainty Bound",
mode = "lines", line = list(color = "#000000", dash = "dash"),
fill = "tozeroy", fillcolor = "rgba(0, 0, 0, 0.2)",
hoverinfo = "skip"
) %>%
# Jackknife quantiles
add_trace(
data = jab_data,
x = ~x, y = ~y,
mode = "markers", marker = list(size = 10),
symbol = ~outlier, symbols = c("o", "x-thin"),
color = "orange",
customdata = ~deleted_case,
hovertemplate = "ID: %{customdata}<br>Influence: %{x}<br>Disruption: %{y}"
) %>%
add_trace(
data = jab_data,
x = ~x, y = ~y,
mode = "lines",
color = "orange",
hoverinfo = "skip"
)
fig
})
# Outlier list ---------------------------------------------------------------
output$outlier_list <- renderDataTable({
outliers <- filter(values$jab_samples, outlier)
jab_data <- outliers %>%
dplyr::select(
`Case No.` = deleted_case,
`Relative Influence` = rel_influence,
Disruption = disruption
)
replication_data <- outliers %>%
dplyr::select(starts_with("replication")) %>%
set_colnames(
str_to_title(str_replace_all(colnames(.), "replication_", ""))
)
cbind(replication_data, jab_data) %>%
relocate(`Case No.`) %>%
return()
})
# Outlier boundary -----------------------------------------------------------
output$boundary_display <- renderUI({
HTML(paste0("<b>Outlier Boundary: ", values$uncertainty, "</b>"))
})
# UI event handlers ##########################################################
# Procedure setup page =======================================================
# File upload checking -------------------------------------------------------
observeEvent(input$user_file, {
# Check that file is a CSV; reject if not
path <- input$user_file$datapath
if (tools::file_ext(path) != "csv") {
message(paste0("File ", path, " is not a .csv file."))
} else {
# Add row IDs and store
df <- read_csv(path, col_types = cols())
values$data <- mutate(df, row_id = 1:nrow(df))
values$col_names <- colnames(df)
values$upload_flag <- TRUE
}
})
# Toy data input -------------------------------------------------------------
observeEvent(input$data_select, {
if (input$data_select != "User Upload") {
path <- paste0("toy_data/", input$data_select, ".csv")
df <- read_csv(path, col_types = cols())
values$data <- mutate(df, row_id = 1:nrow(df))
values$col_names <- colnames(df)
values$upload_flag <- TRUE
}
})
# Navigation button event handlers ###########################################
# Welcome page ===============================================================
observeEvent(input$welcome_next, {
updateTabsetPanel(session, "wizard", "setup")
})
# Procedure setup page =======================================================
observeEvent(input$settings_next, {
# Display busy dialog ------------------------------------------------------
show_modal_spinner(
spin = "swapping-squares",
color = "#112446",
text = "Bootstrapping..."
)
# Set up procedure ---------------------------------------------------------
if (!is.na(input$param_B) & input$param_B > 0) {
values$param_B <- round(input$param_B)
} else values$param_B <- 1000
if (!is.na(input$param_seed) & input$param_seed > 0) {
values$param_seed <- round(input$param_seed)
} else values$param_seed <- NA
if (input$param_strata == " ") values$strata <- NULL
else values$strata <- input$param_strata
# Generate procedure specification
if (input$param_stat == "Correlation") {
values$spec <- make_spec(
input$param_stat,
input$param_var1,
input$param_var2
)
} else if (input$param_stat == "Linear Regression") {
values$spec <- make_spec(
input$param_stat,
input$param_var1,
input$param_vars,
fit = input$param_fit
)
} else if (input$param_stat == "Smoothing Spline") {
values$spec <- make_spec(
input$param_stat,
input$param_var1,
input$param_var2,
input$param_threshold
)
} else if (input$param_stat == "LOESS") {
values$spec <- make_spec(
input$param_stat,
input$param_var1,
input$param_vars,
target = c(
input$param_target1,
input$param_target2,
input$param_target3
)
)
}
else values$spec <- make_spec(
input$param_stat,
input$param_var1
)
# Random seed --------------------------------------------------------------
if (!is.na(values$param_seed)) set.seed(values$param_seed)
else set.seed(NULL)
# Execute resampling -------------------------------------------------------
values$estimate <- point_estimate(values$data, values$spec)
values$boots <- do_bootstrap(
df = values$data,
B = values$param_B,
spec = values$spec,
coefs = values$estimate,
strata = values$strata
)
# Run diagnostics ----------------------------------------------------------
values$jab_samples <- values$boots %>%
jackknife_after_bootstrap() %>%
jackknife_influence()
# Bootstrap estimates ------------------------------------------------------
values$se <- estimate_summary(values$boots, sd)
values$bias <- map2(
estimate_summary(values$boots, mean),
values$estimate,
`-`
)
values$skewness <- estimate_summary(values$boots, skewness)
values$kurtosis <- estimate_summary(values$boots, kurtosis)
# Display results ----------------------------------------------------------
# Generate results modules for each estimated parameter
for (parameter in names(values$estimate)) {
id <- str_replace(parameter, "replication_", "")
insertUI("#results_next", where = "beforeBegin", results_ui(id))
results_server(id, values)
}
# Show results
updateTabsetPanel(session, "wizard", "results")
# Terminate busy dialog
remove_modal_spinner()
})
# Results page ===============================================================
observeEvent(input$results_next, {
updateTabsetPanel(session, "wizard", "outliers")
})
})
|
/server.R
|
no_license
|
Alex-Lehmann/Turner
|
R
| false | false | 10,932 |
r
|
library(lubridate)
Sys.setlocale(category = "LC_ALL", locale = "english")
setwd("C:\\Users\\ankkn\\Desktop\\R\\kursus\\data_exploration")
# Read the full dataset, keep only 1-2 February 2007 and recode "?" as NA
data<-read.csv("household_power_consumption.txt",sep=";", header = T)
data<-subset(data,Date=="1/2/2007" | Date=="2/2/2007")
data[data=="?"] = NA
# Parse the date and time columns and convert the measurements to numeric
date<-as.Date(data$Date, format = "%d/%m/%Y")
time<-hms(data$Time)
gap<-as.numeric(as.character(data$Global_active_power))
grp<-as.numeric(as.character(data$Global_reactive_power))
vol<-as.numeric(as.character(data$Voltage))
gi<-as.numeric(as.character(data$Global_intensity))
sm1<-as.numeric(as.character(data$Sub_metering_1))
sm2<-as.numeric(as.character(data$Sub_metering_2))
sm3<-as.numeric(as.character(data$Sub_metering_3))
datetime<-date+time
data<-cbind(datetime, date, time, gap, grp, vol, gi, sm1, sm2, sm3)
png(filename="plot1.png", width=480, height=480)
hist(gap, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
/plot1.R
|
no_license
|
AndersKramerKnudsen/BasicPlotting
|
R
| false | false | 955 |
r
|
## ---------------
## Read the Data
## ---------------
## Read a subset of the electric power consumption data
epc <- read.table("household_power_consumption.txt", skip = 66637, nrow = 2880, sep = ";",
col.names = colnames(read.table("household_power_consumption.txt",nrow = 1, header = TRUE, sep=";")))
## Merge the Date and Time data into one column, separated by a blank
cols <- c( 'Date' , 'Time' )
epc$Date_Time <- apply( epc[ , cols ] , 1 , paste , collapse = " " )
## Convert the combined 'Date and Time' strings to POSIXlt date-time values
epc$Date_Time <- strptime(epc$Date_Time, format='%d/%m/%Y %H:%M:%S')
## --------------------------------------------
## 3. Line plot of 'Sub Metering 1, 2, 3' data
## --------------------------------------------
png(file = "plot3.png", width = 480, height = 480)
opar=par(ps=12)
plot_col <- c("red","green","blue")
plot(epc$Date_Time, epc$Sub_metering_1, type = "l", ylab="Energy sub metering", xlab = "", col = plot_col[1])
lines(epc$Date_Time, epc$Sub_metering_2, type = "l", col = plot_col[2])
lines(epc$Date_Time, epc$Sub_metering_3, type = "l", col = plot_col[3])
opar=par(ps=8)
legend("topright", c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col = plot_col)
dev.off()
|
/figure/plot3.R
|
no_license
|
RaheemIqbal1/ExData_Plotting1
|
R
| false | false | 1,274 |
r
|
# nocov start - compat-lifecycle (last updated: rlang 0.3.0.9000)
scoped_lifecycle_silence <- function(frame = rlang::caller_env()) {
rlang::scoped_options(.frame = frame,
lifecycle_verbosity = "quiet"
)
}
with_lifecycle_silence <- function(expr) {
scoped_lifecycle_silence()
expr
}
scoped_lifecycle_warnings <- function(frame = rlang::caller_env()) {
rlang::scoped_options(.frame = frame,
lifecycle_verbosity = "warning"
)
}
with_lifecycle_warnings <- function(expr) {
scoped_lifecycle_warnings()
expr
}
scoped_lifecycle_errors <- function(frame = rlang::caller_env()) {
scoped_lifecycle_warnings(frame = frame)
rlang::scoped_options(.frame = frame,
lifecycle_verbosity = "error"
)
}
with_lifecycle_errors <- function(expr) {
scoped_lifecycle_errors()
expr
}
# Enable once signal_superseded() reaches stable state
signal_superseded <- function(...) {}
foreign_caller_env <- function(my_env = ns_env()) {
for (n in 2:10) {
caller <- caller_env(n)
if (!is_reference(env_parent(caller), my_env)) {
return(caller)
}
}
# Safety net
caller
}
# nocov end
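# Usage sketch (illustrative, not part of the compat file): run an expression
# with lifecycle warnings upgraded to errors, e.g. inside a test; the
# deprecated call below is only an example.
# with_lifecycle_errors(tibble::data_frame(x = 1))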
|
/R/compat-lifecycle.R
|
permissive
|
datacamp/tibble
|
R
| false | false | 1,120 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_probs.R
\name{calc_probs}
\alias{calc_probs}
\title{Calculate Probabilities
Calculate probabilities based on type of profits and type of catch}
\usage{
calc_probs(prof_type = "avg_profit_fuel_only", catch_type,
objective = "difference", in_cp_name = "poss_clusts1",
poss_clusts1 = poss_clusts)
}
\arguments{
\item{prof_type}{Type of profit; can be "avg_haul_profit" or "avg_profit_fuel_only"}
\item{catch_type}{Type of catch; can be "type_clust_catch", "type_clust_perc", or "type_prop_hauls"}
\item{objective}{Objective; to catch the "highest" of target species? "lowest" of weak stock species?
Or area with the biggest "difference" between target and weaks?}
\item{in_cp_name}{Character string of input to calc_probs name}
\item{poss_clusts1}{Input to function; defaults to poss_clusts, called in the parent environment}
}
\description{
Calculate Probabilities
Calculate probabilities based on type of profits and type of catch
}
\examples{
f1 <- find_best_clusts(catch_type = "type_clust_perc", objective = "highest")
f2 <- find_best_clusts(catch_type = "type_prop_hauls", objective = "highest")
f3 <- find_best_clusts(catch_type = "type_clust_perc", objective = "lowest")
f4 <- find_best_clusts(catch_type = "type_prop_hauls", objective = "lowest")
#Results might be slightly different for different columns
cbind(f3$unq_clust, f4$unq_clust)
}
|
/man/calc_probs.Rd
|
no_license
|
peterkuriyama/ch4
|
R
| false | true | 1,440 |
rd
|
# Epiphytic diatoms associated with the South African kelp
# Diatom data exploration, analysis and presentation
# Serge Mayombo
# 11th February 2018
library(tidyverse)
library(readr)
# read in the data
counts <- read_csv("Diatom_counts_tidy.csv")
# select only some columns
counts.spec <- counts %>%
select(Site, Host, Replicate, Host_spp, Host_size, Genus, Density) %>%
na.omit()
# Summary stats:
library(Rmisc)
library(ggplot2)
counts.av <- summarySE(counts.spec, measurevar = "Density", groupvars = c("Host", "Genus"), na.rm = TRUE)
counts.av
# Plotting mean diatom abundances with error bars
ggplot(counts.av, aes(Genus, Density, fill = Genus)) +
geom_bar(stat = "identity") +
geom_errorbar(aes(ymin = Density - se, ymax = Density + se), size = .3, width = .2, position = position_dodge(.9)) +
facet_grid(.~Host, drop = TRUE, scales = "free", space = "free_x") +
theme_bw() + ylab("Diatom density (cells/square millimeter)") + xlab("Diatom genus") +
scale_y_continuous(expand = c(0,0)) + theme(strip.background = element_rect(fill="gray85")) +
theme(panel.spacing = unit(0.3, "lines")) +
scale_fill_hue(name = "Diatom genus", guide = FALSE) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5), strip.text = element_text(size = 6, face = "bold"))
ggsave("diatom_boxlot.png", width = 6, height = 4, scale = 1.4)
ggplot(counts.av, aes(Genus, Density, fill = Genus)) +
geom_bar(stat = "identity") +
geom_errorbar(aes(ymin = Density - se, ymax = Density + se), size = .3, width = .2, position = position_dodge(.9)) +
facet_grid(.~Host, drop = TRUE, scales = "free", space = "free_x") +
theme_bw() + ylab(expression("Diatom density" ~ "(cells mm"^-2*")")) + xlab("Diatom genus") +
scale_y_continuous(expand = c(0,0)) + theme(strip.background = element_rect(fill="gray85")) +
theme(panel.spacing = unit(0.3, "lines")) +
scale_fill_hue(name = "Diatom genus", guide = FALSE) +
theme(axis.text.x = element_text(face = "bold.italic", angle = 90, hjust = 1, vjust = 0.5), strip.text = element_text(size = 6, face = "bold"))
ggsave("diatom_boxlot1.pdf", width = 6, height = 4, scale = 1.4)
ggplot(ChickWeight, aes(x = Time, y = weight)) +
geom_line(aes(group = Chick)) +
labs(x = expression("cells/mm"^2))
# nMDS
library(vegan)
library(grid)
abund_table<-read.csv("PB_data_matrix.csv",row.names=1,check.names=FALSE)
meta_table<-read.csv("PB_diat_env.csv",row.names=1,check.names=FALSE)
#Just a check to ensure that the samples in meta_table are in the same order as in abund_table
meta_table<-meta_table[rownames(abund_table),]
#Get grouping information
grouping_info<-data.frame(row.names=rownames(abund_table),t(as.data.frame(strsplit(rownames(abund_table),"_"))))
head(grouping_info)
#Get MDS stats
sol<-metaMDS(abund_table,distance = "bray", k = 2, trymax = 50)
#Make a new data frame holding the NMDS scores plus host species, host size and replicate, to be used for colouring and point shapes
NMDS=data.frame(x=sol$point[,1],y=sol$point[,2],Host_spp=as.factor(grouping_info[,1]),Host_size=as.factor(grouping_info[,2]),Replicate=as.factor(grouping_info[,3]))
#Get spread of points based on Host_spp
plot.new()
ord<-ordiellipse(sol, as.factor(grouping_info[,1]) ,display = "sites", kind ="sd", conf = 0.95, label = T)
dev.off()
#Reference: http://stackoverflow.com/questions/13794419/plotting-ordiellipse-function-from-vegan-package-onto-nmds-plot-created-in-ggplo
#Data frame df_ell contains values to show ellipses. It is calculated with function veganCovEllipse which is hidden in vegan package. This function is applied to each level of NMDS (group) and it uses also function cov.wt to calculate covariance matrix.
veganCovEllipse<-function (cov, center = c(0, 0), scale = 1, npoints = 100)
{
theta <- (0:npoints) * 2 * pi/npoints
Circle <- cbind(cos(theta), sin(theta))
t(center + scale * t(Circle %*% chol(cov)))
}
#Generate ellipse points
df_ell <- data.frame()
for(g in levels(NMDS$Host_spp)){
if(g!="" && (g %in% names(ord))){
df_ell <- rbind(df_ell, cbind(as.data.frame(with(NMDS[NMDS$Host_spp==g,],
veganCovEllipse(ord[[g]]$cov,ord[[g]]$center,ord[[g]]$scale)))
,Host_spp=g))
}
}
head(df_ell)
tail(df_ell)
#Generate mean values from NMDS plot grouped on Host_spp
NMDS.mean=aggregate(NMDS[,1:2],list(group=NMDS$Host_spp),mean)
NMDS.mean
#Now do the actual plotting
library(ggplot2)
shape_values<-seq(1,2)
p<-ggplot(data=NMDS,aes(x,y,colour=Host_spp))
p<-p+ annotate("text",x=NMDS.mean$x,y=NMDS.mean$y,label=NMDS.mean$group,size=4)
p<-p+ geom_path(data=df_ell, aes(x=NMDS1, y=NMDS2), size=1, linetype=2)
p<-p+geom_point(aes(shape=Host_size))+scale_shape_manual(values=shape_values)+theme_bw()
pdf("NMDS.pdf")
print(p)
dev.off()
|
/Diatom_counts/Mean diatom abundances plot.R
|
no_license
|
sergemayombo/Diatoms
|
R
| false | false | 4,822 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ex_flow.R
\docType{data}
\name{ex_flow}
\alias{ex_flow}
\title{Traded volume of land between countries}
\format{A data frame with 10866 rows and 6 variables:
\describe{
\item{O}{name of exporting country}
\item{D}{name of importing country}
\item{vegetable}{volume of land associated with trading vegetables, in ha}
\item{fruit}{volume of land associated with trading fruits, in ha}
\item{wheat}{volume of land associated with trading wheat, in ha}
\item{soybean}{volume of land associated with trading soybeans, in ha}
}}
\source{
\url{http://fao.org/faostat/}
}
\usage{
data(ex_flow)
}
\description{
A dataset containing trade data between countries, with the traded volume of land for each of the 4 categories.
}
\keyword{datasets}
|
/man/ex_flow.Rd
|
no_license
|
cran/halfcircle
|
R
| false | true | 829 |
rd
|
|
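# glycan_map(): read the LINK records of a prepared PDB via link_info(), expand each
# glycosylation link into the 11 consecutive HETATM residue numbers of its glycan
# (hard-coded below), merge with the residue-number map and write the combined table.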
glycan_map <- function(prep_pdb, map_file, glycan_map_file)
{
source('~/Desktop/Protein_analysis/scripts/LinkInfo.R')
links <- link_info(in_file = prep_pdb, out_file = '', return_d = T)
new_data <- links[rep(1:nrow(links), 11) ,]
for (i in 1:nrow(links))
{
start <- as.numeric(links$hetresno[i])
new_data$hetresno[new_data$resno == links$resno[i]] <- start:(start + 10)
}
map <- read.csv(map_file, sep = "", stringsAsFactors = F)
new_data <- merge(new_data, map, by.x = 'resno', by.y = 'seq')
write.table(new_data, glycan_map_file, row.names = F)
}
glycan_map('~/Desktop/Protein_analysis/Prep/Du156/Du156_m9_2_301_prep.pdb',
'~/Desktop/Protein_analysis/map/Du156_301/map.txt',
'~/Desktop/Protein_analysis/map/Du156_301/glycan_map.txt')
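# Variant of glycan_map() that calls link_info() with a 6 A cutoff measured from the
# CA atom of the linking residue (NLN_elety = "CA") instead of the default link definition.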
glycan_map_to_CA <- function(prep_pdb, map_file, glycan_map_file)
{
source('~/Desktop/Protein_analysis/scripts/LinkInfo.R')
links <- link_info(in_file = prep_pdb, out_file = '', return_d = T, cut_off = 6, NLN_elety = "CA")
new_data <- links[rep(1:nrow(links), 11) ,]
for (i in 1:nrow(links))
{
start <- as.numeric(links$hetresno[i])
new_data$hetresno[new_data$resno == links$resno[i]] <- start:(start + 10)
}
map <- read.csv(map_file, sep = "", stringsAsFactors = F)
new_data <- merge(new_data, map, by.x = 'resno', by.y = 'seq')
write.table(new_data, glycan_map_file, row.names = F)
}
|
/glycan_map.R
|
no_license
|
RouxCil/Phd_scripts
|
R
| false | false | 1,418 |
r
|
|
##############
# Download all the TCGA data
###############
# Downloads and compiles TCGA gene expression, mutation, and clinical data
library(TCGAbiolinks)
library(SummarizedExperiment)
library(GenomicDataCommons)
library(data.table)
library(R.utils)
library(scales)
#### USER MUST SET THEIR OWN DOWNLOAD DIRECTORY
## Note: on Windows, if this path is too long, it will cause an error in GDCdownload()
working_TCGA_data_dir <- "/data/TCGA/"
source("scripts/get_LCAM_scores.R")
## functions:
TCGAtranslateID = function(file_ids, legacy = FALSE) {
info = files(legacy = legacy) %>%
filter( ~ file_id %in% file_ids) %>%
select('cases.samples.submitter_id') %>%
results_all()
# The mess of code below is to extract TCGA barcodes
# id_list will contain a list (one item for each file_id)
# of TCGA barcodes of the form 'TCGA-XX-YYYY-ZZZ'
id_list = lapply(info$cases,function(a) {
a[[1]][[1]][[1]]})
# so we can later expand to a data.frame of the right size
barcodes_per_file = sapply(id_list,length)
# And build the data.frame
return(data.frame(file_id = rep(ids(info),barcodes_per_file),
submitter_id = unlist(id_list)))
}
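# Usage: TCGAtranslateID(<vector of GDC file UUIDs>) returns a data.frame with one row
# per barcode, mapping each file_id to its TCGA submitter_id barcode.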
## Start interactive workflow:
tcga_abbreviations <- read.csv("input_tables/TCGA_abbreviations.csv",stringsAsFactors=F,h=F,fileEncoding="UTF-8-BOM")
tcga_abbreviations[tcga_abbreviations=="Brain Lower Grade Glioma"] <- "Low Grade Glioma"
tcga_abbreviations[tcga_abbreviations=="Breast invasive carcinoma"] <- "Breast Cancer"
tcga_abbreviations[tcga_abbreviations=="Colon adenocarcinoma"] <- "colorectal adenocarcinoma"
tcga_abbreviations[tcga_abbreviations=="Kidney Chromophobe"] <- "kidney chromophobe renal cell carcinoma"
tcga_abbreviations[tcga_abbreviations=="Pancreatic adenocarcinoma"] <- "Pancreatic ductal adenocarcinoma"
tcga_abbreviations[tcga_abbreviations=="Pheochromocytoma and Paraganglioma"] <- "Paraganglioma and Pheochromocytoma"
tcga_abbreviations[tcga_abbreviations=="Rectum adenocarcinoma"] <- "colorectal adenocarcinoma"
tcga_abbreviations[tcga_abbreviations=="Cervical squamous cell carcinoma and endocervical adenocarcinoma"] <- "Cervical Carcinoma"
tcga_abbreviations[tcga_abbreviations=="Thyroid carcinoma"] <- "Thyroid papillary carcinoma"
exclude_projects <- c("Controls","FFPE Pilot Phase II",
"Miscellaneous","Chronic Myelogenous Leukemia")
tcga_abbreviations <- tcga_abbreviations[!tcga_abbreviations[,2]%in%exclude_projects,]
project_IDs <- tcga_abbreviations[,1]
#project_IDs <- project_IDs[1:3]
if(!dir.exists(working_TCGA_data_dir)){
dir.create(working_TCGA_data_dir)
}
github_repo_dir <- getwd()
setwd(working_TCGA_data_dir)
# a translation table from Ensembl IDs to gene symbols. Taken from the 10X workflow so that they are as consistent as possible.
gene_ids <- read.table(file.path(github_repo_dir,"input_tables/ensemble_ids.tsv"),sep="\t",r=1,h=0,stringsAsFactors = F)
# THESE LOOPS DOWNLOAD ALL THE TCGA DATA
# YOU MAY NEED TO RUN THEM MULTIPLE TIMES FOR ALL DOWNLOADS TO COMPLETE
for(project in project_IDs){
if(!file.exists(file.path(working_TCGA_data_dir,paste(project,"_exprs.rd",sep="")))){
#get transcriptome data
q <- GDCquery(project = paste("TCGA",project,sep="-"),
data.category = "Transcriptome Profiling",
data.type="Gene Expression Quantification",
workflow.type = "HTSeq - FPKM",experimental.strategy = "RNA-Seq",
legacy = F)
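    # GDCdownload() is called repeatedly as a crude retry: chunks that failed on an
    # earlier attempt are picked up on the next one (see the note above this loop).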
tmp <- try({GDCdownload(q,directory = working_TCGA_data_dir,method="api",files.per.chunk = 10)})
try(GDCdownload(q,directory = working_TCGA_data_dir,method="api",files.per.chunk = 10))
GDCdownload(q,directory = working_TCGA_data_dir,method="api",files.per.chunk = 10)
uuids <- list.dirs(file.path(working_TCGA_data_dir,paste("TCGA-",project,sep=""),"harmonized/Transcriptome_Profiling/Gene_Expression_Quantification/"))
uuids <- unlist(lapply(strsplit(uuids,"/"),function(x){tail(x,1)}))
uuids <- setdiff(uuids,"Gene_Expression_Quantification")
submitter_ids <- TCGAtranslateID(uuids)
uuids <- list.dirs(file.path(working_TCGA_data_dir,paste("TCGA-",project,sep=""),"harmonized/Transcriptome_Profiling/Gene_Expression_Quantification/"))
uuids <- uuids[-1]
fn <- uuids[1]
fn <- file.path(fn,list.files(fn))
expr_vec <- read.table(gzfile(fn),sep="\t",stringsAsFactors = F)
expr_mat <- matrix(NA,nrow=nrow(expr_vec),ncol=nrow(submitter_ids),
dimnames=list(expr_vec$V1,submitter_ids$file_id))
message("reading files")
for(fns in uuids){
uuid <- strsplit(fns,"/")
uuid <- tail(uuid[[1]],1)
fn <- file.path(fns,list.files(fns))
expr_vec <- read.table(gzfile(fn),sep="\t",stringsAsFactors = F)
expr_mat[,uuid] <- expr_vec$V2
}
colnames(expr_mat) <- submitter_ids[match(colnames(expr_mat),submitter_ids$file_id),]$submitter_id
rownames(expr_mat) <- unlist(lapply(strsplit(rownames(expr_mat),"\\."),function(x){x[1]}))
expr_mat <- expr_mat[intersect(rownames(expr_mat),gene_ids$V2),]
rownames(expr_mat) <- rownames(gene_ids)[match(rownames(expr_mat),gene_ids$V2)]
message("saving expr")
save(expr_mat,file=paste(project,"exprs.rd",sep="_"))
message("removing read files")
unlink(file.path(working_TCGA_data_dir,paste("TCGA-",project,sep="")),recursive=T)
}
}
#patient_barcodes <- TCGAtranslateID(uuids)
# get MAF files
for(project in project_IDs){
if(!dir.exists(file.path(working_TCGA_data_dir,"GDCdata",paste("TCGA-",project,sep="")))){
try(GDCquery_Maf(tumor=project,pipelines="mutect2"))
GDCquery_Maf(tumor=project,pipelines="mutect2")
}
}
# get clinical data
for(project in project_IDs){
clin <- GDCquery_clinic(project=paste("TCGA",project,sep="-"),type="clinical",save.csv=TRUE)
}
###############
#Download all the ep_scores for the TCGA data
not_in_ESTIMATE <- c("Cholangiocarcinoma",
"Lymphoid Neoplasm Diffuse Large B-cell Lymphoma",
"Mesothelioma",
"Sarcoma",
"Testicular Germ Cell Tumors",
"Thymoma",
"Uveal Melanoma")
ESTIMATE_IDs <- tcga_abbreviations[!tcga_abbreviations[,2]%in%not_in_ESTIMATE,1]
for(project_ID in ESTIMATE_IDs){
project_name <- tcga_abbreviations[match(project_ID,tcga_abbreviations[,1]),2]
url <- paste("https://bioinformatics.mdanderson.org/estimate/tables/",
tolower(gsub(" ","_",project_name)),"_RNAseqV2.txt",sep="")
destfile <- file.path(working_TCGA_data_dir,paste("estimate_",gsub(" ","_",project_name),".txt",sep=""))
if(!file.exists(destfile)){
download.file(url,destfile = file.path(working_TCGA_data_dir,paste("estimate_",gsub(" ","_",project_name),".txt",sep="")))
}
}
# Compute LCAM scores and n_mut
output_list <- list()
for(project_ID in project_IDs){
message(project_ID)
expr_fn <- paste(project_ID,"exprs.rd",sep="_")
load(expr_fn)
cols <- unique(colnames(expr_mat)[substr(colnames(expr_mat),14,15)%in%c("01","03") &
substr(colnames(expr_mat),16,16)%in%c("A","-")])
output_list[[project_ID]] <- get_LCAM_scores(expr_mat[,cols])
estimate_fn <- file.path(working_TCGA_data_dir,paste("estimate_",gsub(" ","_",tcga_abbreviations[match(project_ID,tcga_abbreviations[,1]),2]),".txt",sep=""))
if(file.exists(estimate_fn)){
message("reading ESTIMATE scores")
estimate <- read.csv(estimate_fn,stringsAsFactors = F,sep="\t")
output_list[[project_ID]] <- cbind(output_list[[project_ID]],
estimate[match(substr(rownames(output_list[[project_ID]]),1,15),estimate$ID),"Immune_score"])
colnames(output_list[[project_ID]])[4] <- "ESTIMATE_Immune"
}
message("loading mutation data")
mut_fn <- list.files(file.path(working_TCGA_data_dir,"GDCdata",paste("TCGA-",project_ID,sep="")),r=T)
mut_fn <- file.path(working_TCGA_data_dir,"GDCdata",paste("TCGA-",project_ID,sep=""),mut_fn)
file.copy(from=mut_fn,to=file.path(working_TCGA_data_dir,"staged_mut_file.maf.gz"),overwrite=T)
mut_data <- fread(file.path(working_TCGA_data_dir,"staged_mut_file.maf.gz"))
mut_data <- mut_data[mut_data$Variant_Classification%in%c("Missense_Mutation","Nonsense_Mutation")]
mut_tab <- table(mut_data$Hugo_Symbol,mut_data$Tumor_Sample_Barcode)
colnames(mut_tab) <- substr(colnames(mut_tab),1,15)
n_mut <- colSums(mut_tab)
if(!"n_muts"%in%colnames(output_list)){
output_list[[project_ID]] <- cbind(output_list[[project_ID]],n_mut[substr(rownames(output_list[[project_ID]]),1,15)])
colnames(output_list[[project_ID]])[ncol(output_list[[project_ID]])] <- "n_muts"
}
mut_data <- mut_data[!mut_data$Variant_Classification%in%c("RNA","Silent"),]
mut_tab <- table(mut_data$Hugo_Symbol,mut_data$Tumor_Sample_Barcode)
colnames(mut_tab) <- substr(colnames(mut_tab),1,15)
not_in_tab <- !c("TP53","KRAS","EGFR","STK11")%in%rownames(mut_tab)
driver_mut_tab <- as.matrix(mut_tab[setdiff(c("TP53","KRAS","EGFR","STK11"),c("TP53","KRAS","EGFR","STK11")[not_in_tab]),])
if(sum(!not_in_tab)==1){
driver_mut_tab <- t(driver_mut_tab)
rownames(driver_mut_tab) <- c("TP53","KRAS","EGFR","STK11")[!not_in_tab]
}
driver_mut_tab <- rbind(driver_mut_tab,matrix(0,nrow=sum(not_in_tab),ncol=ncol(driver_mut_tab),
dimnames=list(c("TP53","KRAS","EGFR","STK11")[not_in_tab],NULL)))
no_mut_pats <- setdiff(substr(rownames(output_list[[project_ID]]),1,15),colnames(mut_tab))
driver_mut_tab <- cbind(driver_mut_tab,matrix(NA,nrow=nrow(driver_mut_tab),ncol=length(no_mut_pats),
dimnames=list(c("TP53","KRAS","EGFR","STK11"),no_mut_pats)))
  output_list[[project_ID]] <- cbind(output_list[[project_ID]],t(driver_mut_tab)[substr(rownames(output_list[[project_ID]]),1,15),c("TP53","KRAS","EGFR","STK11")])
colnames(output_list[[project_ID]])[(ncol(output_list[[project_ID]])-3):ncol(output_list[[project_ID]])] <- c("TP53","KRAS","EGFR","STK11")
}
save(output_list,file=file.path(github_repo_dir,"output/statistics/TCGA_results.rd"))
#load("/users/Andrew Leader/Google Drive/merad/scRNAseq_analysis/results/AL/lung_main/output/TCGA_results.rd")
#Make plots
subtype_ord <- order(unlist(lapply(output_list,function(x){cor(x[,"difference"],x[,"n_muts"],method="spearman",use="p")})))
p_value <- unlist(lapply(output_list,function(x){
cor.test(x[,"difference"],x[,"n_muts"],method="spearman",use="p")$p.value}))
q_value <- p.adjust(p_value,method="bonf")
cor_value <- unlist(lapply(output_list,function(x){cor.test(x[,"difference"],x[,"n_muts"],method="spearman",use="p")$estimate}))
write.csv(cbind(p_value,q_value),file.path(github_repo_dir,"output/statistics/figure_7f.csv"))
#cancer_var <- unlist(lapply(output_list,function(x){var(log10(1+x[,"n_muts"]),na.rm=T)}))
#plot(cancer_var,cor_value,pch=16,xlab="Var( Log10[1+#Mut] )",ylab="Rho( Log10[1+#Mut] , LCAM )",
# main="Variance of #mutations vs. detected association with LCAM score",col="white")
#text(names(cancer_var),x=cancer_var,y=cor_value,col=1+as.numeric(names(cancer_var)%in%c("SKCM","LUAD","LUSC")),
# font=1+as.numeric(names(cancer_var)%in%c("SKCM","LUAD","LUSC")))
#text(paste("rho=",round(cor(cancer_var,cor_value,method="spearman"),2),"; p=",signif(cor.test(cancer_var,cor_value,method="spearman")$p.value,2)),x=0.4,y=-0.1)
#plot(cancer_var,-log10(1e-16+q_value),pch=16,xlab="Var( Log10[1+#Mut] )",ylab="-log10(q value) of cor( Log10[1+#Mut] , LCAM )",
# main="Variance of #mutations vs. detected association with LCAM score",col="white")
#text(names(cancer_var),x=cancer_var,y=-log10(1e-16+q_value),col=1+as.numeric(names(cancer_var)%in%c("SKCM","LUAD","LUSC")),
# font=1+as.numeric(names(cancer_var)%in%c("SKCM","LUAD","LUSC")))
#text(paste("rho=",round(cor(cancer_var,-log10(q_value+1e-16),method="spearman"),2),"; p=",signif(cor.test(cancer_var,-log10(q_value+1e-16),method="spearman")$p.value,2)),x=0.4,y=14)
###############
# png(file.path(github_repo_dir,"output/figures/figure_7f.png"),
# units="in",height=4,width=4,res=300,bg="transparent")
# par(mgp=c(2,1,0))
# plot(cor_value,-log10(q_value+1e-16),xlab=expression(rho),xlim=c(-.5,.5),ylab=expression(paste('-log10(p'['adj'],")",sep="")),col=alpha(1,0.5),pch=16)
#
# abline(lty=2,col="red",h=2)
# text(expression(paste(alpha,"=0.05")),x=-0.4,y=4,col="red")
# mtext("TCGA tumor types",cex=1.5)
# dev.off()
png(file.path(github_repo_dir,"output/figures/figure_7f.png"),
units="in",height=4,width=4,res=300,bg="transparent")
par(mgp=c(2,1,0))
plot(cor_value,-log10(q_value+1e-16),xlab=expression(rho),xlim=c(-.5,.5),ylab=expression(paste('-log10(p'['adj'],")",sep="")),col=alpha(1,0.5),pch=16)
abline(lty=2,col="red",h=2)
text(expression(paste(alpha,"=0.05")),x=-0.4,y=4,col="red")
#text(x=0.7,y=-2.5,paste("r=",signif(res$estimate,2),"; p=",signif(res$p.value,2),sep=""))
mtext("TCGA tumor types",cex=1.5)
text(cor_value,-log10(q_value+1e-16),names(output_list))
dev.off()
#############
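# Regress each tumor type's LCAM score (column 3) on log10(1 + #mutations) and keep the
# per-sample residuals, i.e. the LCAM variation not explained by tumor mutation burden.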
residual_list <- lapply(output_list,function(x){
  lm(x[,3]~log10(1+x[,"n_muts"]))$residuals
})
#sig_types <- names(q_value)[q_value < 0.05]
sig_types <- names(output_list)
#t_stat_mat <- matrix(NA,nrow=4,ncol=length(sig_types),dimnames=list(c("TP53","KRAS","EGFR","STK11"),sig_types))
t_stat_mat <- matrix(NA,nrow=2,ncol=length(sig_types),dimnames=list(c("TP53","KRAS"),sig_types))
t_pval_mat <- t_stat_mat
frac_mat <- t_stat_mat
count_mat <- t_stat_mat
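# For each tumor type, t-test the TMB-adjusted LCAM residuals between mutant and
# wild-type samples for each driver gene; genes mutated in fewer than 10 samples are set to NA.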
for(col in colnames(t_stat_mat)){
for(row in rownames(t_stat_mat)){
t <- NA
t <- try(t.test(residual_list[[col]]~output_list[[col]][names(residual_list[[col]]),row]==0))
if(!is.null(names(t))){
t_stat_mat[row,col] <- t.test(residual_list[[col]]~output_list[[col]][names(residual_list[[col]]),row]==0)$statistic
t_pval_mat[row,col] <- t.test(residual_list[[col]]~output_list[[col]][names(residual_list[[col]]),row]==0)$p.value
frac_mat[row,col] <- sum(output_list[[col]][,row],na.rm=T)/sum(!is.na(output_list[[col]][,"n_muts"]))
count_mat[row,col] <- sum(output_list[[col]][,row],na.rm=T)
if(count_mat[row,col] < 10){
t_stat_mat[row,col] <- NA
t_pval_mat[row,col] <- NA
}
}
}
}
count_mat[is.na(count_mat)] <- 0
t_stat_mat <- t_stat_mat[,setdiff(colnames(t_stat_mat),"LUAD")]
t_pval_mat <- t_pval_mat[,setdiff(colnames(t_pval_mat),"LUAD")]
thorrsen <- read.csv(file.path(github_repo_dir,"input_tables/Thorsson_et_al_TCGA_tumor_immunology_metrics.csv"),
r=1,h=1,stringsAsFactors = F)
cta_rho <- array(NA,ncol(t_stat_mat),dimnames=list(colnames(t_stat_mat)))
cta_cor_pval <- cta_rho
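# Per tumor type, Spearman correlation of the TMB-adjusted LCAM residuals with the
# CTA score from the Thorsson et al. immune-landscape table.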
for(type in names(cta_rho)){
cor_res <- try(cor.test(residual_list[[type]],thorrsen[substr(names(residual_list[[type]]),1,12),]$CTA.Score,na.rm=T,method="spearman"))
cta_rho[type] <- try(cor_res$estimate)
cta_cor_pval[type] <- try(cor_res$p.value)
}
res_mat <- rbind(t_stat_mat,as.numeric(cta_rho))
pmat <- rbind(t_pval_mat,as.numeric(cta_cor_pval))
res_vec <- c(pmat[1,],pmat[2,],pmat[3,])
names(res_vec) <- paste(names(res_vec),rep(c("TP53","KRAS","CTA"),each=ncol(pmat)),sep=".")
res_vec <- res_vec[!is.na(res_vec)]
write.csv(cbind(res_vec,p.adjust(res_vec,method="bonf")),file.path(github_repo_dir,"output/statistics/figure_s7cd.csv"))
#
# png(file.path(github_repo_dir,"output/figures/figure_s7c.png"),
# units="in",height=4,width=15,res=300,bg="transparent",pointsize=16)
# par(mfrow=c(1,3))
# par(mgp=c(2,1,0))
#
# plot(res_mat[1,],-log10(1e-4+pmat[1,]*sum(!is.na(res_mat))),ylim=c(-2,4),pch=16,col=alpha(1,.6),xlim=c(-10,10),xlab="t-statistic",
# ylab=expression(paste('-log10(p'['adj'],")",sep="")))
# abline(h=-log10(1e-4+0.05),col="red",lty=2)
# text(expression(paste(alpha,"=0.05")),x=-8,y=2,col="red")
# mtext("TP53",cex=1.5)
#
#
# plot(res_mat[2,],-log10(1e-4+pmat[2,]*sum(!is.na(res_mat))),ylim=c(-2,4),pch=16,col=alpha(1,.6),xlim=c(-10,10),xlab="t-statistic",
# ylab=expression(paste('-log10(p'['adj'],")",sep="")))
# abline(h=-log10(1e-4+0.05),col="red",lty=2)
# text(expression(paste(alpha,"=0.05")),x=-8,y=2,col="red")
# mtext("KRAS",cex=1.5)
# plot(res_mat[3,],-log10(1e-4+pmat[3,]*sum(!is.na(res_mat))),ylim=c(-2,4),pch=16,col=alpha(1,.6),xlim=c(-.4,.4),xlab=expression(rho),
# ylab=expression(paste('-log10(p'['adj'],")",sep="")))
# abline(h=-log10(1e-4+0.05),col="red",lty=2)
# text(expression(paste(alpha,"=0.05")),x=-0.3,y=2,col="red")
# mtext("CTA score",cex=1.5)
#
# dev.off()
png(file.path(github_repo_dir,"output/figures/figure_s7cd.png"),
units="in",height=4,width=15,res=300,bg="transparent",pointsize=16)
par(mfrow=c(1,3))
par(mgp=c(2,1,0))
plot(res_mat[1,],-log10(1e-4+pmat[1,]*sum(!is.na(res_mat))),ylim=c(-2,4),pch=16,col=alpha(1,.6),xlim=c(-10,10),xlab="t-statistic",
ylab=expression(paste('-log10(p'['adj'],")",sep="")))
abline(h=-log10(1e-4+0.05),col="red",lty=2)
text(expression(paste(alpha,"=0.05")),x=-8,y=2,col="red")
mtext("TP53",cex=1.5)
text(colnames(res_mat),x=res_mat[1,],y=-log10(1e-4+pmat[1,]*sum(!is.na(res_mat))))
plot(res_mat[2,],-log10(1e-4+pmat[2,]*sum(!is.na(res_mat))),ylim=c(-2,4),pch=16,col=alpha(1,.6),xlim=c(-10,10),xlab="t-statistic",
ylab=expression(paste('-log10(p'['adj'],")",sep="")))
abline(h=-log10(1e-4+0.05),col="red",lty=2)
text(expression(paste(alpha,"=0.05")),x=-8,y=2,col="red")
mtext("KRAS",cex=1.5)
plot(res_mat[3,],-log10(1e-4+pmat[3,]*sum(!is.na(res_mat))),ylim=c(-2,4),pch=16,col=alpha(1,.6),xlim=c(-.4,.4),xlab=expression(rho),
ylab=expression(paste('-log10(p'['adj'],")",sep="")))
abline(h=-log10(1e-4+0.05),col="red",lty=2)
text(expression(paste(alpha,"=0.05")),x=-0.3,y=2,col="red")
mtext("CTA score",cex=1.5)
text(colnames(res_mat),x=res_mat[3,],y=-log10(1e-4+pmat[3,]*sum(!is.na(res_mat))))
dev.off()
#
# #text(x=0.7,y=-2.5,paste("r=",signif(res$estimate,2),"; p=",signif(res$p.value,2),sep=""))
# mtext("TCGA tumor types",cex=1.5)
# text(cor_value,-log10(q_value+1e-16),names(output_list))
#
# dev.off()
#
# par(mfrow=c(2,2))
# for(row in rownames(t_stat_mat)[1:2]){
# frac <- round(unlist(lapply(output_list[sig_types],function(x){sum(x[,row]/sum(!is.na(x[,"n_muts"])),na.rm=T)})),2)
# labs <- paste(names(frac),frac,"-")
# labs <- names(frac)
# # plot(t_stat_mat[row,],-log10(1e-4+t_pval_mat[row,]*sum(!is.na(t_pval_mat))),pch="",main=row,ylim=c(-2,4),xlim=c(-10,10),
# # ylab="-log10(q-value)",xlab="t_statistic")
# x <- t_stat_mat[row,]
# y <- -log10(1e-4+t_pval_mat[row,]*sum(!is.na(t_stat_mat)))
# plot(x,y,pch="",main=row,ylim=c(-2,4),xlim=c(-10,10),
# ylab="-log10(q-value)",xlab="t_statistic")
# text(x=x,y=y,names(x),xpd=NA)
# abline(h=-log10(1e-4+0.05),col="red")
#
# }
#
thorrsen <- read.csv(file.path(github_repo_dir,"input_tables/Thorsson_et_al_TCGA_tumor_immunology_metrics.csv"),
r=1,h=1,stringsAsFactors = F)
cta_rho <- array(NA,ncol(t_stat_mat),dimnames=list(colnames(t_stat_mat)))
cta_cor_pval <- cta_rho
for(type in names(cta_rho)){
cor_res <- try(cor.test(residual_list[[type]],thorrsen[substr(names(residual_list[[type]]),1,12),]$CTA.Score,na.rm=T,method="spearman"))
cta_rho[type] <- try(cor_res$estimate)
cta_cor_pval[type] <- try(cor_res$p.value)
}
x <- as.numeric(cta_rho)
y <- -log10(as.numeric(cta_cor_pval)*15)
plot(x,y,pch="",xlab="Rho(residuals, CTA score)",ylab="-log10(padj)")
text(x=x,y=y,label=names(cta_rho),xpd=NA)
#
# genes <- strsplit("IGHG3,IGHG4,IGHG1,MZB1,FAM92B,SPAG4,DERL3,JSRP1,TBCEL,LINC01485,SLC2A5,SPP1,CCL7,HAMP,CXCL13,ZBED2,GNG4,KRT86,TNFRSF9,LAYN,GZMB,KIR2DL4,GAPT,CTB-133G6.1,AKR1C3,RSPO3,MCEMP1,RND3,HP,FOLR3,GPD1,GS1-600G8.5,PCOLCE2,CAMP,CCL17,CD1B,CD1C,CD1E,PLD4,TRPC6,CLEC10A,FCER1A,PLA1A,CFP,CEACAM8,AZU1,PKP2,RETN,LYZ,ELANE,ATP6AP1L,RP6-159A1.4,THBS1,CFD,P2RY14,DNASE1L3,PTGER3,CST3,BATF3,CPVL,RGS18,SERPINF2,LGALS2",",")[[1]]
#
#
# figure_path <- "/users/andrew leader/google drive/merad/scRNAseq_analysis/results/AL/lung_main/output/figures/revisions"
# dir.create(figure_path)
# dir.create(file.path(figure_path,"TCGA"))
#
# for(iter in subtype_ord){
#
# #plot heatmap
# project <- names(output_list)[iter]
# load(file.path(working_TCGA_data_dir,paste(project,"_exprs.rd",sep="")))
# patient_ord <- rev(rownames(output_list[[project]])[order(output_list[[project]][,3])])
#
# mat <- expr_mat[genes,patient_ord]
#
# mat <- t(t(mat)/rowSums(t(mat)))
# mat <- log10(1e-6+mat)
# mat <- t(scale(t(mat)))
# mat[mat > 2] <- 2
# mat[mat < -2] <- -2
# pdf(file.path(figure_path,"TCGA",paste(project,"_heatmap.pdf",sep="")),height=3,width=5,pointsize=6)
# pimage(t(mat),axes="x")
# dev.off()
#
# #plot correlation by group
# if(colnames(output_list[[project]])[4]=="ESTIMATE_Immune"){
# ep_group <- cut(output_list[[project]][,4],quantile(output_list[[project]][,4],seq(0,1,.1),na.rm=T))
# tumor_split <- split(rownames(output_list[[project]]),ep_group)
#
# split_cor <- lapply(tumor_split,function(x){
# return(cor.test(output_list[[project]][x,1],output_list[[project]][x,2],method="spearman"))})
# #split_cor <- unlist(split_cor)
#
# cor_list <- unlist(lapply(split_cor,function(x){x$estimate}))
# lower_bound <- unlist(lapply(split_cor,function(x){x$conf.int[[1]]}))
# upper_bound <- unlist(lapply(split_cor,function(x){x$conf.int[[2]]}))
#
#
# #with spearman correlation:
#
# sig <- 1.96/sqrt(unlist(lapply(tumor_split,length))-3)
# lower_bound <- tanh(atanh(unlist(lapply(split_cor,function(x){x$estimate})))-sig)
# upper_bound <- tanh(atanh(unlist(lapply(split_cor,function(x){x$estimate})))+sig)
#
#
# png(file.path(figure_path,"TCGA",paste(project,"_quantile_cor.png",sep="")),height=1.4,width=1.7,units="in",res=300,pointsize=6,bg="transparent")
# par(mgp=c(2,1,0))
# plot(1:10,cor_list,cex=0,ylim=c(-1,1)*max(abs(c(lower_bound,upper_bound))),xlim=c(0.5,10.5),xaxt="n",xlab="",
# ylab="Spearman rho\n(LCAMhi score, LCAMlo score)")
# segments(x0=1:10-0.15,x1=1:10+0.15,y0=cor_list,y1=cor_list,lwd=3)
# segments(x0=1:10,x1=1:10,y0=lower_bound,y1=upper_bound)
# abline(h=0,col="red",lty=2)
# mtext(side=1,at=1:10,1:10)
# mtext(side=1,line=2,"Decile of immune infiltrate")
# mtext("LCAM-hi/lo summary scores")
# dev.off()
# }
# png(file.path(figure_path,"TCGA",paste(project,"_mut_cor.png",sep="")))
# plot(log10(1+output_list[[project]][,"n_muts"]),output_list[[project]][,3])
# dev.off()
# }
#
#
#
# cor_value <- unlist(lapply(output_list,function(x){cor.test(x[,"difference"],x[,"n_muts"],method="spearman",use="p")$estimate}))
# q_value <- unlist(lapply(output_list,function(x){cor.test(x[,"difference"],x[,"n_muts"],method="spearman",use="p")$p.value}))*length(output_list)
#
# plot(cor_value,-log10(q_value+1e-10),xlab="rho",xlim=c(-.5,.5),ylab="-log10[bonf. adj. p]",pch="")
# text(cor_value,-log10(q_value+1e-10),names(output_list),col=1+as.numeric(tcga_abbreviations$type=="A"))
# abline(lty=2,col="red",h=2)
# #sorting by the above correlation value, draw heatmap, correlation by ep_decile, and logTMB vs. LCAM.
#
# cor_value <- unlist(lapply(output_list,function(x){cor.test(x[,"LCAMhi"],x[,"n_muts"],method="spearman",use="p")$estimate}))
# q_value <- unlist(lapply(output_list,function(x){cor.test(x[,"LCAMhi"],x[,"n_muts"],method="spearman",use="p")$p.value}))*length(output_list)
#
# plot(cor_value,-log10(q_value+1e-10),xlab="rho",xlim=c(-.5,.5),ylab="-log10[bonf. adj. p]",pch="")
# text(cor_value,-log10(q_value+1e-10),names(output_list),col=1+as.numeric(tcga_abbreviations$type=="A"))
# abline(lty=2,col="red",h=2)
#
#
# cor_value <- unlist(lapply(output_list,function(x){cor.test(x[,"LCAMlo"],x[,"n_muts"],method="spearman",use="p")$estimate}))
# q_value <- unlist(lapply(output_list,function(x){cor.test(x[,"LCAMlo"],x[,"n_muts"],method="spearman",use="p")$p.value}))*length(output_list)
#
# plot(cor_value,-log10(q_value+1e-10),xlab="rho",xlim=c(-.5,.5),ylab="-log10[bonf. adj. p]",pch="")
# text(cor_value,-log10(q_value+1e-10),names(output_list),col=1+as.numeric(tcga_abbreviations$type=="A"))
# abline(lty=2,col="red",h=2)
#
# tcga_abbreviations$type <- "NA"
# adenos <- c("ACC","BRCA","COAD","LUAD","OV","PAAD","PRAD","READ","STAD","UCEC")
# tcga_abbreviations$type[tcga_abbreviations$V1%in%adenos] <- "A"
#
# plot(cor_value,-log10(q_value+1e-10),xlab="rho",xlim=c(-.5,.5),ylab="-log10[bonf. adj. p]",pch="",col=1+as.numeric(tcga_abbreviations$type=="A"))
# text(cor_value,-log10(q_value+1e-10),names(output_list),col=1+as.numeric(tcga_abbreviations$type=="A"))
library(scales)
#plot(log10(1+output_list$LUAD[,"n_muts"]),output_list$LUAD[,3],pch=16,col=alpha(1,.4),xlab="LogTMB",ylab="LCAMhi-LCAMlo")
#points(log10(1+output_list$LUSC[,"n_muts"]),output_list$LUSC[,3],col=alpha("red",.4),pch=16)
png(file.path(github_repo_dir,"output/figures/figure_7e.png"),height=4,width=4,units="in",res=300,bg="transparent")
par(mgp=c(2,1,0))
plot(log10((1+output_list$LUAD[,"n_muts"])/48.2),output_list$LUAD[,3],pch=16,col=alpha(1,.4),
xlab=expression(paste("Log"["10"],"[TMB/Mb]",sep="")),
ylab="LCAM score",cex=0.5)
points(log10((1+output_list$LUSC[,"n_muts"])/48.2),output_list$LUSC[,3],col=alpha("red",.4),pch=16,cex=0.5)
dev.off()
|
/scripts/figure_7ef_s7cd.R
|
no_license
|
Sudolin/Leader_et_al
|
R
| false | false | 25,256 |
r
|
|
rm(list = ls())
#read in data
source("Code/functions.R")
fishes <- read.csv("Data/fishes.csv" )#, na.strings = c("","NA"))
#######Changing continuous variables to nominal
fishes$trophic_bin <- cut(fishes$Trophic_level,breaks = c(-Inf,3,3.5,4,Inf),
labels = c("Low", "Medium", "High", "VHigh"))
fishes$maxDepth_bin <- cut(fishes$max_depth,breaks = c(-Inf, 50,200,500,1000, Inf),
labels = c("Reef", "Shallow", "Ocean", "Deep", "Bathy"))
fishes$CommaxDepth_bin <- cut(fishes$DepthRangeComDeep_F,breaks = c(-Inf, 50,200,500, Inf),
labels = c("Reef", "Shallow", "Ocean", "Deep"))
fishes$maxLength_bin <- cut(fishes$maxLength,breaks = c(-Inf, 15,50,100,250,500, Inf),
labels = c("Tiny","VSmall", "Small", "Medium", "Large", "VLarge"))
###Missing data imputation
library(missMDA) #for imputation
library(missForest) #for creating missing values
#library(compareDF)
library(dplyr)
##Test the accuracy of the imputing method missMDA
##Create a dataset with no missing values
dataSET <- fishes[fishes$Occurance_TBGB=="RAR"|fishes$Occurance_TBGB=="RES",
c("Scientific_name",
"Occurance_TBGB",
"Diet",
"maxDepth_bin",
"Temp_type",
"Vertical_Habitat",
"Horizontal_Habitat",
"Pelvic_finPosition",
#"Caudal_finShape",
#"Dorsal_finShape",
"Swimming_mode",
"Body_form",
"Eye_position",
"Oral_gapePosition",
"Colour",
"maxLength_bin")]
#Check there are no missing values
dataSET <- droplevels(dataSET)
round(apply(dataSET, 2, count_NAs), 2)
#summary(dataSET)
#create a dataset with known missing values
change_output <- NULL
# run the simulation over a range of missing-data proportions (5% to 30% in steps of 5%)
for(i in seq(0.05,0.3,0.05)){
replicate_missMDA <- replicate(100, simulate_missMDA(dataSET[,2:14],i))
save(replicate_missMDA, file=paste0("output/missMDA/data/missMDA_accuracy_",i,"missing.RData"))
png(filename = paste0("output/missMDA/histograms/missMDA_accuracy_",i,"missing.png"),
height = 10, width = 12, res=300,units = "cm")
hist(replicate_missMDA, main = "", xlab = "Accuracy", cex = 2,
breaks = 7, xlim = c(0,1), ylim = c(0,40))
dev.off()
}
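# The helper simulate_missMDA() used above is defined in Code/functions.R and not shown here.
# For reference, a minimal sketch of what such an accuracy check might look like
# (the "_sketch" name, the ncp value and the accuracy metric are assumptions, not the
# project's actual implementation):
simulate_missMDA_sketch <- function(dat, prop_missing, ncp = 5){
  dat <- droplevels(as.data.frame(dat))
  dat_na <- missForest::prodNA(dat, noNA = prop_missing)        # blank out a known fraction of cells
  imputed <- missMDA::imputeMCA(dat_na, ncp = ncp)$completeObs  # impute the categorical traits
  holes <- is.na(dat_na) & !is.na(dat)                          # cells that were removed on purpose
  mean(as.matrix(imputed)[holes] == as.matrix(dat)[holes])      # share of those recovered correctly
}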
##use the package to impute the data
fish <- fishes[fishes$Occurance_TBGB=="RAR"|fishes$Occurance_TBGB=="RES",
c("group_name",
"Scientific_name",
"CommonName",
"Class",
"Order",
"Family",
#"Reference",
#"IUCN_class",
"Occurance_TBGB",
"Diet",
"trophic_bin",
"CommaxDepth_bin",
"maxDepth_bin",
#"DepthRangeComShallow_F",
#"DepthRangeComDeep_F",
"Temp_type",
"Vertical_Habitat",
"Horizontal_Habitat",
#"Estuarine_use",
"Pelvic_finPosition",
"Caudal_finShape",
#"Dorsal_finShape",
"Swimming_mode",
"Body_form",
"Eye_position",
"Oral_gapePosition",
#"Spine",
#"Colour",
"maxLength_bin",
#"Reproductive_Season",
"Reproductive_strategy",
"Sexual_differentation",
"Migration",
"Parental_care",
"Egg_attach",
"Reproduction_location",
"Shooling_type",
"pop_double")]
res.comp <- imputeMCA(fish[,8:29], ncp = 5, maxiter = 500)
ClusterImp <- res.comp$completeObs
ClusterImp <- cbind(ClusterImp,fish[,1:7])
save(ClusterImp, file = "Data/missing/missMDA_imputed.RData")
|
/2.4 missMDA_analysis.R
|
no_license
|
jimjunker1/FunctionalGroupClassification
|
R
| false | false | 4,093 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ti_monocle_ddrtree.R
\name{ti_monocle_ddrtree}
\alias{ti_monocle_ddrtree}
\title{Monocle DDRTree}
\usage{
ti_monocle_ddrtree(reduction_method = "DDRTree", max_components = 2L,
norm_method = "log", auto_param_selection = TRUE,
filter_features = TRUE, filter_features_mean_expression = 0.1)
}
\arguments{
\item{reduction_method}{A character string specifying the algorithm to use for
dimensionality reduction. Domain: {DDRTree}. Default: DDRTree. Format:
character.}
\item{max_components}{The dimensionality of the reduced space. Domain: U(2,
20). Default: 2. Format: integer.}
\item{norm_method}{Determines how to transform expression values prior to
reducing dimensionality. Domain: {vstExprs, log, none}. Default: log. Format:
character.}
\item{auto_param_selection}{When this argument is set to TRUE (default), it
will automatically calculate the proper value for the ncenter (number of
centroids) parameters which will be passed into DDRTree call. Default: TRUE.
Format: logical.}
\item{filter_features}{Whether to include monocle feature filtering. Default:
TRUE. Format: logical.}
\item{filter_features_mean_expression}{Minimal mean feature expression, only
used when \code{filter_features} is set to TRUE. Domain: U(0, 10). Default: 0.1.
Format: numeric.}
}
\value{
A TI method wrapper to be used together with
\code{\link[dynwrap:infer_trajectories]{infer_trajectory}}
}
\description{
Will generate a trajectory using \href{https://doi.org/10.1038/nmeth.4402}{Monocle DDRTree}.
This method was wrapped inside a
\href{https://github.com/dynverse/ti_monocle_ddrtree}{container}.
The original code of this method is available
\href{https://github.com/cole-trapnell-lab/monocle-release}{here}.
}
\references{
Qiu, X., Mao, Q., Tang, Y., Wang, L., Chawla, R., Pliner, H.A.,
Trapnell, C., 2017. Reversed graph embedding resolves complex single-cell
trajectories. Nature Methods 14, 979–982.
}
\keyword{method}
|
/man/ti_monocle_ddrtree.Rd
|
no_license
|
Feigeliudan01/dynmethods
|
R
| false | true | 2,001 |
rd
|
library(shiny)
# Define UI for the LongGold codebook app ----
ui <- fluidPage(
# App title ----
titlePanel("LongGold Codebook"),
  # Sidebar layout with input and output definitions ----
sidebarLayout(
sidebarPanel(
# Input: Select information ----
checkboxGroupInput("info", "Information",
choices = c("Brief Information" = "info",
"Number of items" = "number_items",
"Response format" = "response_format",
"Link to online version" = "link_online_version",
"Link to paper" = "link_publication",
"Reliability Alpha" = "rel_cronbach_alpha",
"Reliability Omega" = "rel_macdonald_omega",
"Reliabilty Retest" = "rel_test_retest",
"SEM" = "sem",
"Guessing Probability" = "threshold_guess_prob",
"Reference" = "reference",
"Additional reference" = "reference_2",
"Mean from original study" = "mean",
"SD from original study" = "sd"
),
selected = "info"),
checkboxGroupInput("language", "Language selection",
choices = c("English" = "english",
"German" = "german"),
selected = "english"),
width = 2
),
# Main panel for displaying outputs ----
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Introduction", htmlOutput("introduction")),
tabPanel("Select Self-report Scales",
inputPanel(
# Input: Select scale ----
checkboxGroupInput("scales", "Select Self-report Scales",
choices = c("Concurrent Musical Activities (CCM)" = "ccm",
"Drama Activity (DAC)" = "dac",
"Demographics (DEG)" = "deg",
"Goldsmiths Musical Sophistication Index (GMS)" = "gms",
"Social Desirability (Lie) Scale (LIE)" = "lie",
"Musical Home Environment (MHE)" = "mhe",
"Physical Activity Questionnaire for Older Children (PAC)" = "pac",
"Academic Self Concept (SCA)" = "sca",
"Social Self Concept (SCS)" = "scs",
"Strengths and Difficulties Questionnaire (SDQ)" = "sdq",
"School Engagement Measurement (SEM)" = "sem",
"Socio-economic Status (SES)" = "ses",
"Student Opinion Scale (SOS)" = "sos",
"Theory of Intelligence (TOI)" = "toi",
"Theory of Musicality (TOM)" = "tom",
"Ten Item Personality Inventory (TIP)" = "tip"
),
selected = "gms")
)),
tabPanel("Select Performance Tests", inputPanel(
# Input: Select tests ----
selectInput("bat", "Beat Perception Test (BAT)",
choices = c("Not selected" = "",
"Default" = "default",
"24" = "24",
"23" = "23",
"22" = "22",
"21" = "21",
"20" = "20",
"19" = "19",
"18" = "18",
"17" = "17",
"16" = "16",
"15" = "15"
),
selected = ""),
selectInput("edt", "Emotion Discrimination Test (EDT)",
choices = c("Not selected" = "",
"Default" = "default",
"17" = "17",
"16" = "16",
"15" = "15"
),
selected = ""),
selectInput("jaj", "Jack and Jill Working Memory Test (JAJ)",
choices = c("Not selected" = "",
"Default" = "default"
),
selected = ""),
selectInput("mdt", "Melody Discrimination Test (MDT)",
choices = c("Not selected" = "",
"Default" = "default",
"19" = "19",
"18" = "18",
"17" = "17",
"16" = "16",
"15" = "15",
"14" = "14",
"13" = "13",
"12" = "12"),
selected = ""),
selectInput("miq", "Matrix reasoning IQ Test (MIQ)",
choices = c("Not selected" = "",
"Default" = "default",
"11" = "11",
"10" = "10",
"9" = "9",
"8" = "8",
"7" = "7",
"6" = "6",
"5" = "5",
"4" = "4"),
selected = ""),
selectInput("mps", "Musical Preference Assessment (MPS)",
choices = c("Not selected" = "",
"Default" = "default"),
selected = ""),
selectInput("mpt", "Mistuning Perception Test (MPT)",
choices = c("Not selected" = "",
"Default" = "default",
"21" = "21",
"20" = "20",
"19" = "19",
"18" = "18",
"17" = "17",
"16" = "16",
"15" = "15"),
selected = ""),
selectInput("rat", "Rhythm Ability Test (RAT)",
choices = c("Not selected" = "",
"Default" = "default",
"19" = "19",
"18" = "18",
"17" = "17",
"16" = "16",
"15" = "15",
"14" = "14",
"13" = "13",
"12" = "12",
"11" = "11",
"10" = "10",
"9" = "9",
"8" = "8"),
selected = ""),
selectInput("sss", "Sound Similiarity Sorting Test (SSS)",
choices = c("Not selected" = "",
"Default" = "default"),
selected = ""),
selectInput("pit", "Pitch Imagery Arrows Test (PIT)",
choices = c("Not selected" = "",
"35" = "35",
"34" = "34",
"33" = "33",
"32" = "32",
"31" = "31",
"Default" = "default",
"29" = "29",
"28" = "28",
"27" = "27",
"26" = "26",
"25" = "25",
"24" = "24",
"23" = "23",
"22" = "22",
"21" = "21",
"20" = "20",
"19" = "19",
"18" = "18",
"17" = "17",
"16" = "16"),
selected = ""),
selectInput("bds", "Backward Digit Span Working Memory Test (BDS)",
choices = c("Not selected" = "",
"Default" = "default"),
selected = "")
)),
tabPanel("Output",
uiOutput("CB_custom"),
tableOutput("outscale"),
tableOutput("outtest")
),
tabPanel("Glossary",
htmlOutput("glossary")
)
)
)
)
)
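# Note: the outputs referenced above ("introduction", "CB_custom", "outscale", "outtest",
# "glossary") are rendered by a separate server.R, which is not part of this file.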
|
/ui.R
|
no_license
|
NicolasRuth/LongGoldCodebook
|
R
| false | false | 12,572 |
r
|
findPython <- function(...) {
versionPattern <- c("--version"='.* ([0-9.a-z]+)?')
findExternal(command="python", versionPattern=versionPattern, ...)
} # findPython()
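# findExternal() is defined elsewhere in aroma.seq; it locates the 'python' executable on
# the search path and parses its version from the output of `python --version` using the
# capture group in versionPattern above.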
|
/R/findPython.R
|
no_license
|
HenrikBengtsson/aroma.seq
|
R
| false | false | 170 |
r
|
# This script provides functions to prepare Human Activity Recognition Using Smartphones Dataset
# and run analysis on the formatted dataset to get the average of each variable for each activity and
# each subject
library(reshape2)
# run analysis on the dataset to get the average of each variable for each activity and each subject
# all the variables must be either the mean or the standard deviation of the measurements.
#
# Args:
# dataset_path: the full path to the dataset
#
# Returns:
# a tidy dataset which contains the average of each variable for each activity
# and each subject.
runAnalysis <- function(dataset_path) {
# prepare the dataset
prepared_data <- prepareDataset(dataset_path)
# get all the column names
features <- names(prepared_data)
# define all the ids columns
ids <- c("subject", "activity")
# retain all the measurements
features <- features[! features %in% ids]
# melt the dataset according to its measurements
prepared_data_melt <- melt(prepared_data, id=ids, measure.vars=features)
# calculate the average of each variable
results <- dcast(prepared_data_melt, subject + activity ~ variable,mean)
# return results
results
}
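# Example usage (hypothetical paths; assumes the UCI HAR dataset has been unzipped locally):
# tidy_averages <- runAnalysis("UCI HAR Dataset")
# write.table(tidy_averages, "tidy_averages.txt", row.names = FALSE)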
# Prepare the input dataset according to the following requirements:
#
# 1. merges the training and test dataset.
# 2. extracts only the measurements on the mean and standard deviation for each measurement.
# 3. uses descriptive activity names to name the activities in the dataset.
# 4. labels the dataset with descriptive variable names.
#
# Args:
# dataset_path: the full path to the dataset
#
# Returns:
# a dataset which contains only mean and standard deviation measurements, it also
# contains descriptive labels for the activities
prepareDataset <- function(dataset_path) {
# merge training and test data
x_data <- mergeTrainAndTestDataset(dataset_path)
# merge training and test labels
y_label <- mergeTrainAndTestLabel(dataset_path)
merged_subject <- mergeTrainAndTestSubject(dataset_path)
# extract only the measurements on the mean and standard deviation for each measurement
x_subset <- subsetByMeanAndStd(dataset_path, x_data)
# add column name for label
descriptive_activity_labels <- extractDescriptiveActivityLabels(dataset_path, y_label)
# Assign descriptive activity names to the dataset
final_data <- cbind(merged_subject, descriptive_activity_labels, x_subset)
# return the final data
final_data
}
# Merge training and test data
#
# Args:
# dataset_path: the full path to the dataset
#
# Returns:
# merged training and test dataset
mergeTrainAndTestDataset <- function(dataset_path) {
# read training set
x_train <- read.csv(file.path(dataset_path, "train/X_train.txt"), sep="", header=FALSE)
# read test set
x_test <- read.csv(file.path(dataset_path, "test/X_test.txt"), sep="", header=FALSE)
# merge training set with test set
x_data <- rbind(x_train, x_test)
# return merged dataset
x_data
}
# Merge training and test activity labels
#
# Args:
# dataset_path: the full path to the dataset
#
# Returns:
# merged training and test activity labels
mergeTrainAndTestLabel <- function(dataset_path) {
# read training label
y_train <- read.csv(file.path(dataset_path, "train/y_train.txt"), sep="", header=FALSE)
# read test label
y_test <- read.csv(file.path(dataset_path, "test/y_test.txt"), sep="", header=FALSE)
# merge training label with test label
y_label <- rbind(y_train, y_test)
# return merged labels
y_label
}
# Merge training and test activity subjects
#
# Args:
# dataset_path: the full path to the dataset
#
# Returns:
# merged training and test subjects
mergeTrainAndTestSubject <- function(dataset_path) {
# read training subjects
subject_train <- read.csv(file.path(dataset_path, "train/subject_train.txt"), sep="", header=FALSE)
# read test subjects
subject_test <- read.csv(file.path(dataset_path, "test/subject_test.txt"), sep="", header=FALSE)
# merge training subjects with test subjects
merged_subject <- rbind(subject_train, subject_test)
names(merged_subject) <- "subject"
# return merged subjects
merged_subject
}
# Subset the merged training and test dataset
#
# Args:
# dataset_path: the full path to the dataset
# x_data: merged training and test dataset
#
# Returns:
# subset of the merged training and test dataset, only contains the mean and the standard
# deviation measurements
subsetByMeanAndStd <- function(dataset_path, x_data) {
# read feature names
x_features <- read.csv(file.path(dataset_path, "features.txt"), sep="", header=FALSE)
# label features as the column name for the whole dataset
names(x_data) <- x_features$V2
# select features with mean() and std()
x_mean_std_features <- x_features[grepl("mean()", x_features$V2, fixed=TRUE) | grepl("std()", x_features$V2, fixed=TRUE), ]
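  # fixed = TRUE matches the literal strings "mean()" and "std()", so derived features
  # such as meanFreq() are not selected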
# create a vector that contains all the mean() and std() features
mean_std_features <- as.character(x_mean_std_features$V2)
# extract only the measurements on the mean and standard deviation for each measurement
x_subset <- subset(x_data, select=mean_std_features)
# return subset
x_subset
}
# extract descriptive activity labels
#
# Args:
# dataset_path: the full path to the dataset
# y_label: training and test labels
#
# Returns:
# descriptive activity labels
extractDescriptiveActivityLabels <- function(dataset_path, y_label) {
# read activity labels
activity_labels <- read.csv(file.path(dataset_path, "activity_labels.txt"), sep="", header=FALSE)
# merge y_label with activity labels to have descriptive names
merged_activity_labels <- merge(y_label, activity_labels, by.x="V1", by.y="V1")
# add column name for label
merged_activity_labels <- subset(merged_activity_labels, select=c("V2"))
names(merged_activity_labels) <- "activity"
# return descriptive activity labels
merged_activity_labels
}
|
/run_analysis.R
|
no_license
|
ruiwanguk/getting_and_clean_data_project
|
R
| false | false | 5,877 |
r
|
# Load up the shiny package.
library(shiny)
# Load up the Scales package.
library('scales')
#Load the file containing the DataFrame with Autovit data
load("ExibitionAutovit.RData")
#Load the file containing the DataFrame with Otomoto data
load("ExibitionOtomoto.RData")
source('ExibitionFunctions.R')
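#ExibitionFunctions.R is not shown here; based on their use below, TotalExibitionAutovit()
#and TotalExibitionOtomoto() presumably aggregate sorting counts over the selected date
#range for the totals tables.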
#Define the Shiny server information
server <- function(input, output) {
#colSums (TotalAutovit[,c(2,3,4)])
output$AutovitTotal <- renderDataTable({
TotalExibitionAutovit(input)
}, options = list(ordering=F,paging = FALSE,
searching = FALSE,
orderClasses = TRUE,
order = list(list(0, 'desc'))
)
)
output$OtomotoTotal <- renderDataTable({
TotalExibitionOtomoto(input)
}, options = list(ordering=F,paging = FALSE,
searching = FALSE,
orderClasses = TRUE,
order = list(list(0, 'desc'))
)
)
output$AutovitTable <- renderDataTable({
ExibitionAutovit[ExibitionAutovit$Date >= input$date_range[1] & ExibitionAutovit$Date <= input$date_range[2], ]
}, options = list(paging = FALSE,
searching = FALSE,
orderClasses = TRUE,
order = list(list(0, 'desc'))
)
)
output$OtomotoTable <- renderDataTable({
ExibitionOtomoto[ExibitionOtomoto$Date >= input$date_range[1] & ExibitionOtomoto$Date <= input$date_range[2], ]
}, options = list(paging = FALSE,
searching = FALSE,
orderClasses = TRUE,
order = list(list(0, 'desc'))
)
)
}
#Define the Shiny UI information
ui <- fluidPage(
#Title of the page
titlePanel("Sorting Analysis"),
helpText("Analysis of the amount of use of the sorting
function per Mileage and Power Engine in relation to total sortings"),
sidebarPanel(
sliderInput("date_range", "Choose Date Range:", min = as.POSIXlt("2016-12-12"),
max = as.POSIXlt(AutovitExecutedDate-1),
value = c(as.POSIXlt(AutovitExecutedDate-11),as.POSIXlt(AutovitExecutedDate-1)),
timeFormat = "%Y-%m-%d", ticks = F, animate = F,width = '98%'),
hr(),
helpText("Source: Google Analytics"),
h6("Author: Rodrigo de Caro")),
mainPanel(
tabsetPanel(id = "tabSelected",
tabPanel("Autovit",
h6("Date: ", AutovitExecutedDate),
dataTableOutput("AutovitTable"),
dataTableOutput("AutovitTotal")
),
tabPanel("Otomoto",
h6("Date: ", OtomotoExecutedDate),
dataTableOutput("OtomotoTable"),
dataTableOutput("OtomotoTotal")
)
)
))
shinyApp(ui = ui, server = server)
|
/RC0001-SortingCarsSearchResults/app.R
|
no_license
|
antoniocostaolx/personal
|
R
| false | false | 2,811 |
r
|
#historical climate review
source("climateReaderFunctions.R")
wi.station.code <- 10
wi.station.name <- "C130929"
ic.station.code <- 34
ic.station.name <- "C121319"
cc.station.code <- 37
cc.station.name <- "C138062"
h.pcp.path <- "/Volumes/pbittermSSD/climateInputs/historicalObserved/pcp1.pcp"
h.tmp.path <- "/Volumes/pbittermSSD/climateInputs/historicalObserved/Tmp1.tmp"
h.pcp <- pcp.parser(h.pcp.path)
h.tmp <- tmp.parser(h.tmp.path)
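# Monthly precipitation totals for station C138062 (the cc station): negative values in the
# .pcp file are assumed to be missing-data flags (e.g. -99 in SWAT climate inputs), so they
# are zeroed before summing by year and month.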
cc.pcp <- h.pcp %>% select(newdate, C138062) %>%
mutate(month = format(newdate, "%m"), year = format(newdate, "%Y")) %>%
mutate(C138062 = ifelse(C138062 < 0, 0, C138062)) %>%
group_by(year, month) %>% summarise(monthlyPrecip = sum(C138062)) %>%
mutate(yrMonStr = paste0(year, "-", month, "-1")) %>%
mutate(yrMon = as.Date(yrMonStr))
ggplot(data = cc.pcp, aes(x = month, y = monthlyPrecip, colour = year)) +
#geom_line(aes(group = year)) +
geom_point(aes(colour = year))
cc.tmp <- h.tmp %>% select(newdate, C138062max, C138062min) %>% mutate(C138062mean = (C138062max + C138062min) / 2)
|
/hist_climate.R
|
permissive
|
pjbitterman/rabm_prep_public
|
R
| false | false | 1,061 |
r
|
#' Credit approval data
#'
#'
#'
#' @format A data frame containing 690 rows and 15 columns
#' @source https://archive.ics.uci.edu/ml/datasets/Credit+Approval
#'
#'
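#' @examples
#' head(creditapproval)
#'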
"creditapproval"
|
/R/creditapproval_doc.R
|
no_license
|
Barardo/FFTrees
|
R
| false | false | 182 |
r
|